import numpy

def simanneal( f, a, b, cooling_factor = 0.8, nrand = 10,
                   maxiter = 10000, min_gamma = 1e-8,
                   max_non_improving_steps = 5, verbose = False ):
    """Finds global minimum of continuous function f(x) on [a,b]

    USAGE:
          x_min, fx_min = simanneal( f, a, b )

    INPUT:
          f              - (function) Objective function.  Must accept a
                           numpy array of x values and return an array of
                           the corresponding function values (it is called
                           with a whole batch of random trial points).
          a, b           - (Integers or Floats) Endpoints of search interval
          cooling_factor - (Float from open interval (0,1)) System cooling
                           factor
          nrand          - (Integer) Maximum number of random trials per
                           iteration; values outside search interval are
                           discarded
          maxiter        - (Positive Integer) maximum number of iterations
                           allowed
          min_gamma      - (Positive Float) termination criteria; iteration
                           stops when gamma becomes smaller than this value.
          max_non_improving_steps - (Positive Integer) number of consecutive
                           non-improving iterations tolerated before the
                           system is cooled (gamma is reduced)
          verbose        - (Boolean) True indicates that information about
                           each step should be displayed

    OUTPUT:
          x_min          - (Float) Location of minimum
          fx_min         - (Float) Minimum value of function

    NOTES:
        Uses simulated annealing to find the global minimum of a function of
        one variable.  This was written sometime in the past; unfortunately I
        don't remember where I got the algorithm from :(.

    AUTHOR:
        Jonathan R. Senning <jonathan.senning@gordon.edu>
        Gordon College
        Original Octave Version: April 20, 2005
        Converted to Python: April 18-20, 2008
    """

    # The value of gamma is reduced by a factor of gamma_reduction_factor
    # whenever the number of non-improving steps exceeds
    # max_non_improving_steps.  The main iteration terminates when gamma
    # becomes smaller than min_gamma or when the number of iterations exceeds
    # maxiter.

    # Set the parameters that control the simulated annealing process:
    #
    # alpha: Positive parameter that affects the likelihood that a higher
    # uphill move is made over a lower uphill move.
    #
    # gamma_reduction_factor: This is the cooling factor.  It should be in
    # (0,1).  Values close to 1 result in slow cooling and should allow the
    # best chance of finding the true minimum (analogous to the minimum energy
    # state) but will result in slow convergence.  Values close to 0 will
    # result in fast convergence to a local minimum, but likely not the
    # absolute minimum.

    alpha = 1.0
    random_numbers_per_trial = nrand
    gamma_reduction_factor = cooling_factor

    # Initialize for the iterations.  Gamma controls the width of the normally
    # distributed random numbers used in each iteration.  By initializing to
    # (b-a)/2 we start with random numbers distributed over the entire
    # interval.
    #
    # The starting x value will be at midpoint of interval.

    gamma = 0.5 * ( b - a )
    x  = 0.5 * ( a + b )
    fx = f( x )
    x_min  = x
    fx_min = fx
    non_improving_steps = 0

    if verbose:
        print( '%4s  %20s %20s %8s %8s  %2s %8s' % \
            ( 'k', 'xmin', 'fxmin', 'x', 'fx', 'ns', 'gamma' ) )

    # Main loop.  "iteration" rather than "iter" so we don't shadow the
    # builtin iter().

    iteration = 0
    while gamma >= min_gamma and iteration < maxiter:

        # Generate normally distributed random numbers centered around x,
        # rejecting any that lie outside the interval [a,b].  Sorting lets
        # searchsorted() locate the in-range slice; side='right' for b so
        # that a sample exactly equal to b is kept, matching the treatment
        # of samples exactly equal to a.

        nr = 0
        while nr <= 0:
            u = numpy.random.normal( x, gamma, random_numbers_per_trial )
            u.sort()
            u = u[u.searchsorted( a ):u.searchsorted( b, side = 'right' )]
            nr = len( u )

        # Determine the function value at each of the random points, find the
        # minimum value and also the random value that gave it.

        fu = f( u )
        i = fu.argmin()
        fu_min = fu[i]
        u_min = u[i]

        # Have we found an improvement over our current center point?  If so,
        # use it; if not, select a new center point chosen from the current
        # random distribution.  The probability of choosing any given point
        # decreases exponentially with its function value (Boltzmann-style
        # weighting), so uphill moves are possible but biased toward small
        # increases.

        if fu_min < fx:
            x  = u_min
            fx = fu_min
        else:
            p = numpy.exp( alpha * ( fx - fu ) )
            p = p / p.sum()
            t = numpy.random.random()
            i = 0
            s = p[i]
            # Roulette-wheel selection.  The guard is i < nr - 1 (not
            # i < nr): the cumulative sum s reaches 1.0 only up to
            # floating-point roundoff, so without the stricter bound i
            # could be incremented to nr and p[nr] would raise IndexError.
            while s < t and i < nr - 1:
                i = i + 1
                s = s + p[i]
            x  = u[i]
            fx = fu[i]

        # See if we've found an improved minimum.  If we've not found one in a
        # while, reduce gamma (i.e. cool the system), restart from the best
        # point seen so far, and try again.

        if fx < fx_min:
            x_min  = x
            fx_min = fx
            non_improving_steps = 0
        else:
            non_improving_steps += 1
            if non_improving_steps > max_non_improving_steps:
                non_improving_steps = 0
                gamma = gamma_reduction_factor * gamma
                x  = x_min
                fx = fx_min

        # Done with iteration.  Display status (if requested) and check
        # termination criteria.

        if verbose:
            print( '%4d: %20.16f %20.16f %8.5f %8.5f  %2d %8.2e' % \
                ( iteration, x_min, fx_min, x, fx, non_improving_steps, gamma ) )

        iteration += 1

        # End of main loop

    return ( x_min, fx_min )