#!/usr/bin/env python

# G.-H. (Sam) Gweon, 06-04-2012
#
#     This program can be used and extended if you are a python lover.
#                                                 (and you should be)
#     Or, it can be read as pseudo-code.
#     Trivial modifications are all that is necessary for simulating an
#         AFM (anti-ferromagnetic) model.
#     More work is necessary to calculate the correlation function.
#         Convolution routine, such as scipy.signal.convolve2d, is useful.
#
#     This program is provided as is, and may be used at your own risk.

import numpy, random

class MC_Ising2D:
    """
    Metropolis Monte Carlo simulation of the 2D ferromagnetic Ising model
    (J = 1, periodic boundary conditions) on a square lattice, with a
    binning analysis of the observables E (total energy) and M (total
    magnetization).  Spins are stored as booleans: True = up (+1),
    False = down (-1).
    """

    # Capacity (number of bins) pre-allocated per observable power.
    # Should not be too large -- all the memory is allocated by __init__.
    MAXNBINS = 1000000

    def __init__ (self, # <<<
            size = 40,
            init_pattern = 0,
            bin_size = 100,
            T = 2.0,
            targets = ['E', 'M']):
        """
        size         :  linear size of the lattice.  40 means 40 x 40 lattice.
        init_pattern :  random by default.  all up (1) if 1.  left half up (1) and right half down (0) if 2.
        bin_size     :  size of the bin for the binning analysis.
        T            :  temperature in unit of J / k_B.  Tc is about 2.269.
        targets      :  what to measure along the way.  "observables"
        """
        # <<< remember how this object was created (used by __repr__);
        # must be done before any other local variable is defined
        self._dargs = locals ().copy ()
        del self._dargs ['self']
        # >>>
        # <<< check args
        assert isinstance (size, int)
        assert isinstance (init_pattern, int)
        assert isinstance (bin_size, int)
        assert 2 <= size <= 300 # for safety
        assert 0 < bin_size < 1000 # for safety
        assert T > 0
        for target in targets:
            # what to calculate?
            # E = total energy, M = total magnetization
            # Their variance will be calculated automatically.
            assert target in ['E', 'M']
        # >>>
        # <<< assign initial self._nu, the spin configuration
        # NOTE: plain bool is used as the dtype; the numpy.bool alias has
        # been removed from recent numpy releases.
        if init_pattern == 1: # all up
            self._nu = numpy.ones ((size, size), dtype = bool)
        elif init_pattern == 2: # half up, half down
            self._nu = numpy.ones ((size, size), dtype = bool)
            # floor division keeps the index an int under Python 3 as well
            self._nu [size // 2:, :] = 0
        else: # default -- random
            self._nu = numpy.random.randint (
                    2, size = (size, size)).astype (bool)
        # >>>
        # <<< init/bind other attributes, allocate memory
        self._T = T
        # For each observable X, also track X^2, X^3 and X^4 so that the
        # variance (and the variance of the variance) can be estimated.
        self._targets = targets = \
                [(t_, t_ + '^2', t_ + '^3', t_ + '^4') for t_ in targets]
        self._binned = {} # probably large arrays (one slot per finished bin)
        self._curbin = {} # probably small arrays (the bin being accumulated)
        self._cumave = {} # cumulative averages: (ave, sqa, cba, qta, count)
        for target_tuple in targets:
            target = target_tuple [0]
            # value of the observable for the initial configuration; it
            # seeds the running (incrementally updated) value used by run()
            init_val = self.measure (target)
            for power, key in enumerate (target_tuple, start = 1):
                self._binned [key] = numpy.zeros (
                        self.MAXNBINS, dtype = numpy.float64)
                self._curbin [key] = numpy.zeros (
                        bin_size, dtype = numpy.float64)
                self._curbin [key][0] = init_val ** float (power)
            self._cumave [target] = 0., 0., 0., 0., 0
        self._ntot = 0 # absolute counter for all bins calculated
        self._pos = 0 # current bin position -- can roll back (ring buffer)
        # >>>
    # >>>
    def __repr__ (self): # <<<
        # Reconstructs a constructor call from the saved arguments.
        return '%s (%s)' % (self.__class__.__name__,
                ', '.join (['%s = %s' % (key, repr (val))
                    for key, val in self._dargs.items ()]))
    # >>>

    def bin_rolled_back (self): # <<<
        """True once more bins have been produced than fit in the ring."""
        return self._ntot != self._pos
    # >>>
    def bin_size (self): # <<<
        """Return the bin size (number of MC steps per bin)."""
        for target_tuple in self._targets:
            return self._curbin [target_tuple [0]].shape [-1]
        # no observables at all -- fall back to the constructor argument
        return self._dargs ['bin_size']
    # >>>
    def data (self, key): # <<<
        """
        This method returns the current configuration data, or binned averages for physical observables.  The returned array object is a copy and is safe to modify.  This method and "report" can be viewed as "output methods."

        key : 'nu' or 'E', 'E^2', 'E^3', 'E^4', or 'M', 'M^2', 'M^3', 'M^4'
                'nu' means the current spin configuration
                other value means the binned data
        """
        ans = None
        if key == 'nu':
            # a byte copy of the spin configuration (0 = down, 1 = up)
            ans = numpy.array (self._nu, dtype = numpy.byte)
        else:
            for target_tuple in self._targets:
                if key in target_tuple:
                    ans = self._binned [key]
                    if self.bin_rolled_back ():
                        # the ring buffer has wrapped: return the bins in
                        # chronological order (concatenate makes a copy)
                        ans = numpy.concatenate ((ans [self._pos:],
                                ans [:self._pos]))
                    else:
                        ans = numpy.array (ans [:self._pos])
        if ans is None:
            raise ValueError ("Could not find any data for key = '%s'." % key)
        return ans
    # >>>
    def measure (self, target): # <<<
        """
        Returns the value of an observable ("target") corresponding, only, to the current configuration.  While this method is important as it is used to initialize the values of observables, this method is NOT the main method to subsequently measure physical quantities during the main Monte Carlo iteration, since this method is expensive to run.  However, this method can/should be used occasionally to check the physical quantities measured inside the method "run."  That method keeps track of all physical quantities relevant, and stores them in appropriate arrays, as well as computing cumulative averages.

        target :  Must be one of 'E', 'M'.
        """
        nu = self._nu
        xdim, ydim = nu.shape
        add_reduce = numpy.add.reduce
        if target == 'M':
            # add.reduce on a bool array counts the True entries, so
            # val = #up, and M = #up - #down = 2 * #up - N
            val = add_reduce (add_reduce (nu))
            val = 2 * val - xdim * ydim
        elif target == 'E':
            # xor marks anti-parallel nearest-neighbor bonds; with periodic
            # boundary conditions there are 2 N bonds in total, so (J = 1)
            # E = #anti-parallel - #parallel = 2 * #anti-parallel - 2 N
            val = add_reduce (add_reduce (nu [:-1, :] ^ nu [1:, :]))
            val += add_reduce (nu [-1, :] ^ nu [0, :])
            val += add_reduce (add_reduce (nu [:, :-1] ^ nu [:, 1:]))
            val += add_reduce (nu [:, -1] ^ nu [:, 0])
            val = 2 * val - 2 * xdim * ydim
        else:
            raise ValueError ("Unknown target '%s'" % target)
        return val
    # >>>
    def report (self): # <<<
        """Print a human-readable summary of the simulation so far."""
        # single-argument print works identically under Python 2 and 3
        print ('Monte Carlo problem:')
        print (self)
        print ('# of bins calculated so far: %s' % self._ntot)
        print ('# of calculated bins available: %s' %
                (self.max_num_bins () if self.bin_rolled_back ()
                    else self._pos))
        print ('Data rolled back? -- %s' %
                ('Yes.' if self.bin_rolled_back () else 'No.'))
        self.summarize_cumave ()
    # >>>
    def run (self, n = 1000): # <<<
        """
        This is where main actions occur.

        Performs n bins' worth of single-spin-flip Metropolis updates,
        keeping incrementally-updated values of the observables, storing
        one bin average per bin, and updating the cumulative averages.

        n :  number of bins to calculate
        """
        # <<< choose iterators (xrange under Python 2, range under Python 3)
        try:
            simple_iterator = xrange
        except NameError:
            simple_iterator = range
        try:
            # optional progress display -- fall back silently if absent
            from progressDisplayer import PDI
            iterator = lambda count: PDI (simple_iterator (count))
        except ImportError:
            iterator = simple_iterator
        # >>>
        assert n > 0
        assert isinstance (n, int)
        # <<< bind everything to locals for speed inside the hot loop
        xdim, ydim = self._nu.shape
        random_randint = random.randint
        random_random = random.random
        nu = self._nu
        numpy_exp = numpy.exp
        T_times_m1 = self._T * -1.0
        targets = self._targets
        curbin = self._curbin
        binned = self._binned
        cumave = self._cumave
        bin_pos = bin_pos0 = self._pos
        add_reduce = numpy.add.reduce
        max_num_bins = self.max_num_bins ()
        bin_size = self.bin_size ()
        # >>>
        def update_cumave (pos0, pos1): # <<<
            # fold the binned values in [pos0, pos1) into the running
            # cumulative averages of X, X^2, X^3, X^4
            n_more = pos1 - pos0
            if n_more <= 0:
                return
            for target, targ_2, targ_3, targ_4 in targets:
                ave_old, sqa_old, cba_old, qta_old, ntot_old \
                        = cumave [target]
                ntot = ntot_old + n_more
                ave = (ave_old * ntot_old +
                        add_reduce (binned [target][pos0: pos1])) / ntot
                sqa = (sqa_old * ntot_old +
                        add_reduce (binned [targ_2][pos0: pos1])) / ntot
                cba = (cba_old * ntot_old +
                        add_reduce (binned [targ_3][pos0: pos1])) / ntot
                qta = (qta_old * ntot_old +
                        add_reduce (binned [targ_4][pos0: pos1])) / ntot
                cumave [target] = ave, sqa, cba, qta, ntot
        # >>>
        # The following loop is the main algorithm.  It is big and ugly, as it
        # tries to avoid involving any functions or methods, for speed.  For
        # the same reason, dot expressions or global variables are avoided in
        # the loop.  The speed of the loop is tolerable.  However, to really
        # speed things up, this loop must be implemented in Cython or C (to
        # do).
        for _i_ in iterator (n):
            for pos in simple_iterator (bin_size):
                # <<< pick i, j, and run Metropolis -> i, j, s, E_inc
                i = random_randint (0, xdim - 1)
                j = random_randint (0, ydim - 1)
                s = nu [i][j]
                # find nn spin values (periodic boundary conditions)
                s_u = nu [i - 1][j] if i > 0 else nu [-1][j]
                s_d = nu [i + 1][j] if i + 1 < xdim else nu [0][j]
                s_l = nu [i][j - 1] if j > 0 else nu [i][-1]
                s_r = nu [i][j + 1] if j + 1 < ydim else nu [i][0]
                # First, calculate E_increment assuming s == False. (FM model)
                E_inc = -2. if s_u else 2.
                E_inc += -2. if s_d else 2.
                E_inc += -2. if s_l else 2.
                E_inc += -2. if s_r else 2.
                # Then, flip the sign of E_increment, if s == True (still FM)
                if s:
                    E_inc *= -1.
                # E_inc calculation complete for the FM model.
                if E_inc > 0: # Metropolis sampling
                    # accept with probability exp (-E_inc / T)
                    if random_random () > numpy_exp (E_inc / T_times_m1):
                        i = None # meaning "keep old"
                        E_inc = 0.
                # >>>
                # <<< build increment dict, update nu
                increment = {}
                if i is None: # keeping old
                    increment ['E'] = increment ['M'] = 0.
                else:
                    nu [i][j] = s = 1 - s # flip spin
                    increment ['E'] = E_inc
                    increment ['M'] = 2. if s else -2.
                # >>>
                # <<< update data, but not counters
                newpos = pos + 1
                if newpos == bin_size:
                    newpos = 0
                for target, targ_2, targ_3, targ_4 in targets:
                    oldval = curbin [target][pos]
                    newval = oldval + increment [target]
                    if newpos == 0: # the last loop -- collect results
                        binned [target][bin_pos] = \
                                add_reduce (curbin [target]) / bin_size
                        binned [targ_2][bin_pos] = \
                                add_reduce (curbin [targ_2]) / bin_size
                        binned [targ_3][bin_pos] = \
                                add_reduce (curbin [targ_3]) / bin_size
                        binned [targ_4][bin_pos] = \
                                add_reduce (curbin [targ_4]) / bin_size
                    curbin [target][newpos] = newval
                    curbin [targ_2][newpos] = newval ** 2.
                    curbin [targ_3][newpos] = newval ** 3.
                    curbin [targ_4][newpos] = newval ** 4.
                # >>>
            # <<< update counter, update cumave; wrap the ring buffer
            bin_pos += 1
            if bin_pos == max_num_bins:
                update_cumave (bin_pos0, max_num_bins)
                bin_pos = bin_pos0 = 0
            # >>>
        # <<< update counters, update cumave
        self._ntot += n
        self._pos = bin_pos
        update_cumave (bin_pos0, bin_pos)
        # >>>
        # <<< check internal consistency: the incrementally tracked values
        # must agree with a fresh, full measurement of the configuration
        for target, targ_2, targ_3, targ_4 in targets:
            val = curbin [target][0]
            v_2 = curbin [targ_2][0]
            v_4 = curbin [targ_4][0]
            v = self.measure (target)
            v2 = v ** 2.
            v4 = v ** 4.
            for v1_, v2_ in zip ([v, v2, v4], [val, v_2, v_4]):
                if abs (v1_ - v2_) > 1.e-8 * max (abs (v1_), abs (v2_)):
                    print ('%s %s %s' % (target, targ_2, targ_4))
                    print ('%s %s %s' % (val, v_2, v_4))
                    print ('%s %s %s' % (v, v2, v4))
                    raise Exception ("Internal inconsistency")
        # >>>
    # >>>
    def N (self): # <<<
        """Total number of spins (lattice sites)."""
        xdim, ydim = self._nu.shape
        return xdim * ydim
    # >>>
    def max_num_bins (self): # <<<
        """Return the ring-buffer capacity in bins."""
        for target_tuple in self._targets:
            return self._binned [target_tuple [0]].shape [-1]
        # no observables at all -- fall back to the class constant
        return self.MAXNBINS
    # >>>
    def summarize_cumave (self): # <<<
        """Print cumulative averages, variances, variances of variances."""
        cumave = self._cumave
        N = self.N ()
        for target_tuple in self._targets:
            target = target_tuple [0]
            ave, sqa, cba, qta, ntot = cumave [target]
            var = sqa - ave ** 2.
            # variance of the variance estimate from the first four raw
            # moments; algebraically this equals mu_4 - sigma^4, where mu_4
            # is the fourth central moment
            var_var = qta - 4. * cba * ave + 8. * sqa * ave ** 2. \
                    - sqa ** 2. - 4. * ave ** 4.
            print ('%s per spin: %s' % (target, ave / N))
            print ('%s variance: %s' % (target, var))
            print ('%s variance of variance: %s' % (target, var_var))
        # note
        # heat capacity  = k_B * E variance / (k_B T)^2
        # total mag susc = M variance * mu_B / (k_B T)
        # variance of variance is thus related to the variance of the
        # heat capacity or the mag susc.
    # >>>

if __name__ == '__main__':
    # Demo: simulate a 40 x 40 FM Ising model at T = 2.0 (default args),
    # print a summary, and save the final spin configuration as a PNG.
    m = MC_Ising2D ()
    m.run (n = 1000)
    m.report ()
    # Use a qualified import (no star import) and the public data ()
    # accessor instead of reaching into the private _nu attribute.
    import pylab
    pylab.pcolormesh (m.data ('nu'))
    pylab.cool ()
    pylab.savefig ('test.png')