Example #1
    def __init__(
            self,
            loss='pointwise',
            n_factors=8,
            n_iter=20,
            batch_size=256,
            reg_mdr=0.00001,  # L2, L1 regularization
            reg_mass=0.0001,  # L2, L1 regularization
            lr=1e-2,  # learning_rate
            decay_step=20,
            decay_weight=0.5,
            optimizer_func=None,
            use_cuda=False,
            random_state=None,
            num_neg_samples=1,  #number of negative samples for each positive sample.
            dropout=0.5,
            distance_metric='l2',
            activation_func='none',  # 'none', 'relu', or 'tanh'
            activation_func_mdr='none',
            n_layers_mdr=1,
            model='mass',
            beta=0.5,
            args=None):
        super(REC, self).__init__()
        self._args = args
        self._loss = loss
        self._n_factors = n_factors
        self._embedding_size = n_factors

        self._n_iters = n_iter
        self._batch_size = batch_size
        self._lr = lr
        self._decay_step = decay_step
        self._decay_weight = decay_weight

        self._reg_mdr = reg_mdr
        self._reg_mass = reg_mass
        self._optimizer_func = optimizer_func

        self._use_cuda = use_cuda
        self._random_state = random_state or np.random.RandomState()
        self._num_neg_samples = num_neg_samples

        self._n_users = None
        self._n_items = None
        self._lr_schedule = None
        self._loss_func = None
        self._dropout = dropout
        self._distance_metric = distance_metric
        self._model = model
        self._beta = beta

        #my_utils.set_seed(self._random_state.randint(-10**8, 10**8), cuda=self._use_cuda)
        my_utils.set_seed(gc.SEED)

        self._activation_func = activation_func
        self._activation_func_mdr = activation_func_mdr
        self._n_layers_mdr = n_layers_mdr
        #for evaluation during training
        self._sampler = my_sampler.Sampler()
Example #2
    def update_vector(self, wf, init_state, batch_size, gamma, step, therm=False):  # Get the vector of updates
        self.nvar = self.get_nvar(wf)
        wf.init_lt(init_state)
        samp = sampler.Sampler(wf, self.h, mag0=self.m)  # start a sampler
        samp.nflips = self.h.minflips
        samp.state = np.copy(init_state)
        samp.reset_av()
        if therm:
            samp.thermalize(batch_size)

        results = Parallel(n_jobs=self.parallel_cores)(
            delayed(get_sample)(samp, self) for i in range(batch_size))  # Pass sampling to parallelization

        elocals = np.array([i[0] for i in results])
        deriv_vectors = np.array([i[1] for i in results])
        states = np.array([i[2] for i in results])
        # Now that we have all the data from sampling let's run our statistics
        # cov = self.get_covariance(deriv_vectors)
        cov_operator = LinearOperator((self.nvar, self.nvar), dtype=complex,
                                      matvec=lambda v: self.cov_operator(v, deriv_vectors, step))

        forces = self.get_forces(elocals, deriv_vectors)

        # Now we calculate the updates as
        # updates = -gamma * np.dot(np.linalg.pinv(cov), forces)
        vec, info = cg(cov_operator, forces)
        # vec, info = cg(cov, forces)
        updates = -gamma * vec
        self.step_count += batch_size
        return updates, samp.state, np.mean(elocals) / self.nspins
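The commented-out pinv line and the cg call above are two routes to the same linear solve S·x = F; the LinearOperator makes the solve matrix-free, so the covariance never has to be built explicitly. A minimal, self-contained sketch of that pattern with toy data and generic names (not the trainer's actual covariance routine):

import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

rng = np.random.default_rng(0)
derivs = rng.normal(size=(100, 8)) + 1j * rng.normal(size=(100, 8))  # toy derivative samples O_k
forces = rng.normal(size=8) + 1j * rng.normal(size=8)                # toy force vector F

def apply_cov(v):
    # apply S = <O^dag O> - <O>^dag <O> to v without ever forming S
    mean_d = derivs.mean(axis=0)
    return derivs.conj().T @ (derivs @ v) / derivs.shape[0] - mean_d.conj() * (mean_d @ v)

cov_operator = LinearOperator((8, 8), matvec=apply_cov, dtype=complex)
vec, info = cg(cov_operator, forces)  # info == 0 on convergence
updates = -0.01 * vec                 # plays the role of -gamma * vec above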
Example #3
def get_ddist(wf, H, delta, nruns):  # get the quantity D_0^2
    # see milanote for the breakdown into expectation values

    h_samp = sampler.Sampler(wf, H, quiet=False)
    h_samp.run(nruns)  # calculate <H>
    e = h_samp.estav * wf.nv  # Sampler naturally returns a per-spin result

    z = getZ(wf, nruns)  # <I>
    h2op = Hsq(H)  # build the H^2 sampler
    hsq_samp = sampler.Sampler(wf, h2op)
    hsq_samp.run(nruns)
    h2 = hsq_samp.estav * wf.nv  # calculate <H^2>

    frac = min((z**2 + delta**2 * e**2) / (z * (z + delta**2 * h2)), .9999)
    # frac should never exceed 1, but Monte Carlo error can push it slightly above, hence the cap at .9999

    ddist = acos(sqrt(frac))**2
    return ddist
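Read back from the code, with Z = <I> from getZ, E = <H> from the first sampler, and <H^2> from the Hsq sampler, the returned quantity is

    D_0^2 = \arccos^2\!\sqrt{\dfrac{Z^2 + \delta^2 E^2}{Z\,(Z + \delta^2 \langle H^2 \rangle)}}

with the ratio capped just below 1 to absorb Monte Carlo noise.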
Example #4
def get_rdist(wf, nruns, h):  # get the quantity R_0^2

    h2op = Hsq(h)  # build the H^2 sampler
    hsq_samp = sampler.Sampler(wf, h2op)
    hsq_samp.run(nruns)
    h2 = hsq_samp.estav * wf.nv  # calculate <H^2>
    s = sampler.Sampler(
        wf,
        IdentityOp(10))  # build a sampler with generic (identity) observable
    s.state = np.random.permutation(
        np.concatenate((np.ones(int(5)), -1 * np.ones(int(5)))))
    #s.state = np.ones(wf.nv)
    s.thermalize(1000)  # thermalize
    state = np.copy(s.state)  # take the ended state
    t = trainer.build_trainer(wf, h, reg_list=(0, 0, 0))
    # Get -i*S^-1*F for our wavefunction
    u = t.update_vector(wf, state, 1000, 1j, 1)[0]
    dtpsi2 = []  # this is the denominator
    states = []
    avg1 = 0
    for j in range(nruns):
        wf.init_lt(state)
        for i in range(s.nspins):  # one Monte Carlo sweep of single-spin moves
            s.move()  # make a move
        state = np.copy(
            s.state
        )  # make that move the new state, now we calculate (H*dt)_local
        states.append(state)

        d = t.get_deriv_vector(
            state,
            s.wf)  # get the vector of quantities (1/psi) dpsi/d(parameter)
        eloc = t.get_elocal(state, s.wf)  # get the local energy
        avg1 += eloc * np.dot(d, u)  #numerator of the rdist
        dtpsi2.append(abs(np.dot(d, u))**2)  # The <dtPsi>^2 part

    avg1 = abs(avg1 / nruns)**2
    avg2 = np.mean(dtpsi2)
    rdist = acos(sqrt(avg1 / (avg2 * h2)))**2
    return rdist
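In the same notation, with u = -i S^{-1} F from update_vector, d the log-derivative vector from get_deriv_vector, and overlines denoting averages over the nruns sampled states, the returned quantity is

    R_0^2 = \arccos^2\!\sqrt{\dfrac{\big|\overline{E_{\mathrm{loc}}\,(d \cdot u)}\big|^2}{\overline{|d \cdot u|^2}\;\langle H^2 \rangle}}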
Example #5
    def testSAA(self):
        '''
        Create a 2D movie based on the data we put in the container object
        in the setUp method. This method does all the graphics involved;
        since this is 2D, running for lots of points might take a while.
        '''

        # for reproducibility
        np.random.seed(1792)

        # parameters to play with
        nSamples = 5000  # number of samples we use for KL
        maxiter = 30000  # max number of optimization steps
        nPoints = 60  # The number of evaluations of the true likelihood
        M = 7  # bound on the plot axes
        nopt = 50
        nwalk = 50
        burn = 500
        delta = 0.1

        # initialize container and sampler
        specs = cot.Container(rose.rosenbrock_2D)
        n = 1
        for i in range(-n, n + 1):
            for j in range(-n, n + 1):
                specs.add_point(np.array([2 * i, 2 * j]))
        sampler = smp.Sampler(specs,
                              target=targets.exp_krig_sigSqr,
                              maxiter=maxiter,
                              nwalkers=nwalk,
                              noptimizers=nopt,
                              burn=burn)
        sampler.run_mcmc(500)
        mc = sampler.flatchain()

        # memory allocations. constants etc
        a = np.arange(-M, M, delta)
        X, Y = np.meshgrid(a, a)  # create two meshgrid
        grid = np.asarray([np.ravel(X), np.ravel(Y)])
        xn = np.array([1.33, 2.45])

        avgC, gradAvgC = _g.avg_var(xn, specs.U, specs.S, specs.V, specs.Xarr,
                                    grid, specs.r, specs.d, specs.reg)
        avgPy, gradAvgPy = _g.avg_var(xn, specs.U, specs.S, specs.V,
                                      specs.Xarr, mc, specs.r, specs.d,
                                      specs.reg)

        print(avgC)
        print(avgPy)
Example #6
 def _init(self, n_suggestions):
     self.batch_size = n_suggestions
     n_init_points = self.config['n_init_points']
     if n_init_points == -1:
         # Special value to use the default 2*D+1 number.
         n_init_points = 2 * self.dim + 1
     self.n_init = max(self.batch_size, n_init_points)
     exp_design = self.config['experimental_design']
     if exp_design == 'latin_hypercube':
         X_init = latin_hypercube(self.n_init, self.dim)
     elif exp_design == 'halton':
         halton_sampler = sampler.Sampler(method='halton',
                                          api_config=self.api_config,
                                          n_points=self.n_init)
         X_init = halton_sampler.generate(random_state=self.sampler_seed)
         X_init = self.space_x.warp(X_init)
         X_init = to_unit_cube(X_init, self.lb, self.ub)
     elif exp_design == 'lhs_classic_ratio':
         lhs_sampler = sampler.Sampler(method='lhs',
                                       api_config=self.api_config,
                                       n_points=self.n_init,
                                       generator_kwargs={
                                           'lhs_type': 'classic',
                                           'criterion': 'ratio'
                                       })
         X_init = lhs_sampler.generate(random_state=self.sampler_seed)
         X_init = self.space_x.warp(X_init)
         X_init = to_unit_cube(X_init, self.lb, self.ub)
     else:
         raise ValueError(f'Unknown experimental design: {exp_design}.')
     self.X_init = X_init
     if DEBUG:
         print(
             f'Initialized the method with {self.n_init} points by {exp_design}:'
         )
         print(X_init)
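latin_hypercube is used above but not defined in this excerpt. A minimal sketch of a Latin hypercube generator with the assumed (n, dim) -> points-in-[0, 1) signature (an illustration, not the project's actual helper):

import numpy as np

def latin_hypercube(n, dim, seed=None):
    # each dimension gets one point per stratum [i/n, (i+1)/n), with strata permuted independently
    rng = np.random.default_rng(seed)
    strata = np.stack([rng.permutation(n) for _ in range(dim)], axis=1)
    return (strata + rng.random((n, dim))) / n  # shape (n, dim), entries in [0, 1)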
Example #7
    def evolve(self,
               wf,
               deltat,
               ntsteps,
               batch_size=100,
               symmetry='None',
               file='none',
               print_freq=25,
               out_freq=0):
        # Function to evolve the wavefunction forward in time
        # Since we can view time evolution as being like the original training but with imaginary steps,
        # we take advantage of the existing training code
        self.deltat = deltat
        t = trainer.Trainer(self.h, reg_list=(
            0, 0, 0))  # Default, will be reassigned if we get a symmetry arg

        if symmetry == "local":
            t = trainer.TrainerLocal(
                self.h,
                reg_list=(0, 0, 0))  # note no regulator for time evolution

        if symmetry == "ti":
            t = trainer.TrainerTI(self.h, reg_list=(0, 0, 0))

        if symmetry == "general":
            t = trainer.TrainerSymmetric(self.h, reg_list=(0, 0, 0))

        s = sampler.Sampler(
            wf,
            self.h)  # To determine the starting state, we initialize a sampler
        s.run(5000)
        init_state = s.state

        wf, elist = t.train(wf,
                            init_state,
                            batch_size,
                            ntsteps,
                            self.gamma,
                            print_freq=print_freq,
                            file=file,
                            out_freq=out_freq)

        return wf
Example #8
    def __init__(self):
        self.ml = None
        self.dron = None
        self.gui = None
        self.quit_flag = False
        self.dron_actual_frame = None  # obtained from dron_in's video stream
        self.dron_actual_status = None
        self.ml_actual_prediction = None
        self.speed = 30

        self.controls = {
            "p": lambda dron,speed: self.startMlProcess(speed),
            "t": lambda dron,speed: self.startSampleTake(speed),

            "escape": lambda dron,speed: self.quitFunction(),

            'w': 'forward',
            's': 'backward',
            'a': 'left',
            'd': 'right',
            'up': 'up',
            'down': 'down',
            'space': 'up',
            'left shift': 'down',
            'right shift': 'down',

            # Increase and decrease speed
            '1': lambda dron,speed: self.increaseSpeed(1),
            '0': lambda dron,speed: self.decreaseSpeed(1),

            # arrow keys for fast turns and altitude adjustments
            'q': 'counter_clockwise',
            'e': 'clockwise',

            'left': lambda dron, speed: dron.counter_clockwise(speed*2),
            'right': lambda dron, speed: dron.clockwise(speed*2),
            'tab': lambda dron, speed: dron.takeoff(),
            'backspace': lambda dron, speed: dron.land(),
            #Modify overall speed
        }
        # Init sampler
        self.sampler = sampler.Sampler()
        self.sampler.ctrl = self
Example #9
#    target_dis = stats.gamma(a=3.99)
#    return target_dis.pdf(sample[0,]) * target_dis.pdf(sample[1,])


def target2d(sample):
    target_dis = stats.gamma(a=3.99).pdf(sample[1, ])
    normal_dis = stats.multivariate_normal(0, 1).pdf(sample[0, ])
    return target_dis * normal_dis


dim = 1
if dim == 1:
    sams = np.linspace(-5, 5, 500)
    plt.plot(sams, target(sams), c='orange')
    samples = sm.Sampler(dim=dim).metropolis_hastings(target=target,
                                                      niter=2500,
                                                      nburn=100)
    plt.hist(samples[0, ], bins=100, density=True, linewidth=1)
    plt.show()
elif dim == 2:
    X, Y = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
    Z = np.array([X, Y])
    fig, ax = plt.subplots(1, 2, figsize=(12, 3))
    ax[0].contourf(X, Y, target2d(Z))
    samples = sm.Sampler(dim=dim).metropolis_hastings(target=target2d,
                                                      start=-0.5,
                                                      niter=6000,
                                                      nburn=100)
    ax[1].hist2d(samples[0, ],
                 samples[1, ],
                 bins=100,
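metropolis_hastings itself is not shown in this excerpt. A minimal, self-contained random-walk sketch under the same interface assumptions (an unnormalized target evaluated on a (dim, 1) column, samples returned as a (dim, n) array), purely for illustration:

import numpy as np

def metropolis_hastings(target, dim=1, start=0.0, niter=2500, nburn=100, step=0.5, seed=0):
    # random-walk Metropolis-Hastings with a symmetric Gaussian proposal
    rng = np.random.default_rng(seed)
    x = np.full((dim, 1), start, dtype=float)
    p_x = float(target(x))
    chain = []
    for _ in range(niter):
        proposal = x + step * rng.normal(size=(dim, 1))
        p_prop = float(target(proposal))
        if rng.random() < p_prop / max(p_x, 1e-300):  # accept with prob min(1, ratio)
            x, p_x = proposal, p_prop
        chain.append(x.copy())
    return np.hstack(chain[nburn:])  # drop burn-in; shape (dim, niter - nburn)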
Example #10
lv_match = np.all(np.isclose(n1.log_val(state), n2.log_val(state)))
lp_match = np.all(np.isclose(n1.log_pop(state, flips), n2.log_pop(state, flips)))
print("Log_val matches: {}".format(lv_match))
print("Log_pop matches: {}".format(lp_match))

if not (lv_match and lp_match):
    exit()

nruns = 1000
h = ising1d.Ising1d(40, 0.5)

print("Sampling n1 ...")

start_time = time.time()
s1 = sampler.Sampler(n1, h)
s1.run(nruns)
print("time elapsed: {:.2f}s".format(time.time() - start_time))

print("Sampling n2 ...")

start_time = time.time()
s2 = sampler.Sampler(n2, h)
s2.run(nruns)
print("time elapsed: {:.2f}s".format(time.time() - start_time))

def gamma_fun(p):
    return .01

t = trainer.TrainerLocal(h)
Example #11
    experiment = lognorm.Experiment(params[0], params[1], params[2], params[3],
                                    140, costs, 230)
    simSizes = lognorm.run(experiment, False)
    return simSizes


numSites = 140
costs = lognorm.loadCosts('../data/costMatrixSea.csv', numSites)
sites = '../data/cities_weights.csv'
data = lognorm.loadHistoricalSites(sites, numSites)
eps = threshold.LinearEps(30, 6000, 3000)
priors = sampler.TophatPrior([0, 0, 0, 0.0001], [2, 2, 2, 2])

sampler = sampler.Sampler(N=200,
                          Y=data,
                          postfn=postfn,
                          dist=lognorm.distAbs,
                          threads=16)

for pool in sampler.sample(priors, eps):
    print("T: {0}, eps: {1:>.4f}, ratio: {2:>.4f}".format(
        pool.t, pool.eps, pool.ratio))
    for i, (mean, std) in enumerate(
            zip(np.mean(pool.thetas, axis=0), np.std(pool.thetas, axis=0))):
        print(u"    theta[{0}]: {1:>.4f} \u00B1 {2:>.4f}".format(i, mean, std))
    np.savetxt("result_" + str('%.2f') % pool.eps + '.csv',
               pool.thetas,
               delimiter=";",
               fmt='%1.5f')

print(pool.thetas)
Example #12
    wf.a = 0.1 * np.random.uniform(-1, 1) * np.ones(wf.a.shape) + 0j
wf.breduced = 0.1 * np.random.uniform(-1, 1, wf.breduced.shape) + 0j

#wf.load_parameters('../Outputs/'+str(nspins)+'_alpha='+str(alpha)+'_Ising10_ti_100.npz')

h = ising1d.Ising1d(nspins, 1.0)
#h = heisenberg1d.Heisenberg1d(10,1)
#h = xyz.XYZ(10,(-1,-1,0))
#h = fermionhop1d.FermionHop(nspins,-2)
base_array = -1 * np.ones(nspins)
base_array[0:nspins // 2] *= -1
state = np.random.permutation(
    base_array)  # return a random permutation of the half 1, half-1 array
wf.init_lt(state)

s = sampler.Sampler(wf, h, mag0=m)
s.run(nruns, init_state=state)

state = s.state
wf.init_lt(state)


def gamma_fun(p):
    #return .05
    return max(.05 * (.994**p),
               .005)  #This is chosen to give a factor of 10 in about 400 steps
    #return .05 / (2 ** (p // 50))


t = trainer.build_trainer(wf, h)
Example #13
wf = nqs.NqsLocalTI(40, 1, k)  # A translation invariant NQS instance

wf.Wloc = 0.1 * np.random.random(
    wf.Wloc.shape) + 0j  # Fill in with starting values
wf.a = 0.1 * np.random.uniform() + 0j
wf.b = 0.1 * np.random.random(wf.b.shape) + 0j

base_array = np.concatenate(
    (np.ones(int(20)),
     -1 * np.ones(int(20))))  # make an array of half 1, half -1
state = np.random.permutation(
    base_array)  # return a random permutation of the half 1, half-1 array
wf.init_lt(state)

s = sampler.Sampler(wf, h)
s.run(nruns)

state = s.state
wf.init_lt(state)


def gamma_fun(p):
    return gam


t = trainer.TrainerLocalTI(h)

wf, elist = t.train(wf,
                    state,
                    100,
Example #14
    experiment = entropy.Experiment(0, params[0], params[1], params[2])
    simSites = entropy.runEntropy(experiment, sites, False)
    return simSites


sites = '../data/cities_weights.csv'
data = entropy.loadHistoricalSites(sites)

eps = threshold.LinearEps(15, 200, 150)
priors = sampler.TophatPrior([0, 0, 0], [2, 2, 10])

mpi_pool = mpi_util.MpiPool()
sampler = sampler.Sampler(N=200,
                          Y=data,
                          postfn=postfn,
                          dist=entropy.distRelative,
                          pool=mpi_pool)

for pool in sampler.sample(priors, eps):
    logFile = open('general_' + str(os.getpid()) + '.txt', 'a')
    logFile.write('starting eps: ' + str(pool.eps) + '\n')
    logFile.close()
    print("T: {0}, eps: {1:>.4f}, ratio: {2:>.4f}".format(
        pool.t, pool.eps, pool.ratio))
    for i, (mean, std) in enumerate(
            zip(np.mean(pool.thetas, axis=0), np.std(pool.thetas, axis=0))):
        print(u"    theta[{0}]: {1:>.4f} \u00B1 {2:>.4f}".format(i, mean, std))
    np.savetxt("result_" + str('%.2f') % pool.eps + '.csv',
               pool.thetas,
               delimiter=";",
Example #15
# Now begin testing outputs
base_array = np.concatenate(
                (np.ones(int(20)), -1 * np.ones(int(20))))  # make an array of half 1, half -1
state = np.random.permutation(base_array)  # return a random permutation of the half 1, half-1 array

n1.init_lt(state)
n2.init_lt(state)

flips = np.array(np.random.choice(np.arange(n1.nv), 2))

print("Log_val matches: {}".format(np.all(np.isclose(n1.log_val(state), n2.log_val(state)))))
print("Log_pop matches: {}".format(np.all(np.isclose(n1.log_pop(state, flips), n2.log_pop(state, flips)))))

nruns = 1000
h = heisenberg1d.Heisenberg1d(40,1)

print("\nSampling n1 ...")

start_time = time.time()
s1 = sampler.Sampler(n1, h, quiet = False)
s1.run(nruns)
print("time elapsed: {:.2f}s".format(time.time() - start_time))

print("\nSampling n2 ...")

start_time = time.time()
s2 = sampler.Sampler(n2, h, quiet = False)
s2.run(nruns)
print("time elapsed: {:.2f}s".format(time.time() - start_time))
Example #16
import sampler
import sampler_o
import utils
from gradient_visualizer import GradientVisualizer

if __name__ == "__main__":
    image_path = './imgs/cute.jpg'
    cute_cat = utils.loadimage(image_path).unsqueeze(0)
    print(cute_cat.shape)

    trans_mat = torch.Tensor([[[0.6705,  0.4691, -0.1369],
                               [-0.4691,  0.6705, -0.0432]]])
    out_shape = [128, 128]

    bilinear_sampler = sampler.Sampler('bilinear', 'zeros')
    bilinear_transformed = bilinear_sampler.warp_image(
        cute_cat, trans_mat, out_shape=out_shape)
    # save_image(bilinear_transformed, 'bilinear_transformed.png')
    # utils.showimg(bilinear_transformed)

    bicubic_sampler = sampler.Sampler('bicubic', 'zeros')
    bicubic_transformed = bicubic_sampler.warp_image(
        cute_cat, trans_mat, out_shape=out_shape)
    # save_image(bicubic_transformed, 'bicubic_transformed.png')
    # utils.showimg(bicubic_transformed)

    # utils.torchseed(666)
    # linearized_sampler_o = sampler_o.Sampler('linearized', 'zeros')
    # linearized_tarnsformed_o = linearized_sampler_o.warp_image(
    #     cute_cat, trans_mat, out_shape=out_shape)
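warp_image is not shown in this excerpt. A plausible minimal version built on plain PyTorch (an assumption about what the Sampler does internally, not its actual code):

import torch
import torch.nn.functional as F

def warp_image(img, trans_mat, out_shape, mode='bilinear', padding_mode='zeros'):
    # img: (N, C, H, W); trans_mat: (N, 2, 3) affine matrix in normalized coordinates
    n, c = img.shape[0], img.shape[1]
    grid = F.affine_grid(trans_mat, [n, c, out_shape[0], out_shape[1]], align_corners=False)
    return F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=False)

# e.g. warp_image(cute_cat, trans_mat, [128, 128], mode='bicubic') for the bicubic variant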
Example #17
logging.basicConfig(filename=os.path.join(expe_path, 'ceec_abc.log'),
                    level=logging.INFO,
                    filemode="w")

data = 0  #Our idealized input

#eps = threshold.ExponentialConstEps(.3, .01,5,5)
eps = threshold.ExponentialEps(2, 2000, .2)

# priors: market_size, mu, N, G, step
priors = sampler.TophatPrior([0, 0, 200, 2, 10], [1, 1, 400, 6, 30])

sampler = sampler.Sampler(N=500,
                          Y=data,
                          postfn=postfn,
                          dist=ceec.dist,
                          threads=1)

for pool in sampler.sample(priors, eps):
    print("T: {0}, eps: {1:>.4f}, ratio: {2:>.4f}".format(
        pool.t, pool.eps, pool.ratio))
    for i, (mean, std) in enumerate(
            zip(np.mean(pool.thetas, axis=0), np.std(pool.thetas, axis=0))):
        print(u"    theta[{0}]: {1:>.4f} \u00B1 {2:>.4f}".format(i, mean, std))
    print(ceec.indices.keys())
    print(sorted(ceec.indices.items(), key=operator.itemgetter(1)))
    s = sorted(ceec.indices.items(), key=operator.itemgetter(1))
    slist = [v[0] for e, v in enumerate(s)]
    print(slist)
    np.savetxt(os.path.join(expe_path,
Example #18
import entropy 
import os
import sys

def postfn(params):
#    print('postfn, params:',params) 
    entropy.loadCosts('../data/costMatrixSea.csv')
    sites = '../data/cities_weights.csv'

    experiment = entropy.Experiment(0, params[0], params[1], params[2])
    simSites = entropy.runEntropy(experiment, sites, False)
    return simSites

sites = '../data/cities_weights.csv'
data = entropy.loadHistoricalSites(sites)

eps = threshold.LinearEps(15, 200, 150)
priors = sampler.TophatPrior([0,0,0],[2,2,10])

sampler = sampler.Sampler(N=200, Y=data, postfn=postfn, dist=entropy.distRelative, threads=16)

for pool in sampler.sample(priors, eps):
    print("T: {0}, eps: {1:>.4f}, ratio: {2:>.4f}".format(pool.t, pool.eps, pool.ratio))
    for i, (mean, std) in enumerate(zip(np.mean(pool.thetas, axis=0), np.std(pool.thetas, axis=0))):
        print(u"    theta[{0}]: {1:>.4f} \u00B1 {2:>.4f}".format(i, mean,std))
    np.savetxt("result_"+str('%.2f')%pool.eps+'.csv', pool.thetas, delimiter=";", fmt='%1.5f')

print(pool.thetas)
np.savetxt("foo.csv", pool.thetas, delimiter=";", fmt='%1.5f')
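From the way the sampler is constructed above, postfn maps one parameter draw to simulated data and dist reduces a (simulated, observed) pair to a scalar discrepancy that is compared against the current eps. A toy pair with those call signatures (illustrative stand-ins, not the project's entropy functions):

import numpy as np

def toy_postfn(params):
    # simulate a dataset for one parameter vector; trivial stand-in model
    rng = np.random.default_rng()
    return rng.normal(loc=params[0], scale=params[1] + 1e-6, size=100)

def toy_dist(simulated, observed):
    # scalar discrepancy between simulation and observation
    return abs(np.mean(simulated) - np.mean(observed))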

Example #19
def main():
    parser = argparse.ArgumentParser(description="Train U-net")

    parser.add_argument('--name', type=str, default='unknown',
                        help='network name')

    parser.add_argument('--model_dir', type=str, required=True,
                        help='Where network will be saved and restored')

    parser.add_argument("--lr",
                        type=float,
                        default=1e-4,
                        help="Adam learning rate")

    parser.add_argument("--batch_size",
                        type=int,
                        default=5,
                        help="Batch size")


    parser.add_argument("--input_size",
                        type=int,
                        default=324,
                        help="Input size of the image that will be fed into the network. input_size = 16*n + 4. Default: 324")

    parser.add_argument("--output_size",
                        type=int,
                        default=116,
                        help="Size of the image produced by the network. Default: 116")


    parser.add_argument("--tb_log_dir",
                        type=str,
                        required=True,
                        help="Tensorboard log dir")

    parser.add_argument("--n_steps",
                        type=int,
                        default=0,
                        help="Number of steps. Default: 0 means unlimited steps.")

    parser.add_argument("--dataset_dir",
                        type=str,
                        default="../dataset/trainset")

    parser.add_argument("--pretrained_vgg",
                        type=str,
                        choices=['yes', 'no'],
                        default="yes",
                        help="Use pretrained VGG weights")

    parser.add_argument("--fix_vgg",
                        type=str,
                        choices=['yes', 'no'],
                        default="yes",
                        help="Fix vgg weights while learning")

    parser.add_argument("--validation_freq",
                        type=int,
                        default=100,
                        help="Validation freq. Default 100")

    parser.add_argument("--validation_set_size",
                        type=int,
                        default=20,
                        help="Metrics are averaged over validation_set_size evaluations. Default: 20")

    parser.add_argument("--channel",
                        type=str,
                        choices=['rgb', 'gray'],
                        default="rgb",
                        help="channel. Default: rgb")




    args = parser.parse_args()

    net_name = args.name
    model_dir = args.model_dir
    learning_rate = args.lr
    batch_size = args.batch_size
    net_input_size = args.input_size
    net_output_size = args.output_size
    tb_log_dir = args.tb_log_dir
    n_steps = args.n_steps
    dataset_dir = args.dataset_dir
    pretrained_vgg = args.pretrained_vgg == 'yes'
    fix_vgg = args.fix_vgg == 'yes'
    validation_freq = args.validation_freq
    validation_set_size = args.validation_set_size
    channel = args.channel

    print("Load dataset")
    dataset = DS.DataSet(dataset_dir)

    print("Initialize network manager")
    network_manager = NManager(model_dir, net_name)
    if network_manager.registered:
        net = network_manager.get_net()
    else:
        print("Use pretrained weights: %s" % str(pretrained_vgg))
        net = U.Unet(vgg_pretrained=pretrained_vgg)
        network_manager.register_net(net)

    print("Move to GPU")
    net.cuda()

    if channel == "rgb":
        def get_features(x):
            return x.get_ndarray([DS.ChannelRGB_PanSharpen])
    else:
        def get_features(x):
            img0 = x.get_ndarray([DS.ChannelPAN])[0]
            img = np.array([img0, img0, img0])
            return img


    def get_target(x):
        return x.get_interior_mask()

    train_sampler = S.Sampler(dataset.train_images(), get_features, get_target,
                                         net_input_size, net_output_size, rotate_amplitude=20,
                                         random_crop=True, reflect=True)()

    test_sampler = S.Sampler(dataset.test_images(), get_features, get_target,
                                         net_input_size, net_output_size, rotate_amplitude=20,
                                         random_crop=True, reflect=True)()

    if fix_vgg:
        parameters = list(net.bn.parameters()) + list(net.decoder.parameters()) + list(net.conv1x1.parameters())
    else:
        parameters = net.parameters()

    print("LR: %f" % learning_rate)
    optimizer = torch.optim.Adam(parameters, lr=learning_rate)

    logger = SummaryWriter(tb_log_dir + "/" + net_name)

    print("Start learning")
    with network_manager.session(n_steps) as (iterator, initial_step):
        for step in tqdm.tqdm(iterator, initial=initial_step):
            batch_features, batch_target = batch_generator(train_sampler, batch_size)
            
            batch_features = Variable(FloatTensor(batch_features)).cuda()
            batch_target = Variable(FloatTensor(batch_target)).cuda()

            predicted = net.forward(batch_features)

            train_metrics = eval_base_metrics(predicted, batch_target)
            train_metrics = eval_precision_recall_f1(**train_metrics)

            loss = train_metrics['loss']

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            log_metrics(logger, '', train_metrics, step)
            logger.add_scalar('lr', np.log(learning_rate)/np.log(10), step)
            
            if step % 1000 == 0:
                network_manager.save()

            if step % validation_freq == 0:
                test_metrics = average_metrics(net, test_sampler, batch_size, validation_set_size)
                log_metrics(logger, 'val', test_metrics, step)

                avg_train_metrics = average_metrics(net, train_sampler, batch_size, validation_set_size)
                log_metrics(logger, 'avg_train', avg_train_metrics, step)

                generate_image(logger, net, 'val', dataset.test_images(), get_features, get_target,
                               net_input_size, net_output_size, step)

                generate_image(logger, net, 'train', dataset.train_images(), get_features, get_target,
                               net_input_size, net_output_size, step)
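batch_generator is used in the training loop above but not defined in this excerpt. Given that S.Sampler(...)() is called, train_sampler is presumably an iterator yielding (features, target) pairs, so a minimal version (an assumption, not the repository's actual helper) could be:

import numpy as np

def batch_generator(sample_iter, batch_size):
    # draw batch_size (features, target) pairs and stack them into two arrays
    features, targets = zip(*(next(sample_iter) for _ in range(batch_size)))
    return np.array(features), np.array(targets)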
Example #20
File: main_stein.py  Project: ZofiaTr/DFM
args = parser.parse_args()
# folder where the data is saved
dataFolderName = args.dataFolder
nrSteps = args.nrSteinSteps
steinStepSize = args.steinStepSize
modnr = args.modnr

# initialize the sampler class together with the model
mdl = model.Model('Alanine')
intg = integrator.Integrator(model=mdl,
                             gamma=1.0 / unit.picosecond,
                             temperature=300 * unit.kelvin,
                             dt=2.0 * unit.femtosecond,
                             temperatureAlpha=300 * unit.kelvin)
smpl = sampler.Sampler(model=mdl,
                       integrator=intg,
                       algorithm=0,
                       dataFileName='Data')

# stein
st = stein.Stein(smpl, dataFolderName, modnr=modnr)
# change the stein step size
st.epsilon_step = unit.Quantity(steinStepSize, smpl.model.x_unit)**2
print('Running steinIS on ' + repr(len(st.qInit)) + ' points')
np.save(dataFolderName + '/stein_initial.npy', st.qInit)

#run stein
st.run_stein(numberOfSteinSteps=nrSteps)

np.save(dataFolderName + '/stein_final.npy', st.q)
Example #21
    'PTLabontaraEkaKarsa',

    # 'app_jambi',  # d
    # 'app_kalbar',
    # 'app_kaltim',

    #'app_oki',  # c 'mukti_prakarsa',
    'app_riau',
    'multipersada_gatramegah',  #'musim_mas',  # 'unggul_lestari',

    # e
    'gar_pgm',
    'PTAgroAndalan',
    'Bumitama_PTGemilangMakmurSubur',
]
my_sampler = sampler.Sampler()
base_dir = dirfuncs.guess_data_dir()
band_set = {
    0: {
        'blue_max', 'red_max', 'nir_max', 'swir1_max', 'VH_0', 'VH', 'VH_2',
        'VV_0', 'VV', 'VV_2', 'EVI', 'swir2_max', 'brightness', 'wetness',
        'greenness', 'slope'
    },
    1: {
        'blue_max', 'red_max', 'nir_max', 'swir1_max', 'VH_0', 'VH', 'VV_0',
        'VV', 'VV_2', 'EVI', 'swir2_max', 'wetness', 'greenness', 'slope'
    },
    2: {
        'blue_max', 'red_max', 'nir_max', 'swir1_max', 'VH_0', 'VH', 'VV_0',
        'VV', 'VV_2', 'brightness', 'swir2_max', 'wetness', 'greenness',
        'slope'
Example #22
## Now that we have all the wavefunctions generated, find the <sigma(x)> at each one

#Fully connected translation-invariant
print("Fully connected ANNQS")
sxnonloc = []
nonlocerr = []
wf = nqs.NqsTI(10, 1)
start_time = time.time()
for t in np.arange(0, nsteps, data_rate):
    if t % talk_rate == 0:
        print('t = {}'.format(t))
    wf.load_parameters('../Outputs/10SpinEvolve/evolution_ti_' + str(t) +
                       '.npz')
    s = sampler.Sampler(wf,
                        observables.Sigmax(10, 1),
                        opname='transverse polarization')
    s.run(nruns)
    sxnonloc.append(10 * s.estav)
    err = distances.get_rdist(wf, nruns,
                              h)  #/distances.get_ddist(wf,h,.01,nruns)
    nonlocerr.append(err)
print("time elapsed: {:.2f}s".format(time.time() - start_time))

#1-local
sx1 = []
loc1err = []
print("1-local ANNQS")
wf = nqs.NqsLocal(10, 1, 1)
start_time = time.time()
for t in np.arange(0, nsteps, data_rate):
Example #23
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug  9 17:06:50 2017

@author: mittelberger2
"""
import sampler
import tifffile
import numpy as np
import matplotlib.pyplot as plt
import sys
import math

sampler.Sampler.c_sampler_path = './sampleUnitCell.so'
sampl = sampler.Sampler()
sampl.load_c_sampler()

img = sampler.calculate_counts(tifffile.imread('0000_SuperScan (MAADF).tif'))

#img = tifffile.imread('FullSuperScanIntM.tif').astype(np.int32)

#sampl.base_vec_1 = np.array((-40.97, -17.71))
a2 = sampl.base_vec_2 = np.array((-26.88, 5.68))
a1 = sampl.base_vec_1 = np.array((14.09, 23.39))

#sampl.offset = 49.5/54*a1 + 34/54*a2 + np.array((-0.50,-0.25))

sampl.offset = np.array((536.949722 + 2048, 521.195463 + 3072))

r1 = (2 * (a1[0]**2 + a1[1]**2)**0.5)
Example #24
File: main_run.py  Project: ZofiaTr/DFM
                                   dt=dt,
                                   massScale=massScale,
                                   gammaScale=gammaScale,
                                   kappaScale=kappaScale)

## load initial condition from file
# IC = md.load('alanine_start_state_IC.h5')
# # remove first dimension - the intial condition has shape (1,nrParticles, spaceDimension) when taken from trajectory
# InitialPosition = np.squeeze(IC.xyz)
# integrator.x0 = InitialPosition * mdl.x_unit

general_sampler = sampler.Sampler(
    model=mdl,
    integrator=integrator,
    algorithm=iAlgo,
    diffusionMapMetric=diffMapMetric,
    dataFileName=dataFileName,
    dataFrontierPointsName=dataFileNameFrontierPoints,
    dataEigenVectorsName=dataFileNameEigenVectors,
    dataEnergyName=dataFileEnergy,
    diffusionMap=diffusionMap)

# nrSteps is the number of steps for each of the nrRep replicas; the algorithm is iterated nrIterations times, so the total simulation time is nrSteps x nrIterations
nrSteps = args.nrSteps
nrEquilSteps = 0  #10000
nrIterations = args.niterations
nrRep = args.nreplicas

print('Simulation time: ' +
      repr(nrSteps * nrIterations * dt.value_in_unit(unit.femtosecond)) + ' ' +
      str(unit.femtosecond) + '\n ***** \n')
#
Example #25

dlist = []
glist = []

nspins = 40
training_step = 500
nsamples = 50000
alpha = 2

annqs = nqs.NqsTI(nspins, alpha)
annqs.load_parameters('../Outputs/' + str(nspins) + '_alpha=' + str(alpha) +
                      '_Ising10_ti_' + str(training_step) + '.npz')
h = ising1d.Ising1d(nspins, 1.0)

s = sampler.Sampler(annqs, h, quiet=False)
s.run(nsamples)

states = np.zeros((nsamples, nspins))

for i in range(nsamples):
    for j in range(nspins):
        s.move()
    states[i] = s.state

data = np.zeros(nspins)

for size in range(nspins):
    size_data = np.array([])
    for state in states:
        size_data = np.append(size_data, np.sum(state[:size + 1]))
Example #26
joint_likelihood.add_likelihood(quantile_likelihood, 1.0)

acceptance_count = 0
acceptance = []
temperatures = []
likelihood = []
excess = []

distances = {c: [] for c in distance_likelihood.categories}

if config["schedule"] == "constant":
    schedule = schedules.ConstantSchedule(config["constant_temperature"])
elif config["schedule"] == "decay":
    schedule = schedules.ExponentialSchedule(config)

sampler = sampler.Sampler(config, joint_likelihood, proposal_distribution,
                          activity_facilities, schedule)
for i in tqdm(range(int(config["total_iterations"]) + 1),
              desc="Sampling locations"):
    accepted = sampler.run_sample()
    if accepted: acceptance_count += 1

    if config["validation_interval"] is not None and i % config[
            "validation_interval"] == 0:
        current_likelihood = capacity_likelihood.get_likelihood()
        validation_likelihood = capacity_likelihood.compute_validation_likelihood(
        )
        if abs(current_likelihood - validation_likelihood) > 1e-12:
            raise AssertionError(
                (current_likelihood, validation_likelihood,
                 abs(current_likelihood - validation_likelihood)))
Example #27
    def __init__(self,
                 n_factors = 8,
                 n_iter = 20,
                 batch_size = 256,
                 reg_sdp= 0.00001,  # L2, L1 regularization
                 reg_sdm = 0.0001,    # L2, L1 regularization
                 lr = 1e-2, # learning_rate
                 decay_step = 20,
                 decay_weight= 0.5,
                 optimizer_func = None,
                 use_cuda = False,
                 random_state = None,
                 num_neg_samples = 4, #number of negative samples for each positive sample.
                 dropout=0.2,
                 n_hops=3,
                 activation_func_sdm = 'tanh',
                 activation_func_sdp = 'tanh',
                 n_layers_sdp=1,
                 gate_tying=memnet.GATE_GLOBAL,
                 model='sdm',
                 beta=0.9, args=None
                 ):
        super(Rec, self).__init__()


        self._n_factors = n_factors
        self._embedding_size = n_factors

        self._n_iters = n_iter
        self._batch_size = batch_size
        self._lr = lr
        self._decay_step = decay_step
        self._decay_weight = decay_weight

        self._reg_sdp = reg_sdp
        self._reg_sdm = reg_sdm
        self._optimizer_func = optimizer_func

        self._use_cuda = use_cuda
        self._random_state = random_state or np.random.RandomState()
        self._num_neg_samples = num_neg_samples

        self._n_users = None
        self._n_items = None
        self._lr_schedule = None
        self._loss_func = None
        self._n_hops = n_hops
        self._dropout = dropout

        self._gate_tying = gate_tying
        self._model = model
        self._beta = beta


        #my_utils.set_seed(self._random_state.randint(-10**8, 10**8), cuda=self._use_cuda)
        my_utils.set_seed(gc.SEED)

        self._activation_func_sdm = activation_func_sdm
        self._activation_func_sdp = activation_func_sdp
        self._n_layers_sdp = n_layers_sdp


        self._sampler = my_sampler.Sampler()
        self._args = args

        #create checkpoint directory
        if not os.path.exists(args.saved_path):
            os.mkdir(args.saved_path)

