Example #1
def main():
    y = PARAMS_SIM['anio']
    m = PARAMS_SIM['mes']
    d = PARAMS_SIM['dia']
    h = PARAMS_SIM['hora']
    print('Searching for index')
    save_params(all_params,PATH+'/reports')
    Constants(PATH)
    date = datetime(y,m,d,h)
    # ind = get_index(data_inputs, date)  # index lookup disabled; use the first record
    ind = 0
    S_climate, S_data, S_prod, A, df_inputs,start,Qdic = sim(agent,ind)
    start = df_inputs['Date'].iloc[0]
    frec_ = int(STEP*24*60)  # step length in minutes (assuming STEP is a fraction of a day)
    final_indexes = compute_indexes(start,STEPS,frec_)
    df_climate = pd.DataFrame(S_climate, columns=('$T_1$', '$T_2$', '$V_1$', '$C_1$'))
    df_climate.index = final_indexes
    df_climate.to_csv(PATH+'/data/' + 'climate_model.csv')
    save_Q(Qdic,PATH)
    figure_cost_gain(Qdic,final_indexes,PATH)
    figure_state(S_climate,final_indexes,PATH)
    figure_rh_par(S_data,final_indexes,PATH)
    figure_prod(S_prod,final_indexes,PATH)
    figure_actions(A,final_indexes,PATH)
    figure_inputs(df_inputs,PATH)
    print('Saved to', PATH)
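Note: throughout these examples, save_params appears to persist the run's parameter dictionary alongside its outputs. A minimal sketch of such a helper, assuming a JSON dump into the report directory (the format and filename are guesses, not taken from any of the projects above):

import json
import os

def save_params(params, out_dir):
    # Illustrative only: dump a parameter dict as JSON into out_dir.
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'params.json'), 'w') as f:
        json.dump(params, f, indent=2, default=str)  # default=str handles datetimes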
Example #2
File: game.py Project: Dront/snake
    def run_logic(self):
        """
        Updates positions.
        """
        if self.state == State.RUN:

            # collision with wall
            if pygame.sprite.spritecollideany(self.player.head,
                                              self.map.obstacles):
                self.state = State.GAME_OVER

            # collision with tail
            elif pygame.sprite.spritecollideany(self.player.head,
                                                self.player.body):
                self.state = State.GAME_OVER

            # ate a fruit
            ate = pygame.sprite.spritecollideany(self.player.head,
                                                 self.fruit.sprites())
            if ate is not None:
                self.player.eat(ate)
                if self.player.score > params['HIGH_SCORE']:
                    params['HIGH_SCORE'] = self.player.score
                    save_params()

                self.fruit.add(self.create_fruit())
Example #3
def main():
    env = GreenhouseEnv()
    agent = DDPGagent(env)
    noise = OUNoise(env.action_space)
    action_dim = env.action_space.shape[0]
    state_dim = env.observation_space.shape[0]
    #writer_reward = SummaryWriter()
    #writer_abs = SummaryWriter()
    #writer_penalty = SummaryWriter()
    t1 = time.time()
    mpl.style.use('seaborn')
    if len(sys.argv) != 1:
        # Load trained model
        PATH = sys.argv[1:].pop()
        print('Loaded trained model')
        agent.load(PATH + '/nets')
    else:
        PATH = create_path()

    Constants(PATH)
    all_params['PARAMS_DDPG']['hidden_sizes'] = agent.hidden_sizes

    save_params(all_params, PATH + '/output')
    #agent.actor.eval()
    #agent.critic.eval()

    rewards, avg_rewards, penalties, abs_rewards = train_agent(
        agent, noise, PATH, save_freq=SAVE_FREQ)

    figure_reward(rewards, avg_rewards, penalties, abs_rewards, PATH)
    save_rewards(rewards, avg_rewards, penalties, abs_rewards, PATH)
    S_climate, S_data, S_prod, A, df_inputs, start, Qdic = sim(agent,
                                                               ind=INDICE)
    save_Q(Qdic, PATH)
    start = df_inputs['Date'].iloc[0]
    final_indexes = compute_indexes(
        start, STEPS, env.frec
    )  # New indexes must be built for the plots; they depend on STEP
    figure_cost_gain(Qdic, final_indexes, PATH)
    figure_state(S_climate, final_indexes, PATH)
    figure_rh_par(S_data, final_indexes, PATH)
    figure_prod(S_prod, final_indexes, PATH)
    figure_actions(A, final_indexes, PATH)
    figure_inputs(df_inputs, PATH)

    t2 = time.time()
    season1_nn(agent, PATH, '')
    violin_reward(PATH, 'nn')  # can be 'nn' or 'expert'
    violin_actions(PATH, 'nn')

    if not SHOW:
        create_report(PATH, t2 - t1)
        send_correo(PATH + '/reports/Reporte.pdf')
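Note: Examples #1 and #3 both rebuild a time index with compute_indexes(start, STEPS, freq) before plotting. A plausible pandas-based reconstruction, assuming the frequency is in minutes (inferred from frec_ = int(STEP*24*60) in Example #1):

import pandas as pd

def compute_indexes(start, steps, freq_minutes):
    # Hypothetical sketch: one timestamp per simulation step.
    return pd.date_range(start=start, periods=steps, freq=f'{freq_minutes}min')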
Example #4
File: game.py Project: Dront/snake
    def run_logic(self):
        """
        Updates positions.
        """
        if self.state == State.RUN:

            # collision with wall
            if pygame.sprite.spritecollideany(self.player.head, self.map.obstacles):
                self.state = State.GAME_OVER

            # collision with tail
            elif pygame.sprite.spritecollideany(self.player.head, self.player.body):
                self.state = State.GAME_OVER

            # ate a fruit
            ate = pygame.sprite.spritecollideany(self.player.head, self.fruit.sprites())
            if ate is not None:
                self.player.eat(ate)
                if self.player.score > params['HIGH_SCORE']:
                    params['HIGH_SCORE'] = self.player.score
                    save_params()

                self.fruit.add(self.create_fruit())
Example #5
            tmp_data = numpy.roll(tmp_data, vshift, axis=2)
            train_x.set_value(tmp_data)
            print "       ",
            cost += trainer.step()

            hshift = npy_rng.randint(-5, 5)
            vshift = npy_rng.randint(-5, 5)
            tmp_data = numpy.roll(ipart[0][:, :, :, ::-1], hshift, axis=3)
            tmp_data = numpy.roll(tmp_data, vshift, axis=2)
            train_x.set_value(tmp_data)
            print "       ",
            cost += trainer.step()

    cost /= i * 20.
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    print "*** epoch %d cost: %f" % (epoch, cost)
    print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 30 == 0:
            save_params(model, 'CONV_5-5-3-3_32-48-64-128_3333_512-512-256-128-10_hflip_shift_lk.npy')
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
Example #6
                                   rng=npy_rng)

    prev_cost = numpy.inf
    patience = 0
    for epoch in xrange(pretrain_epc):
        cost = trainer.epoch()
        if prev_cost <= cost:
            patience += 1
            if patience > 10:
                patience = 0
                trainer.set_learningrate(0.9 * trainer.learningrate)
            if trainer.learningrate < 1e-10:
                break
        prev_cost = cost
    save_params(
        model,
        'ZLIN_4000_1000_4000_1000_4000_1000_4000_normhid_nolinb_cae1_dropout.npy'
    )
print "Done."

#########################
# BUILD FINE-TUNE MODEL #
#########################

print "\n\n... building fine-tune model -- contraction 1"
for imodel in model.models_stack:
    imodel.threshold = 0.
model_ft = model + LogisticRegression(hid_layer_sizes[-1], 10, npy_rng=npy_rng)
model_ft.print_layer()

train_set_error_rate = theano.function(
    [],
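Note: the pretraining loop above shrinks the learning rate by 10% after more than 10 consecutive non-improving epochs and stops once the rate underflows 1e-10. The same schedule in isolation, as a self-contained sketch:

def decayed_lr(costs, lr=0.1, patience_limit=10, factor=0.9, floor=1e-10):
    # Stand-in for the trainer loop: `costs` is the per-epoch cost sequence.
    prev, patience = float('inf'), 0
    for cost in costs:
        if prev <= cost:                      # no improvement this epoch
            patience += 1
            if patience > patience_limit:
                patience = 0
                lr *= factor                  # decay by 10%
            if lr < floor:
                break                         # give up: rate has underflowed
        prev = cost
    return lr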
Example #7
trainer = GraddescentMinibatch(
    varin=model.varin, data=train_x, 
    truth=model.models_stack[-1].vartruth, truth_data=train_y,
    supervised=True,
    cost=model.models_stack[-1].cost(), 
    params=model.params,
    batchsize=batchsize, learningrate=finetune_lr, momentum=momentum,
    rng=npy_rng
)

init_lr = trainer.learningrate
prev_cost = numpy.inf
for epoch in xrange(finetune_epc):
    cost = trainer.epoch()
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    if epoch % 10 == 0:
        print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 100 == 0:
            save_params(model, 'CONV_3-4-3_32-48-64_256_1600-400-1600-10.npy')
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
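Note: Example #7 checkpoints every 100 epochs and deliberately swallows save failures so a full disk or permission error cannot kill a long run. A sketch of that guard with the narrower `except Exception` idiom (a bare `except:` also traps KeyboardInterrupt):

def checkpoint(save_fn, model, filename, epoch, every=100):
    # save_fn stands in for save_params from the examples above.
    if epoch % every != 0:
        return
    try:
        save_fn(model, filename)
    except Exception as err:
        print('checkpoint failed:', err)      # log and keep training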
Example #8
    -T.log2(  # apply log_2
        T.sum(  # summing over the 3rd dimension, which has 27 elements
            (model._prediction_for_training * model._input_frames[1:]),
            axis=2)))
#train_perplexity = theano.function([], ppw, givens={model.inputs:train_features})
valid_perplexity = theano.function([],
                                   ppw,
                                   givens={model.inputs: valid_features})

#model.monitor = model.normalizefilters

# TRAIN MODEL
trainer = graddescent_rewrite.SGD_Trainer(model,
                                          train_features_numpy,
                                          batchsize=50,
                                          learningrate=0.1,
                                          loadsize=50000,
                                          gradient_clip_threshold=1.0)

print "BEFORE_TRAINING: valid perplexity: %f" % (valid_perplexity())
print 'training...'
for epoch in xrange(100):
    trainer.step()
    save_params(model, 'rr_rr_rnn_on_chars_params.npy')
    # print "perplexity train: %f, valid: %f" % (train_perplexity(), valid_perplexity())
    print "valid perplexity: %f" % (valid_perplexity())

print "sampling from the model:" + ''.join(
    vec2chars(model.sample(numframes=1000), invdict))
pdb.set_trace()
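Note: the monitored quantity above is the negative log2 probability of each true next character; the elementwise product with the one-hot input frames selects the model's probability for the observed symbol. Averaged, this is bits-per-character, and 2 to that power is the usual perplexity. The same computation in plain numpy (the array shapes are an assumption):

import numpy as np

def bits_per_char(pred_probs, onehot_targets):
    # pred_probs, onehot_targets: (time, batch, alphabet); alphabet is 27 above.
    p_true = np.sum(pred_probs * onehot_targets, axis=2)  # prob. of the true char
    bits = -np.mean(np.log2(p_true))
    return bits, 2.0 ** bits                              # (bits/char, perplexity)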
Example #9
    givens = {model_ft.varin : train_x[index * batchsize: (index + 1) * batchsize],
              truth : train_y[index * batchsize: (index + 1) * batchsize]},
)
def train_error():
    return numpy.mean([train_set_error_rate(i) for i in xrange(50000/batchsize)])

test_set_error_rate = theano.function(
    [index],
    T.mean(T.neq(model_ft.models_stack[-1].predict(), truth)),
    givens = {model_ft.varin : test_x[index * batchsize: (index + 1) * batchsize],
              truth : test_y[index * batchsize: (index + 1) * batchsize]},
)
def test_error():
    return numpy.mean([test_set_error_rate(i) for i in xrange(10000/batchsize)])
print "Done."
"""
trainer = GraddescentMinibatch(
    varin=model_ft.varin, data=train_x, 
    truth=model_ft.models_stack[-1].vartruth, truth_data=train_y,
    supervised=True,
    cost=model_ft.models_stack[-1].cost() + \
         model_ft.models_stack[-1].weightdecay(weightdecay),
    params=model_ft.models_stack[-1].params, 
    batchsize=batchsize, learningrate=logreg_lr, momentum=momentum,
    rng=npy_rng
)

init_lr = trainer.learningrate
prev_cost = numpy.inf
for epoch in xrange(logreg_epc):
    cost = trainer.epoch()
Example #10
init_lr = trainer.learningrate
prev_cost = numpy.inf
for epoch in xrange(finetune_epc):
    i = 0
    cost = 0.
    for ipart in train_dg:
        print "part %d " % i,
        train_x.set_value(ipart[0])
        train_y.set_value(ipart[1])
        i += 1
        cost += trainer.step()
        print "       ",
        cost += trainer.step()
    cost /= i * 2. 
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    print "*** epoch %d cost: %f" % (epoch, cost)
    print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 30 == 0:
            save_params(model, 'CONV_5-5-3-3_32-48-64-128_3333_512-512-256-128-2.npy')
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
Example #11
        rng=npy_rng
    )

    prev_cost = numpy.inf
    patience = 0
    for epoch in xrange(pretrain_epc):
        cost = trainer.epoch()
        if prev_cost <= cost:
            patience += 1 
            if patience > 10:
                patience = 0
                trainer.set_learningrate(0.9 * trainer.learningrate)
            if trainer.learningrate < 1e-10:
                break
        prev_cost = cost
    save_params(model, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_normhid_nolinb_cae1_dropout.npy')
print "Done."


#########################
# BUILD FINE-TUNE MODEL #
#########################

print "\n\n... building fine-tune model -- contraction 1"
for imodel in model.models_stack:
    imodel.threshold = 0.
model_ft = model + LogisticRegression(
    hid_layer_sizes[-1], 10, npy_rng=npy_rng
)
model_ft.print_layer()
        print "       ",
        cost += trainer.step()

        # vertical flip
        train_x.set_value(ipart[0][:, :, ::-1, :])
        print "       ",
        cost += trainer.step()

        # rotate
        train_x.set_value(numpy.swapaxes(ipart[0], 2, 3))
        print "       ",
        cost += trainer.step()

    cost /= i * 4.0
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate * 0.8)
    prev_cost = cost
    print "*** epoch %d cost: %f" % (epoch, cost)
    print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 30 == 0:
            save_params(model, "CONV_5-5-3-3_32-48-64-128_3333_512-512-256-128-2_dtagmt.npy")
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
Example #13
        # left rotate
        noise_mask = numpy.tile(npy_rng.binomial(1, 1-noise, (batchsize, 1, 250, 250)), (1, 3, 1, 1))
        train_x.set_value(noise_mask * rotate[:, :, ::-1, ::-1])
        print "       ",
        cost += trainer.step()
        
        # left rotate flip
        noise_mask = numpy.tile(npy_rng.binomial(1, 1-noise, (batchsize, 1, 250, 250)), (1, 3, 1, 1))
        train_x.set_value(noise_mask * rotate[:, :, :, ::-1])
        print "       ",
        cost += trainer.step()
        """

    cost /= i * 8.
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    print "*** epoch %d cost: %f" % (epoch, cost)
    print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 30 == 0:
            save_params(model, 'CONV_5-5-3-3_32-48-64-128_3333_512-512-256-128-10_dtagmt2_denoise.npy')
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
Example #14
        cost=model.models_stack[i].cost(),
        params=model.models_stack[i].params_private,
        supervised=False,
        batchsize=1, learningrate=0.001, momentum=0., rng=npy_rng
    )

    layer_analyse = model.models_stack[i].encoder()
    layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
    layer_analyse.hist_weight()
    
    for epoch in xrange(15):
        trainer.step()
        layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
        layer_analyse.hist_weight()

save_params(model=model, filename="mnist_sae_784_784_784_10.npy")


#############
# FINE-TUNE #
#############

print "\n\nBegin fine-tune: normal backprop"
bp_trainer = GraddescentMinibatch(
    varin=model.varin, data=train_set_x, 
    truth=model.models_stack[-1].vartruth, truth_data=train_set_y,
    supervised=True, cost=model.models_stack[-1].cost(),
    params=model.params,
    batchsize=1, learningrate=0.1, momentum=0., 
    rng=npy_rng
)
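Note: Example #14 follows the classic stacked-autoencoder recipe: train each layer's private parameters in isolation, inspect the filters, then fine-tune the whole stack with supervised backprop. The draw_weight call likely tiles each hidden unit's incoming weights as a 28x28 image patch; a numpy sketch of that reshaping (W's layout is an assumption):

import numpy as np

def weight_patches(W, patch_shape=(28, 28), npatch=100):
    # View the first npatch columns of a (784, nhid) weight matrix
    # as 28x28 patches, one per hidden unit.
    return W[:, :npatch].T.reshape(npatch, *patch_shape)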
Example #15
            (model._prediction_for_training * model._input_frames[1:]),
            axis=2
        )
    )
)
#train_perplexity = theano.function([], ppw, givens={model.inputs:train_features})
valid_perplexity = theano.function([], ppw, givens={model.inputs:valid_features})

#model.monitor = model.normalizefilters


# TRAIN MODEL
trainer = graddescent_rewrite.SGD_Trainer(model,
                                          train_features_numpy,
                                          batchsize=50,
                                          learningrate=0.1,
                                          loadsize=50000,
                                          gradient_clip_threshold=1.0)

print "BEFORE_TRAINING: valid perplexity: %f" % (valid_perplexity())
print 'training...'
for epoch in xrange(100):
    trainer.step()
    save_params(model, 'rr_rr_rnn_on_chars_params.npy')
    # print "perplexity train: %f, valid: %f" % (train_perplexity(), valid_perplexity())
    print "valid perplexity: %f" % (valid_perplexity())


print "sampling from the model:" + ''.join(vec2chars(model.sample(numframes=1000), invdict))
pdb.set_trace()
Example #16
                                   supervised=False,
                                   batchsize=1,
                                   learningrate=0.001,
                                   momentum=0.,
                                   rng=npy_rng)

    layer_analyse = model.models_stack[i].encoder()
    layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
    layer_analyse.hist_weight()

    for epoch in xrange(15):
        trainer.step()
        layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
        layer_analyse.hist_weight()

save_params(model=model, filename="mnist_sae_784_784_784_10.npy")

#############
# FINE-TUNE #
#############

print "\n\nBegin fine-tune: normal backprop"
bp_trainer = GraddescentMinibatch(varin=model.varin,
                                  data=train_set_x,
                                  truth=model.models_stack[-1].vartruth,
                                  truth_data=train_set_y,
                                  supervised=True,
                                  cost=model.models_stack[-1].cost(),
                                  params=model.params,
                                  batchsize=1,
                                  learningrate=0.1,
Example #17
    for ipart in range(train_smp/part_size):
        print "part %d " % ipart,
        train_x.set_value(train_x_np[ipart * part_size : (ipart + 1) * part_size])
        train_y.set_value(train_y_np[ipart * part_size : (ipart + 1) * part_size])
        cost += trainer.epoch()
        print "       ",
        cost += trainer.epoch()
    print "part + ",
    train_x.set_value(train_x_np[-part_size :])
    train_y.set_value(train_y_np[-part_size :])
    cost += trainer.epoch()
    print "       ",
    cost += trainer.epoch()
    cost = cost / (train_smp/part_size + 1) / 2.
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    if epoch % 10 == 0:
        print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 100 == 0:
            save_params(model, 'CONV_flt543_pool332_strd222_nflt32_64_128_512*3-10.npy')
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
Example #18
trainer = GraddescentMinibatch(
    varin=model.varin, data=train_x, 
    truth=model.models_stack[-1].vartruth, truth_data=train_y,
    supervised=True,
    cost=model.models_stack[-1].cost(), 
    params=model.params,
    batchsize=batchsize, learningrate=finetune_lr, momentum=momentum,
    rng=npy_rng
)

init_lr = trainer.learningrate
prev_cost = numpy.inf
for epoch in xrange(finetune_epc):
    cost = trainer.epoch()
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    if epoch % 10 == 0:
        print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 100 == 0:
            save_params(model, 'svhn_CONV_flt2*5_pool2*5_nflt128*5_64*3-10.npy')
    except:
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()
Example #19
import sys
from params import save_params, get_params
from misc import folder_setup, get_files
from qualitycheck import qualitycheck
from readpupil import read_pupil
from preprocess import preprocess
from epoch import epoch
from plot import plot_conds
import time

if __name__ == '__main__':

    params = get_params(sys.argv)
    eye_paths, events_path = get_files(**params)
    assert len(eye_paths) == len(
        events_path
    ), 'You must select the same number of pupil and event files.'
    t0 = time.time()
    for eye_path, events_file in zip(eye_paths, events_path):
        folder_setup(eye_path, params)
        print(params['out_dir'])
        pupil_data = read_pupil(eye_path)
        qualitycheck(pupil_data, **params)
        preprocess(pupil_data, **params)
        epoched = epoch(pupil_data, events_file, **params)
        plot_conds(epoched, **params)
        save_params(params)

    print('\nDone!\n')
    print('\n\nIn total, that took {} seconds!'.format(time.time() - t0))
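Note: Example #19 threads one params dict through every stage via **params and records it with save_params after each file, so the saved parameters double as a processing log. A hypothetical sketch of the argv-to-dict step (the key=value convention and the defaults are assumptions; the real get_params may differ):

def get_params(argv):
    # Hypothetical: parse trailing "key=value" CLI arguments into a dict.
    params = {'out_dir': 'output'}            # assumed default
    for arg in argv[1:]:
        key, _, value = arg.partition('=')
        if value:
            params[key] = value
    return params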
Example #20
    epc_cost += cost
    if step % (50000 / batchsize) == 0 and step > 0:
        # set stop rule
        ind = (step / (50000 / batchsize)) % avg
        hist_avg[ind] = crnt_avg[ind]
        crnt_avg[ind] = epc_cost
        if sum(hist_avg) < sum(crnt_avg):
            break
    
        # adjust learning rate
        if prev_cost <= epc_cost:
            patience += 1
        if patience > 10:
            trainer.set_learningrate(0.9 * trainer.learningrate)
            patience = 0
        prev_cost = epc_cost

        # evaluate
        print "***error rate: train: %f, test: %f" % (
            train_error(), test_error())
        
        epc_cost = 0.
print "Done."
print "***FINAL error rate, train: %f, test: %f" % (
    train_error(), test_error()
)
save_params(model, __file__.split('.')[0] + '_params.npy')

pdb.set_trace()