Beispiel #1
0
    def pretrain(self, env):
        """Pre-train the critic on batches from *env*, stopping early when
        the monitored loss stops improving, then sync the target network."""
        # held-out batch used only to monitor the loss during pretraining
        eval_s, eval_q_c, eval_w = self._get_batch(env, config.PRETRAIN_BATCH)

        prev_loss = 9999.
        for epoch in range(config.PRETRAIN_EPOCHS):
            utils.print_progress(epoch, config.PRETRAIN_EPOCHS, step=100)

            # every 100 epochs: decay the LR, report the loss, check early stop
            if utils.is_time(epoch, 100):
                lr = config.PRETRAIN_LR / (1 + epoch / 100)
                self.model.set_lr(lr)

                pred = self.model(eval_s)
                loss = self.model.get_loss_c(pred, eval_q_c, eval_w)
                print("\nLoss: {:.4e} LR: {:.2e}".format(loss.data.item(), lr))

                # loss no longer improving -> stop early
                if prev_loss <= loss:
                    break

                prev_loss = loss

            batch_s, batch_q_c, batch_w = self._get_batch(env, config.PRETRAIN_BATCH)
            self.model.train_c(batch_s, batch_q_c, batch_w)

        # copy the pretrained weights into the target network verbatim
        self.model_.copy_weights(self.model, rho=1.0)
Beispiel #2
0
	def post(self):
		"""Handle the workout-builder form POST: create the Workout entity,
		collect its exercises from the request fields, persist them in order
		and redirect home.

		NOTE(review): Python 2 syntax (print statements); assumes GAE-style
		Workout/Exercise models and a module-level PARENT_KEY -- confirm.
		"""
		workout_title = self.request.get("workout")
		new_workout = Workout(parent = PARENT_KEY, title = workout_title)
		new_workout.put()
		# every POSTed field except "workout" encodes one exercise attribute
		arguments = self.request.arguments()
		# pre-size one dict per exercise; utils.findLength presumably counts
		# distinct exercise indices among the field names -- TODO confirm
		exercises = [ {} for i in range( utils.findLength(arguments) ) ]
		print exercises
		for argu in arguments:
			if str(argu) != 'workout':
				# findPosition extracts the exercise index from the field name
				num = utils.findPosition(argu)
				if utils.is_time(argu):
					exercises[num]['time'] = int(self.request.get(argu))
				if utils.is_rest(argu):
					exercises[num]['rest'] = int(self.request.get(argu))
				if utils.is_name(argu): 
					exercises[num]['name'] = str(self.request.get(argu))
		print exercises
		for counter, exercise in enumerate(exercises): ## Needs to be ordered
			print counter
			print exercise
			# persist each exercise with its list index as the display order
			new_exercise = Exercise(parent = PARENT_KEY, 
									name = exercise['name'],
									time = exercise['time'],
									rest = exercise['rest'],
									workout_title =  workout_title,
									order =  counter)
			new_exercise.put()

		self.redirect('/home') 
Beispiel #3
0
# Open the training log: append when resuming from a checkpoint, else truncate.
if config.LOG_FILE:
    if args.load:
        log_file = open(config.LOG_FILE, 'a')
    else:
        log_file = open(config.LOG_FILE, 'w')

# flatten one level of nesting: [[a, b], [c]] -> [a, b, c]
flatten = lambda l: [item for sublist in l for item in sublist]

# buffers for episodes finished since the last evaluation
finished_s = []
finished_y = []
finished_cnt = 0

print("\nTraining...")
for ep_steps in range(1, config.TRAINING_EPOCHS):
    # periodic evaluation/reporting every EPOCH_STEPS steps
    if utils.is_time(ep_steps, config.EPOCH_STEPS):
        # no gradients needed while evaluating
        with torch.no_grad():
            fps_ = fps.fps(ep_steps)

            # trn_r, trn_fc, trn_acc = (0, 0, 0)
            trn_fc, trn_acc = log_trn.eval()
            val_fc, val_acc = log_val.eval()
            tst_fc, tst_acc = log_tst.eval()
            # tstg_r, tstg_fc, tstg_acc = (0, 0, 0)

            print()
            print(
                f"ep_steps {ep_steps}: TRN: {trn_fc:.2f}/{trn_acc:.2f} | VAL: {val_fc:.2f}/{val_acc:.2f} | TST: {tst_fc:.2f}/{tst_acc:.2f} | FPS: {fps_:.2f}"
            )

            if config.LOG_FILE:
            # NOTE(review): snippet is truncated here; the log-file write that
            # presumably follows this `if` is not visible in this chunk.
Beispiel #4
0
# Training loop: train on random batches, evaluate on the fixed test batch
# every 100 epochs, and track the test loss for manual LR scheduling.
lr = LR
losses = []       # test-loss history, one entry per evaluation
best_loss = 1e6   # lowest test loss observed so far
lr_fails = 0      # consecutive evaluations where the loss got worse

net.set_lr(lr)
print("LR: {:.2e}".format(lr))

fps = utils.Fps()
fps.start()

for e in range(EPOCHS):
    train_x, train_y, train_lm = get_batch(BATCH_SIZE)
    net.train(train_x, train_y, MAX_LENGTH, train_lm)

    # periodic evaluation on the held-out test batch
    if utils.is_time(e, 100):
        pred_y, msks = net(test_x, MAX_LENGTH)
        pred_y = pred_y.argmax(dim=2).detach().cpu().numpy()

        # a position counts as correct when it matches the target or is masked
        # out by the length mask (1 - test_lm covers positions past the end)
        cond = np.logical_or((pred_y == test_y), (1 - test_lm))
        # a sample is correct only if all of its positions are correct
        corr = np.all(cond, 1).mean()
        test_loss = net.get_loss(test_x, test_y, MAX_LENGTH, test_lm).item()

        print("Epoch {}: loss {:.3f}, corr: {:.0f}%, fps: {:.1f}".format(
            e, test_loss, corr * 100, fps.fps(e)))
        losses.append(test_loss)

        # no improvement -> count a failure (snippet truncated; the LR decay
        # that presumably follows lr_fails is not visible in this chunk)
        if test_loss > best_loss:
            lr_fails += 1
            print("." * lr_fails)
Beispiel #5
0
# Fill the experience pool with initial transitions before training begins.
while pool.total < config.POOL_SIZE:
    agent.step()
    utils.print_progress(pool.total, config.POOL_SIZE, step=10)

# clear cache
gc.collect()
torch.cuda.empty_cache()

lambda_last_grad = 0.
# current parameter vector: model weights plus the feature factor (lambda)
old_params = np.concatenate(
    [brain.model.param_array(), [config.FEATURE_FACTOR]])

print("\nStarting..")
for epoch in range(epoch_start, config.MAX_TRAINING_EPOCHS + 1):
    # save progress
    if utils.is_time(epoch, config.SAVE_EPOCHS):
        brain._save()

        # persist the run state so training can resume from this epoch
        save_data = {}
        save_data['epoch'] = epoch
        save_data['lr'] = brain.lr
        save_data['lmb'] = config.FEATURE_FACTOR
        save_data['avg_l'] = avg_l.__dict__

        with open('run.state', 'w') as file:
            json.dump(save_data, file)

    # update exploration
    if utils.is_time(epoch, config.EPSILON_UPDATE_EPOCHS):
        agent.update_epsilon(epoch)
        brain.update_epsilon(epoch)
        # NOTE(review): snippet truncated here; the training step that
        # presumably follows inside the loop is not visible in this chunk.
# Fill the experience pool with initial transitions before training begins.
print("Initializing pool..")
while pool.total < config.POOL_SIZE:
    agent.step()
    utils.print_progress(pool.total, config.POOL_SIZE, step=10)

# clear cache
gc.collect()
torch.cuda.empty_cache()

lambda_last_grad = 0.
# current parameter vector: model weights plus the feature factor (lambda)
old_params = np.concatenate([brain.model.param_array(), [config.FEATURE_FACTOR]])

print("\nStarting..")
for epoch in range(epoch_start, config.MAX_TRAINING_EPOCHS + 1):
    # save progress
    if utils.is_time(epoch, config.SAVE_EPOCHS):
        brain._save()

        # persist the run state so training can resume from this epoch
        save_data = {}
        save_data['epoch'] = epoch
        save_data['lr'] = brain.lr
        save_data['lmb'] = config.FEATURE_FACTOR
        save_data['avg_l'] = avg_l.__dict__

        with open('run.state', 'w') as file:
            json.dump(save_data, file)
    
    # update exploration
    if utils.is_time(epoch, config.EPSILON_UPDATE_EPOCHS):
        agent.update_epsilon(epoch)
        brain.update_epsilon(epoch)
Beispiel #7
0
	# (tail of an evaluation helper whose `def` lies above this chunk)
	print(f"Metrics ({name}):")
	print(metrics.classification_report(y, y_))

	return acc

fps = utils.Fps()
fps.start()

best_acc = 0   # best training accuracy observed so far
fails = 0      # evaluations since the last improvement
lr = config.OPT_LR

print("\nTraining...")
for epoch in range(config.TRAINING_EPOCHS):
	# periodic evaluation on the train/validation/test splits
	if utils.is_time(epoch, config.EPOCH_STEPS):
		# no gradients needed while evaluating
		with torch.no_grad():
			fps_ = fps.fps(epoch)

			# NOTE(review): `eval` here is the project helper above, which
			# shadows the builtin eval
			trn_acc = eval(data_trn, data_trn_y, 'train')
			val_acc = eval(data_val, data_val_y, 'validation')
			tst_acc = eval(data_tst, data_tst_y, 'test')

			# check for improvement
			if trn_acc > best_acc:
				fails = 0
				best_acc = trn_acc
				impr = "<"
			else:		
				impr = "."
			# NOTE(review): snippet truncated here; the use of `impr`/`fails`
			# that presumably follows is not visible in this chunk.
Beispiel #8
0
#==============================
# Fill the experience pool with initial transitions before training starts.
print("Initializing pool..")
while pool.total < config.POOL_SIZE:
    agent.step()
    utils.print_progress(pool.total, config.POOL_SIZE, step=10)

# release cached memory before the training loop allocates
gc.collect()
torch.cuda.empty_cache()

print("\nStarting..")

for ep in range(epoch_start, config.MAX_TRAINING_EPOCHS + 1):
    # periodically checkpoint the model together with the run state
    if utils.is_time(ep, config.SAVE_EPOCHS):
        brain._save()

        run_state = {
            'epoch': ep,
            'lr': brain.lr,
            'avg_r': avg_r.__dict__,
        }
        with open('run.state', 'w') as fh:
            json.dump(run_state, fh)

    # anneal the exploration rate on a fixed schedule
    if utils.is_time(ep, config.EPSILON_UPDATE_EPOCHS):
        agent.update_epsilon(ep)
        brain.update_epsilon(ep)