Example #1
import math

import chainer
from chainer import optimizers
import numpy.random as npr


def train_nn(model, train_smiles, train_raw_targets, num_epoch=1000, batch_size=128, seed=0,
             validation_smiles=None, validation_raw_targets=None):
    # Normalize the targets; undo_norm maps predictions back to the original scale.
    # normalize_array is defined elsewhere in this project (a sketch follows below).
    train_targets, undo_norm = normalize_array(train_raw_targets)
    training_curve = []
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    num_data = len(train_smiles)
    x = train_smiles
    y = train_targets
    npr.seed(seed)  # the seed argument was previously accepted but never used
    sff_idx = npr.permutation(num_data)  # one fixed shuffle, reused every epoch
    for epoch in range(num_epoch):
        for idx in range(0, num_data, batch_size):
            # NumPy clamps slice ends past the array length, so the final,
            # possibly shorter batch needs no explicit bound check.
            batched_x = x[sff_idx[idx:idx + batch_size]]
            batched_y = y[sff_idx[idx:idx + batch_size]]
            model.zerograds()
            loss = model(batched_x, batched_y)
            loss.backward()
            optimizer.update()
        if epoch % 100 == 0:
            train_preds = model.mse(train_smiles, train_raw_targets, undo_norm)
            cur_loss = float(loss.data)  # public accessor instead of loss._data[0]
            training_curve.append(cur_loss)
            print("Iteration", epoch, "loss", math.sqrt(cur_loss),
                  "train RMSE", math.sqrt(float(train_preds.data)))
            if validation_smiles is not None:
                validation_preds = model.mse(validation_smiles, validation_raw_targets, undo_norm)
                print("Validation RMSE", epoch, ":", math.sqrt(float(validation_preds.data)))

    return model, training_curve, undo_norm
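
Both training loops call a normalize_array helper that is not shown in these examples. A minimal sketch of what it plausibly does, assuming it standardizes the targets and returns a closure that inverts the transform (the exact behavior in the original project may differ):

import numpy as np

def normalize_array(a):
    # Hypothetical sketch: standardize a and return the inverse transform.
    mean, std = np.mean(a), np.std(a)

    def undo_norm(x):
        # Map normalized predictions back to the original target scale.
        return x * std + mean

    return (a - mean) / std, undo_norm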
Example #2
def load_model_experiment():
    '''Load trained weights into a fresh model and evaluate it on the test set.'''
    # Main, model_params, args, x_tests, and y_tests are module-level globals
    # in the original project; they are not defined in this snippet.
    trained_NNFP = Main(model_params)
    serializers.load_npz(args.load_npz, trained_NNFP)
    _, undo_norm = normalize_array(y_tests)
    mse, input_attention = trained_NNFP.mse(x_tests, y_tests, undo_norm)
    return math.sqrt(float(mse.data)), input_attention
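
Example #2 assumes the weights were saved earlier with serializers.save_npz, the counterpart of the load_npz call above. A hedged end-to-end sketch tying the pieces together; Main, model_params, and the x_trains/y_trains/x_vals/y_vals variables are placeholders standing in for the project's own model class and data:

from chainer import serializers

# Hypothetical glue code: train, persist, then reload for evaluation.
model = Main(model_params)
model, curve, undo_norm = train_nn(model, x_trains, y_trains,
                                   validation_smiles=x_vals,
                                   validation_raw_targets=y_vals)
serializers.save_npz("trained_nnfp.npz", model)

test_rmse, input_attention = load_model_experiment()
print("Test RMSE:", test_rmse)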
Example #3
import math
import time

import chainer
from chainer import optimizers
import numpy.random as npr


def train_nn(model,
             train_smiles,
             train_raw_targets,
             seed=0,
             validation_smiles=None,
             validation_raw_targets=None):
    # Variant of Example #1 with a hard-coded batch size, more frequent
    # logging, and per-epoch wall-clock timing.
    train_targets, undo_norm = normalize_array(train_raw_targets)
    training_curve = []
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    num_epoch = 1000
    num_data = len(train_smiles)
    batch_size = 64
    x = train_smiles
    y = train_targets
    npr.seed(seed)  # the seed argument was previously accepted but never used
    sff_idx = npr.permutation(num_data)
    for epoch in range(num_epoch):
        epoch_time = time.time()
        for idx in range(0, num_data, batch_size):
            # NumPy clamps slice ends past the array length, so no bound
            # check is needed for the final, possibly shorter batch.
            batched_x = x[sff_idx[idx:idx + batch_size]]
            batched_y = y[sff_idx[idx:idx + batch_size]]
            model.zerograds()
            loss = model(batched_x, batched_y)
            loss.backward()
            optimizer.update()
        if epoch % 10 == 0:
            train_preds = model.mse(train_smiles, train_raw_targets, undo_norm)
            cur_loss = float(loss.data)
            training_curve.append(cur_loss)
            print("Iteration", epoch, "loss", math.sqrt(cur_loss),
                  "train RMSE", math.sqrt(float(train_preds.data)))
            if validation_smiles is not None:
                validation_preds = model.mse(validation_smiles,
                                             validation_raw_targets, undo_norm)
                print("Validation RMSE", epoch, ":",
                      math.sqrt(float(validation_preds.data)))
        print("1 EPOCH TIME : ", time.time() - epoch_time)

    return model, training_curve, undo_norm
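
Both trainers return training_curve, which records the most recent minibatch loss at each logging step. A minimal sketch for inspecting it after training; the matplotlib plotting and the model/x_trains/y_trains placeholders are assumptions, not part of the original code:

import math
import matplotlib.pyplot as plt

model, training_curve, undo_norm = train_nn(model, x_trains, y_trains)

# Convert the logged squared losses to RMSE, matching the printed values.
plt.plot([math.sqrt(l) for l in training_curve])
plt.xlabel("logging step")
plt.ylabel("train minibatch RMSE")
plt.savefig("training_curve.png")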