Example #1
def run(RUN_TRAIN, RUN_TEST, RUN_TRAIN2, RUN_TEST2, RUN_SAVE):
    tools.mkdir()
    if RUN_TRAIN : trainer()
    if RUN_TEST : tester()
    if RUN_TRAIN2 : trainer(type_=2)
    if RUN_TEST2 : tester(type_=2)
    if RUN_SAVE: tools.saver() 
Example #2
def run(RUN_TRAIN, RUN_TEST, RUN_TRAIN2, RUN_TEST2, RUN_SAVE):
    tools.initializer()
    if RUN_TRAIN : trainer()
    if RUN_TEST : tester()
    if RUN_TRAIN2 : trainer(type_=2)
    if RUN_TEST2 : tester(type_=2)
    if RUN_SAVE: tools.saver()
    if RUN_DENSECRF : crf_runner(LOAD_MAT_FILE, RUN_TRAIN2)
    if GRID_SEARCH : grid_search(LOAD_MAT_FILE, RUN_TRAIN2) 
Example #3
    def __init__(self, memory_size=4):
        """Initialize Dialog Manager, Trainer Classes, Reading Training Data.

		Args:
			memory_size: Past Length of Conversation to be considered as History of the
						 conversation.

		"""
        self.conversation_count = 0

        # Initialize a queue with the required memory of past conversations
        self.history = deque("", memory_size)

        # Load the training sets for both states
        self.training_set_state1 = rdr.trainer_reader("training_data.csv").load()
        self.training_set_state2 = rdr.trainer_reader("training_data_2.csv").load()

        self.training_map_1 = dict({0: 1})
        self.training_map_2 = dict({0: 1})

        self.training_set_state1 = self._map_multiple_response(self.training_set_state1, self.training_map_1)
        self.training_set_state2 = self._map_multiple_response(self.training_set_state2, self.training_map_2)

        lambda_pi_1 = [
            0.2395105181493061,
            0.0006173868035890566,
            0.03237275574954535,
            0.0006901882640402509,
            0.44723785605466126,
            0.2795712949788579,
        ]
        lambda_pi_2 = [
            0.0015018046263117243,
            0.001374250681872707,
            0.12433409846344812,
            0.002426809340382256,
            0.663739481446308,
            0.06767643891963208,
        ]

        # Use the training sets to train two classifiers
        self.trn_1 = tr.trainer(0.002166020551556791, lambda_pi_1, self.training_set_state1)
        self.trn_2 = tr.trainer(0.006249742180327811, lambda_pi_2, self.training_set_state2)
        self.trn_1.train()
        self.trn_2.train()

        # Set threshold for dialogue generation
        self.threshold_1 = -0.000428095769801
        self.threshold_2 = -0.00154795755273
Example #4
File: p3.py Project: itsnoting/nnet
def main():
    #Training set initiated
    trainer_set = ubyte_unpack("./ubyte", "training")
    inputs, solutions = trainer_set.get_dataset(200)

    NN = Neural_Network()

    print "TRAINING CASES"

    #solutions = matrix_to_list(solutions)
    #Running with training cases before training
    yHat = matrix_to_list(NN.forward(inputs))
    before_solutions = matrix_to_list(solutions)
    percent = pred_perc(before_solutions, yHat)

    best_percent = 0
    best_weights = []
    T = trainer(NN)
    #Training Neural Network
    for i in range(100):
        yHat = matrix_to_list(NN.forward(inputs))
        before_solutions = matrix_to_list(solutions)
        percent = pred_perc(before_solutions, yHat)
        T.train(inputs, solutions)
        if percent > best_percent:
            print(percent)
            best_percent = percent
            best_W1, best_W2 = NN.get_weights()
            best_weights = [best_W1, best_W2]
        if best_weights:  # only restore weights once a best set has been recorded
            NN.set_weights(best_weights[0], best_weights[1])



    #Running with training cases after training
    yHat = matrix_to_list(NN.forward(inputs))
    solutions = matrix_to_list(solutions)
    percent = pred_perc(solutions, yHat)
    print "Prediction percentage after training:", percent * 100

    print "\nTESTING CASES"

    #Running with testing cases after training
    tester_set = ubyte_unpack("./ubyte", "testing")
    X, y = tester_set.get_dataset(2000)

    test_yHat = matrix_to_list(NN.forward(X))
    y = matrix_to_list(y)
    test_percent = pred_perc(y, test_yHat)
    print "Test prediction percentage:", test_percent * 100

    print "\n\nCurrent first level weights:"
    for weight in NN.W1:
        print weight
    print
    print "Current second level weights:"
    for weight in NN.W2:
        print weight
Example #5
 def __init__(self,samples,labels,kernel,c,strr):
     self.u= np.unique(labels)
     self.k=self.u.size
     self.kernel=kernel
     print "labels are: ", self.u
     dic=[]
     for i in range(self.k):
         indices1= labels==self.u[i]
         samples1= samples[indices1]
         labels1=np.ones(samples1.shape[0])
         for j in range(i+1, self.k):
             indices2= labels==self.u[j]
             samples2= samples[indices2]
             labels2=np.ones(samples2.shape[0])*(-1)
             t=trainer.trainer(kernel, c, str(self.u[i])+","+str(self.u[j]),strr+"/")
             t.train(np.vstack((samples1,samples2)), np.hstack((labels1,labels2)))
     np.savez(strr+'/labels', ulabels=self.u, k=1)
Example #6
def main():
    num_samples=100
    num_features=2
    samples = np.random.normal(size=num_samples * num_features).reshape(num_samples, num_features)
    labels = 2 * (samples.sum(axis=1) > 0) - 1.0

    svm01=trainer.trainer(kernel.Kernel.linear(), 20, '01')
    svm01.train(samples,labels)
    predictor01=predictor.predictor(samples,labels,kernel.Kernel.linear(),'01')
    print(predictor01.predict(np.array([0,0])))
    print(predictor01.predict(np.array([-1,-1])))
    print(predictor01.predict(np.array([-2,-2])))
    print(predictor01.predict(np.array([-5,-5])))

    #plt.subplot(2, 1, 1)
    #plt.scatter(samples[:,0].ravel(), samples[:,1].ravel(), c=labels, alpha=0.5)

    plt.show()
Example #7
 def __init__(self, samples, labels, kernel, c, strr):
     self.u = np.unique(labels)
     self.k = self.u.size
     self.kernel = kernel
     print "labels are: ", self.u
     dic = []
     for i in range(self.k):
         indices1 = labels == self.u[i]
         samples1 = samples[indices1]
         labels1 = np.ones(samples1.shape[0])
         for j in range(i + 1, self.k):
             indices2 = labels == self.u[j]
             samples2 = samples[indices2]
             labels2 = np.ones(samples2.shape[0]) * (-1)
             t = trainer.trainer(kernel, c,
                                 str(self.u[i]) + "," + str(self.u[j]),
                                 strr + "/")
             t.train(np.vstack((samples1, samples2)),
                     np.hstack((labels1, labels2)))
     np.savez(strr + '/labels', ulabels=self.u, k=1)
Example #8
def create_trainer():
        tc = trainer_class(pokemon_trainer_dict)
        pokedex_dict = {'creator': "The Professor", 'version': "1.0", 'mode': "Local", 'max entries': 150, 'seen': 0, 'obtained': 0, 
        'local entries': dict(), 'national entries': dict()}
        
        player_dict = {'name': 'Stefan', 'gender': 'Male', 'age': 23, 'region': 'Orre', 'hometown': 'Gateon Port',
                       'height': "{}\'{}\"".format(5,11),'weight': "{} lbs.".format(185),
                       'trainer class': trainer_class(pokemon_trainer_dict),
                       'trainer id': generate_id(),
                       'pokedex': None,
                       'party': [],
                       'badges': {},
                       'money': 3000,
                       'is player': True,
                       'wins': 0, 'losses': 0, 'draws': 0
                       }
        player = trainer(trainer_dict=player_dict)
        player.pokedex = pokedex(dex_dict=pokedex_dict, trainer=player)
        player.inventory = set_inventory(player)
        return player
Example #9
def main(_args):
    args = gap.parser.parse_args(_args)

    val_accs = []
    all_gc_accs = []

    for i in range(10):
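        # each round passes its index to load_data and records that round's best validation accuracy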
        start_time = time.perf_counter()
        dataset, train_loader, val_loader, test_loader = load_data(
            args.dataset, i)

        val_acc, gc_accs = trainer.trainer(args,
                                           args.dataset,
                                           train_loader,
                                           val_loader,
                                           test_loader,
                                           num_features=dataset.num_features,
                                           num_graph_class=dataset.num_classes,
                                           max_epoch=args.epochs,
                                           node_multi_label=False,
                                           graph_multi_label=False)
        val_accs.append(val_acc)
        all_gc_accs.append(np.array(gc_accs))
        end_time = time.perf_counter()
        spent_time = (end_time - start_time) / 60
        print(" It took: {:2f} minutes to complete one round....".format(
            spent_time))
        print(
            "\033[1;32m Best graph classification accuracy in {}th round is: {:4f} \033[0m"
            .format((i + 1), val_acc))

    all_gc_accs = np.vstack(all_gc_accs)
    all_gc_accs = np.mean(all_gc_accs, axis=0)
    final_gc = np.mean(val_accs)
    print(
        "\n\n\033[1;32m Average over 10 best results:  {:.4f}  \033[0m".format(
            final_gc))
    val_accs = ['{:.4f}'.format(i) for i in val_accs]
    print(" 10 Best results: ", np.asfarray(val_accs, float))
    print(" DiffPoll cross val:  {:.4f} ".format(np.max(all_gc_accs)))
    print(" DiffPoll argmax pos: ", np.argmax(all_gc_accs))
Example #10
def main():
    trainer = trainer_module.trainer()
    trainset, testset = data_loader.load_data()
    accuracy = trainer.train(trainset, testset)
    trainset, testset = data_loader.load_data()

    v, fooling_rates, accuracies, total_iterations = adversarial_perturbation.generate(
        accuracy, trainset, testset, trainer.net)

    plt.title("Fooling Rates over Universal Iterations")
    plt.xlabel("Universal Algorithm Iter")
    plt.ylabel("Fooling Rate on test data")
    plt.plot(total_iterations,fooling_rates)
    plt.show()


    plt.title("Accuracy over Universal Iterations")
    plt.xlabel("Universal Algorithm Iter")
    plt.ylabel("Accuracy on Test data")
    plt.plot(total_iterations, accuracies)
    plt.show()
Example #11
def main(args):

    if args.type_model in ['GCN', 'GAT', 'GCNII']:
        layers = layers_GCN
    else:
        layers = layers_SGCN

    acc_test_layers = []
    MI_XiX_layers = []
    dis_ratio_layers = []
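    # for each depth, average test accuracy, instance information gain, and group distance ratio over the random seeds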
    for layer in layers:
        args.num_layers = layer
        if args.type_norm == 'group':
            args = reset_weight(args)
        acc_test_seeds = []
        MI_XiX_seeds = []
        dis_ratio_seeds = []
        for seed in seeds:
            args.random_seed = seed
            set_seed(args)
            trnr = trainer(args)
            acc_test, MI_XiX, dis_ratio = trnr.train_compute_MI()
            acc_test_seeds.append(acc_test)
            MI_XiX_seeds.append(MI_XiX)
            dis_ratio_seeds.append(dis_ratio)
        avg_acc_test = np.mean(acc_test_seeds)
        avg_MI_XiX = np.mean(MI_XiX_seeds)
        avg_dis_ratio = np.mean(dis_ratio_seeds)

        acc_test_layers.append(avg_acc_test)
        MI_XiX_layers.append(avg_MI_XiX)
        dis_ratio_layers.append(avg_dis_ratio)

    print(
        f'experiment results of {args.type_norm} applied in {args.type_model} on dataset {args.dataset}'
    )
    print('number of layers: ', layers)
    print('test accuracies: ', acc_test_layers)
    print('instance information gain: ', MI_XiX_layers)
    print('group distance ratio: ', dis_ratio_layers)
Example #12
 def __init__(self, samples, labels, kernel, c):
     self.u = np.unique(labels)
     self.k = self.u.size
     self.kernel = kernel
     print "labels are: ", self.u
     dic = []
     for i in range(self.k):
         indices1 = labels == self.u[i]
         samples1 = samples[indices1]
         labels1 = np.ones(samples1.shape[0])
         for j in range(i + 1, self.k):
             if (i == 0 and j == 1 or i == 0 and j == 2
                     or i == 1 and j == 2):
                 continue
             indices2 = labels == self.u[j]
             samples2 = samples[indices2]
             labels2 = np.ones(samples2.shape[0]) * (-1)
             t = trainer.trainer(kernel, c,
                                 str(self.u[i]) + "," + str(self.u[j]))
             t.train(np.vstack((samples1, samples2)),
                     np.hstack((labels1, labels2)))
     np.savez('multipliers/labels', ulabels=self.u, k=1)
Example #13
def create_trainer_2():
    tc = trainer_class(pokemon_trainer_dict)
    pokedex_dict = {
        'creator': "The Professor",
        'version': "1.0",
        'mode': "Local",
        'max entries': 150,
        'seen': 0,
        'obtained': 0,
        'local entries': dict(),
        'national entries': dict()
    }

    player_dict = {
        'name': 'Karl',
        'gender': 'Male',
        'age': 21,
        'region': 'Orre',
        'hometown': 'Gateon Port',
        'height': "{}\'{}\"".format(6, 1),
        'weight': "{} lbs.".format(175),
        'trainer class': trainer_class(pokemon_trainer_dict),
        'trainer id': generate_id(),
        'pokedex': None,
        'party': [],
        'badges': {},
        'money': 3000,
        'is player': True,
        'wins': 0,
        'losses': 0,
        'draws': 0
    }
    player = trainer(trainer_dict=player_dict)
    player.pokedex = pokedex(dex_dict=pokedex_dict, trainer=player)
    inv = inventory(inv_dict=test_inv_dict)
    inv.set_owner(player)
    player.inventory = set_inventory(trainer=player)
    #player.capture_pokemon(pokemon(gd=squirtle_d)._randomize_vital_statistics(set_level=5))
    return player
Example #14
def create_trainer_2():
        tc = trainer_class(pokemon_trainer_dict)
        pokedex_dict = {'creator': "The Professor", 'version': "1.0", 'mode': "Local", 'max entries': 150, 'seen': 0, 'obtained': 0, 
        'local entries': dict(), 'national entries': dict()}
        
        player_dict = {'name': 'Karl', 'gender': 'Male', 'age': 21, 'region': 'Orre', 'hometown': 'Gateon Port',
                       'height': "{}\'{}\"".format(6,1),'weight': "{} lbs.".format(175),
                       'trainer class': trainer_class(pokemon_trainer_dict),
                       'trainer id': generate_id(),
                       'pokedex': None,
                       'party': [],
                       'badges': {},
                       'money': 3000,
                       'is player': True,
                       'wins': 0, 'losses': 0, 'draws': 0
                       }
        player = trainer(trainer_dict=player_dict)
        player.pokedex = pokedex(dex_dict=pokedex_dict, trainer=player)
        inv = inventory(inv_dict=test_inv_dict)
        inv.set_owner(player)
        player.inventory = set_inventory(trainer=player)
        #player.capture_pokemon(pokemon(gd=squirtle_d)._randomize_vital_statistics(set_level=5))
        return player
Example #15
def main():
    '''
    After the benchmarking, here we are training the model for real
    '''
    OUTER_BATCH = 1000
    CHNK_SIZE = 101
    EXTRACTIONS = 225
    U_LIM = .7

    INNER_BATCH = 25
    LR = 1e-4
    NUM_ST = 8
    HIDDEN_L = [4, 3]

    A = data_extractor("real_deal/ge_comp.dat", 14058, "if", upper_lim=U_LIM)
    mod = trainer(NUM_ST, HIDDEN_L)
    costs = []

    for i in range(EXTRACTIONS):
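        # draw a fresh batch of chunks from the extractor and fit the model on it in mini-batches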

        data = A.sample_chunks(chunck_size=CHNK_SIZE, num_chuncks=OUTER_BATCH)
        b_prop = get_prop(format_data(data, 1))
        X, y = separate_Xy(b_prop)

        cost_data = mod.fit(X - 1,
                            y - 1,
                            iters=int(OUTER_BATCH / INNER_BATCH),
                            batch_size=INNER_BATCH,
                            lr=LR)
        costs.extend(cost_data)

    iters, cost_values = zip(*costs)
    plt.plot(numpy.array(iters), 10 * numpy.array(cost_values), label="costs")
    plt.legend()
    plt.show()

    torch.save(mod.mod.state_dict(), "real_deal/saved_model.dat")
Example #16
def create_trainer():
    tc = trainer_class(pokemon_trainer_dict)
    pokedex_dict = {
        'creator': "The Professor",
        'version': "1.0",
        'mode': "Local",
        'max entries': 150,
        'seen': 0,
        'obtained': 0,
        'local entries': dict(),
        'national entries': dict()
    }

    player_dict = {
        'name': 'Stefan',
        'gender': 'Male',
        'age': 23,
        'region': 'Orre',
        'hometown': 'Gateon Port',
        'height': "{}\'{}\"".format(5, 11),
        'weight': "{} lbs.".format(185),
        'trainer class': trainer_class(pokemon_trainer_dict),
        'trainer id': generate_id(),
        'pokedex': None,
        'party': [],
        'badges': {},
        'money': 3000,
        'is player': True,
        'wins': 0,
        'losses': 0,
        'draws': 0
    }
    player = trainer(trainer_dict=player_dict)
    player.pokedex = pokedex(dex_dict=pokedex_dict, trainer=player)
    player.inventory = set_inventory(player)
    return player
Example #17
        from datasets import TextDataset
        dataset = TextDataset(cfg.DATA_DIR, split_dir,
                              base_size=cfg.TREE.BASE_SIZE,
                              transform=image_transform)
    assert dataset
    num_gpu = len(cfg.GPU_ID.split(','))
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=cfg.TRAIN.BATCH_SIZE * num_gpu,
        drop_last=True, shuffle=bshuffle, num_workers=int(cfg.WORKERS))

    # Define models and go to train/evaluate
    if not cfg.GAN.B_CONDITION:
        from trainer import GANTrainer as trainer
    else:
        from trainer import condGANTrainer as trainer
    algo = trainer(output_dir, dataloader, imsize)

    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        algo.evaluate(split_dir)
    end_t = time.time()
    print('Total time for training:', end_t - start_t)
    ''' Running time comparison for 10epoch with batch_size 24 on birds dataset
        T(1gpu) = 1.383 T(2gpus)
            - gpu 2: 2426.228544 -> 4min/epoch
            - gpu 2 & 3: 1754.12295008 -> 2.9min/epoch
            - gpu 3: 2514.02744293
    '''
Example #18
            "lr": lr,
            "epoch": epoch,
            "seed": seed,
            "n_train": n_train,
            "initial_state_all_zero": initial_state_all_zero,
            "rslt_dir_name": rslt_dir_name
        }

        print("experiment_params")
        for key, val in experiment_params.items():
            print("\t{}:{}".format(key, val))

        RLT_DIR = create_RLT_DIR(experiment_params)

    # =========================== training part =========================== #
    mytrainer = trainer(model)
    mytrainer.set_result_saving(RLT_DIR, save_freq, saving_num)
    mytrainer.set_data_set(hidden_train, obs_train, hidden_test, obs_test)
    metrics, hidden_val, prediction_val = mytrainer.train(
        lr, epoch, print_freq)

    loss_trains, loss_tests, MSE_trains, MSE_tests = metrics
    hidden_val_train, hidden_val_test = hidden_val
    prediction_val_train, prediction_val_test = prediction_val

    # ==================== another data saving part ==================== #
    if store_res and not use_stock_data:
        plot_hidden(RLT_DIR,
                    np.mean(hidden_val_train, axis=2),
                    hidden_train[0:saving_num],
                    is_test=False)
Example #19
        transforms.RandomCrop(imsize),
        transforms.RandomHorizontalFlip()
    ])

    from datasets import TextDataset

    dataset = TextDataset(cfg.DATA_DIR,
                          split_dir,
                          cfg.EMBEDDING_TYPE,
                          base_size=cfg.TREE.BASE_SIZE,
                          transform=image_transform)

    assert dataset
    num_gpu = len(cfg.GPU_ID.split(','))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=cfg.TRAIN.BATCH_SIZE *
                                             num_gpu,
                                             drop_last=True,
                                             shuffle=bshuffle,
                                             num_workers=int(cfg.WORKERS))

    # Define models and go to train
    from trainer import condGANTrainer as trainer
    algo = trainer(output_dir, dataloader)

    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    end_t = time.time()
    print('Total time for training:', end_t - start_t)
Example #20
from load_dataset import load_dataset, train_test_split
from trainer import trainer

Dataset_paths = "Dataset/"
img_size, num_channels, num_classes = 28, 3, 10
train_percentage = 0.8
keep_prop = 0.75

batch_size = 32
num_of_epochs = 16

data, labels = load_dataset(Dataset_paths, img_size, num_channels, num_classes)
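# split 80/20 into train and validation sets (train_percentage = 0.8)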
train_data, train_labels, val_data, val_labels = train_test_split(
    data, labels, train_percentage)
trainer(batch_size, num_of_epochs, keep_prop, train_data, train_labels,
        val_data, val_labels)
Example #21
    image_transform = transforms.Compose([
        transforms.RandomCrop(imsize),
    ])
    
    if cfg.DATASET_NAME == 'chexpert':
        dataset = ChexpertDataset(cfg.DATA_DIR, split_dir,
                            base_size=cfg.TREE.BASE_SIZE,
                            transform=image_transform)
    else:
        dataset = TextDataset(cfg.DATA_DIR, split_dir,
                            base_size=cfg.TREE.BASE_SIZE,
                            transform=image_transform)
    assert dataset
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=cfg.TRAIN.BATCH_SIZE,
        drop_last=True, shuffle=bshuffle, num_workers=int(cfg.WORKERS))

    # Define models and go to train/evaluate
    algo = trainer(output_dir, dataloader, dataset.n_words, dataset.ixtoword)

    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        '''generate images from pre-extracted embeddings'''
        if cfg.B_VALIDATION:
            algo.sampling(split_dir)  # generate images for the whole valid dataset
        else:
            gen_example(dataset.wordtoix, algo)  # generate images for customized captions
    end_t = time.time()
    print('Total time for training:', end_t - start_t)
Example #22
vocab = {}
word2id, id2word = {}, {}
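# read the vocabulary file: one "word_id <TAB> word <TAB> count" row per line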
with codecs.open(os.path.join(args.data, 'vocab_25K'), 'r', 'utf-8') as f:
    for word_row in f:
        word_id, word_str, count = word_row.strip().split("\t")
        word_id, count = int(word_id), int(count)
        word2id[word_str] = word_id
        id2word[word_id] = word_str
        vocab[word_str] = True

# creating dirs to save the model
if not os.path.isdir(args.out):
    os.makedirs(args.out)

# training
print('training started...')
targets, contexts = trainer(args, word2id, source_files_names, corpusSize,
                            args.out)

# saving
print('saving objects...')
full_out_name = os.path.join(args.out, 'stats.pkl')
save_obj = {
    'time': source_files_names,
    'context_size': args.window,
    'word_dim': args.size,
    'targets': targets,
    'contexts': contexts
}
pickle.dump(save_obj, open(full_out_name, 'wb'))
Example #23
import coco_proc, trainer

if __name__ == '__main__':

    z, zd, zt = coco_proc.process(context=5)
    trainer.trainer(z, zd)
Example #24
    statement_encoder
    , justification_encoder
    , multihead_Attention
    , position_Feedforward
    , 512
    , max_length_sentence
    )

#call to the trainer function
from trainer import trainer
path_to_save = None #Define it
path_to_checkpoint = None #Define it
trainer(
    model
    , dataloader_train
    , dataloader_val
    , -1
    , path_to_save
    , path_to_checkpoint)
#define liar_data_test as datasets.dataset with test data and test_dataloader on this
#dataset with batch_size = 1
liar_dataset_test = liar_dataset_val = dataset(prep_Data_from='test', purpose='test_class')
test_dataloader = DataLoader(liar_dataset_test, batch_size)

#function call to the infer function from utils.
from utils import infer
infer(model, test_dataloader)

module_list = [liar_dataset_train, liar_dataset_val, dataloader_train, dataloader_val, statement_encoder, justification_encoder, multiheadAttention, positionFeedForward, model]
del  liar_dataset_val, liar_dataset_train, dataloader_train, dataloader_val
Example #25
'''
Author: Igor Lapshun
'''

# These are the configuration / meta-parameters for this model
# (currently set to the optimum found by cross-validation).
config = {
    'model_cnn':'/home/igor/PycharmProjects/GRU/models/vgg19_weights.h5',
    'data': '/home/igor/PycharmProjects/GRU/data/coco',
    'save_dir': 'anypath',
    'dim_cnn': 4096,
    'optimizer': 'adam',
    'batch_size': 128,
    'epoch': 300,
    'output_dim': 1024,
    'dim_word': 300,
    'lrate': 0.05,
    'max_cap_length' : 50,
    'cnn' : '10crop',
    'margin': 0.05
}


if __name__ == '__main__':
    import trainer
    trainer.trainer(config)
Example #26
elif args.mode == 'people':
	from people_conv import Generator, Discriminator
else:
	print('Select mode from (mnist_fc, mnist_conv, people)')
	exit()

print('done fetch data')

images /= 255


G = Generator()
D = Discriminator()
z_shape = G.in_size
imshape = D.imshape

data = np.zeros((images.shape[0],imshape[0],imshape[1],imshape[2])).astype(np.float32)
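# resize every image to the discriminator's expected input shape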
for i in range(images.shape[0]):
	data[i,:] = np.asarray(Image.fromarray(images[i]).resize((imshape[1],imshape[2])))

print('start training')
loss_g_mem,loss_d_mem = trainer(G,D,data,len_z=z_shape,batchsize=args.batchsize,save_interval=args.save_interval,output_dir=args.output_dir,n_epoch=args.n_epoch,pre_epoch=args.pre_epoch,G_path=args.gen,D_path=args.dis)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(args.n_epoch-args.pre_epoch),loss_d_mem,'r')
ax.plot(range(args.n_epoch-args.pre_epoch),loss_g_mem,'b')
plt.title('loss')
plt.show()

plt.show()
Example #27
'''
Author: Igor Lapshun
'''

# These are the configuration / meta-parameters for this model
# (currently set to the optimum found by cross-validation).
config = {
    'model_cnn': '/home/igor/PycharmProjects/GRU/models/vgg19_weights.h5',
    'data': '/home/igor/PycharmProjects/GRU/data/coco',
    'save_dir': 'anypath',
    'dim_cnn': 4096,
    'optimizer': 'adam',
    'batch_size': 128,
    'epoch': 300,
    'output_dim': 1024,
    'dim_word': 300,
    'lrate': 0.05,
    'max_cap_length': 50,
    'cnn': '10crop',
    'margin': 0.05
}

if __name__ == '__main__':
    import trainer
    trainer.trainer(config)
Example #28
    label_loader = torch.utils.data.DataLoader(
        label_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE * num_gpu,
        drop_last=True,
        shuffle=True,
        num_workers=int(cfg.WORKERS))
    unlabel_loader = torch.utils.data.DataLoader(
        unlabel_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE * num_gpu,
        drop_last=True,
        shuffle=True,
        num_workers=int(cfg.WORKERS))

    from trainer import condGANTrainer as trainer
    algo = trainer(output_dir, label_loader, unlabel_loader)

    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        db_dataset = Cifar10Folder(cfg.DATA_DIR,
                                   "cifar10_unlabel",
                                   base_size=cfg.TREE.BASE_SIZE,
                                   transform=image_transform)
        test_dataset = Cifar10Folder(cfg.DATA_DIR,
                                     "cifar10_test",
                                     base_size=cfg.TREE.BASE_SIZE,
                                     transform=image_transform)

        db_dataloader = torch.utils.data.DataLoader(
Example #29
import os
import time

import tensorflow as tf  # needed below for tf.Session, tf.global_variables_initializer, tf.train.Saver

from trainer import trainer

hps = {'max_epoch': 178}

with tf.Session() as sess:
  
  is_training = True
  path = '/home/yao/cifar10/tmp/DenseNet'
  Input = Input(is_training=is_training, batch_num=128)

  if is_training is True:
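    # wipe checkpoints left from a previous run, or create the output directory if it does not exist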
    if os.path.exists(path):
      for fname in os.listdir(path):
        os.remove(path+'/'+fname)
    else:
      os.mkdir(path)

  model = DenseNet(k=12)
  model_trainer = trainer(sess, model, Input, hps, path)

  if is_training is True:
    sess.run(tf.global_variables_initializer())
    model_trainer.train()

  else:
    saver = tf.train.Saver()
    path = tf.train.latest_checkpoint(path)
    saver.restore(sess, path)
    model_trainer.eval()
Example #30
import torch

import utils
from option import args
from data import data
from trainer import trainer

torch.manual_seed(args.seed)
checkpoint = utils.checkpoint(args)

if checkpoint.ok:
    myLoader = data(args).getLoader()
    t = trainer(myLoader, checkpoint, args)
    while not t.terminate():
        t.train()
        t.test()

    checkpoint.done()
Example #31
def run_trainer(ps_rref):
    trainer_instance = trainer_class.trainer(ps_rref)
    trainer_instance.train()
Example #32
File: main.py Project: yk287/ML
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=opts.batch,
                                          shuffle=True)

testset = datasets.FashionMNIST('FMNIST_data/',
                                download=True,
                                train=False,
                                transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False)

import trainer
from tester import tester

#Hyperparameters for our network

output_size = opts.num_classes
model = discriminator(opts).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),
                       lr=opts.lr,
                       betas=(opts.beta1, opts.beta2))

#trains the model
trainer = trainer.trainer(opts, trainloader, model, optimizer, criterion)
trainer.train()

#tests the model
tester = tester(opts, testloader, model)
tester.test()
Example #33
        previous_model = nn.Sequential(*previous_layers)

    cfg = {
        'm': np.sqrt(DIM) if args.loss == 'tcl' else 0,
        'epoch': EPOCH,
        'class_num': CLASS_NUM,
        'layer': LAYER,
        'previousModel': previous_model,
        'loss': args.loss
    }

    loader = {'train': trainLoader, 'val': valLoader}

    for ep in range(EPOCH):

        train_loss, val_loss = trainer.trainer(model, loader, optimizer,
                                               loss_fn, cfg, centers, ep)

        t_losses.append(train_loss)
        v_losses.append(val_loss)

        state = {
            'epoch': EPOCH,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'train_loss': t_losses,
            'val_loss': v_losses,
            'dim': DIM,
            'lr': LR,
            'batch_size': BATCH_SIZE,
            'centers': centers
        }
Example #34
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')

    train_dataset = datasets.__dict__[args.dataset](is_train=True,
                                                    **vars(args))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_dataset = datasets.__dict__[args.dataset](is_train=False, **vars(args))
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # Define models and go to train/evaluate
    algo = trainer(train_loader, val_loader, args, njoints, device, idx)

    start_t = time.time()
    if args.evaluate:
        print('\nEvaluation only')
        loss, acc, predictions = algo.validate()
        save_pred(predictions, checkpoint=args.checkpoint)
    else:
        algo.main()

    end_t = time.time()
    print('Total time for training:', end_t - start_t)
Example #35
liar_dataset_train = dataset(prep_Data_from='train')
liar_dataset_val = dataset(prep_Data_from='val')
sent_len, just_len = liar_dataset_train.get_max_lenghts()
dataloader_train = DataLoader(dataset=liar_dataset_train, batch_size=50)
dataloader_val = DataLoader(dataset=liar_dataset_val, batch_size=25)
statement_encoder = Encoder(hidden_dim=512, conv_layers=5)
justification_encoder = Encoder(hidden_dim=512, conv_layers=5)
multiheadAttention = MultiHeadAttention(hid_dim=512, n_heads=32)
positionFeedForward = PositionFeedforward(hid_dim=512, feedForward_dim=2048)
model = arePantsonFire(statement_encoder, justification_encoder,
                       multiheadAttention, positionFeedForward, 512, sent_len,
                       just_len, liar_dataset_train.embedding_dim, 'cpu')
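# train for a single epoch on CPU with the train/validation dataloaders built above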
trainer(model,
        dataloader_train,
        dataloader_val,
        num_epochs=1,
        train_batch=1,
        test_batch=1,
        device='cpu')

# Do not change module_list , otherwise no marks will be awarded
module_list = [
    liar_dataset_train, liar_dataset_val, dataloader_train, dataloader_val,
    statement_encoder, justification_encoder, multiheadAttention,
    positionFeedForward, model
]
del liar_dataset_val, liar_dataset_train, dataloader_train, dataloader_val

liar_dataset_test = dataset(prep_Data_from='test')
test_dataloader = DataLoader(dataset=liar_dataset_test, batch_size=1)
infer(model=model, dataloader=test_dataloader)
Example #36
                        help='Path to dataset')
    parser.add_argument('-device', type=str, default="CPU", help='Device')
    parser.add_argument('-config', type=str, default=None, help='config')
    args = parser.parse_args()

    if args.config is None:
        exit("No config.ini found.")

    con = Configer(args.config)

    os.environ["DATASET"] = con.trainer.get_dataset()

    # newsender = sender(log, url_="http://node0:26657")
    newsender = sender(log, url_=con.bcfl.get_sender())
    # newdb = moddb("ipfs")
    newdb = moddb(con.bcfl.get_db())

    newagg = aggregator(log, con, newdb, newsender)
    # newtrain = trainer(log, args.dataset, newdb, newsender, devices=args.device)
    newtrain = trainer(log, con, newdb, newsender)

    newcontroller = State_controller(log, newtrain, newagg,
                                     con.agg.get_threshold())

    # Create the app
    # app = ABCIServer(app=SimpleBCFL(newcontroller), port=args.p)
    app = ABCIServer(app=SimpleBCFL(newcontroller),
                     port=con.bcfl.get_app_port())
    # Run it
    app.run()
Example #37
def train_single_env(env, Agent):
    trainer(env, Agent)
Example #38
            trainDict = {
                'TrainDF': trainDF,
                'ValDF': valDF,
                'FeaturesKeys': inpDict['FeaturesKeys'],
                'TargetsKeys': inpDict['TargetsKeys']
            }

            # get 20 most imp features
            fulFeaMod = ffwReg(n_inp=len(trainDict['FeaturesKeys']),
                               n_out=len(trainDict['TargetsKeys']),
                               n_hidd=networkParams['n_hidd'],
                               n_hidd_layers=networkParams['n_hidd_layers'],
                               act_fun=networkParams['act_func'],
                               dropout=networkParams['dropout'])
            fulFeaMod = trainer(fulFeaMod,
                                trainDict,
                                networkParams,
                                gpuId=project.setGPU)
            featuresImpLst = getFeatureImportance(fulFeaMod,
                                                  trainDict['ValDF'],
                                                  trainDict['FeaturesKeys'],
                                                  trainDict['TargetsKeys'])
            trainDict['FeaturesKeys'] = featuresImpLst[0:20]

            # train the model
            model = ffwReg(n_inp=len(trainDict['FeaturesKeys']),
                           n_out=len(trainDict['TargetsKeys']),
                           n_hidd=networkParams['n_hidd'],
                           n_hidd_layers=networkParams['n_hidd_layers'],
                           act_fun=networkParams['act_func'],
                           dropout=networkParams['dropout'])
            model = trainer(model,
Example #39
import trainer as t
from datetime import datetime

print(datetime.now().time())
test = t.trainer()
test.sequenceToSequenceInference()



Example #40
                               base_size=cfg.INITIAL_IMAGE_SIZE,
                               transform=image_transform)
    # elif cfg.DATASET_NAME == 'flowers':
    #     from datasets import FlowersDataset
    #
    #     dataset = FlowersDataset(cfg.DATA_DIR, split_dir,
    #                              base_size=cfg.INITIAL_IMAGE_SIZE,
    #                              transform=image_transform)
    assert dataset

    num_gpu = len(cfg.GPU_ID.split(','))

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=cfg.TRAIN.BATCH_SIZE *
                                             num_gpu,
                                             drop_last=True,
                                             shuffle=bshuffle,
                                             num_workers=int(cfg.WORKERS))

    # Define models and go to train/evaluate
    from trainer import RecurrentGANTrainer as trainer
    algo = trainer(output_dir, dataloader, imsize)

    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        algo.evaluate(split_dir)
    end_t = time.time()
    print('Total time for training:', end_t - start_t)
Example #41
def main(_):
    FLAGS = tf.app.flags.FLAGS

    # ========================================= parameter part begins ========================================== #
    Dx = FLAGS.Dx

    # --------------------- SSM flags --------------------- #
    # should q use true_X to sample? (useful for debugging)
    q_uses_true_X = FLAGS.q_uses_true_X

    # whether use input in q and f
    use_input = FLAGS.use_input

    # --------------------- printing and data saving params --------------------- #

    print_freq = FLAGS.print_freq

    if FLAGS.use_input:
        FLAGS.use_residual = False
    if FLAGS.use_2_q:
        FLAGS.q_uses_true_X = q_uses_true_X = False
    if FLAGS.flow_transition:
        FLAGS.use_input = use_input = False
    if FLAGS.TFS:
        FLAGS.use_input = use_input = False

    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    # ============================================= dataset part ============================================= #
    # generate data from simulation
    if FLAGS.generateTrainingData:
        model = "fhn"
        hidden_train, hidden_test, obs_train, obs_test, input_train, input_test = \
            generate_dataset(FLAGS.n_train, FLAGS.n_test, FLAGS.time, model=model, Dy=FLAGS.Dy, lb=-2.5, ub=2.5)

    # load data from file
    else:
        hidden_train, hidden_test, obs_train, obs_test, input_train, input_test = \
            load_data(FLAGS.datadir + FLAGS.datadict, Dx, FLAGS.Di, FLAGS.isPython2, use_input, q_uses_true_X)
        FLAGS.n_train, FLAGS.n_test, FLAGS.time = obs_train.shape[
            0], obs_test.shape[0], obs_test.shape[1]

    # clip saving_num to avoid it > n_train or n_test
    FLAGS.MSE_steps = min(FLAGS.MSE_steps, FLAGS.time - 1)
    FLAGS.saving_num = saving_num = min(FLAGS.saving_num, FLAGS.n_train,
                                        FLAGS.n_test)

    print("finished preparing dataset")

    # ============================================== model part ============================================== #
    SSM_model = SSM(FLAGS)

    # SMC class to calculate loss
    SMC_train = SMC(SSM_model, FLAGS, name="log_ZSMC_train")

    # =========================================== data saving part =========================================== #
    # create dir to save results
    Experiment_params = {
        "np": FLAGS.n_particles,
        "t": FLAGS.time,
        "bs": FLAGS.batch_size,
        "lr": FLAGS.lr,
        "epoch": FLAGS.epoch,
        "seed": FLAGS.seed,
        "rslt_dir_name": FLAGS.rslt_dir_name
    }

    RLT_DIR = create_RLT_DIR(Experiment_params)
    save_experiment_param(RLT_DIR, FLAGS)
    print("RLT_DIR:", RLT_DIR)

    # ============================================= training part ============================================ #
    mytrainer = trainer(SSM_model, SMC_train, FLAGS)
    mytrainer.init_data_saving(RLT_DIR)

    history, log = mytrainer.train(obs_train, obs_test, hidden_train,
                                   hidden_test, input_train, input_test,
                                   print_freq)

    # ======================================== final data saving part ======================================== #
    with open(RLT_DIR + "history.json", "w") as f:
        json.dump(history, f, indent=4, cls=NumpyEncoder)

    Xs, y_hat = log["Xs"], log["y_hat"]
    Xs_val = mytrainer.evaluate(Xs, mytrainer.saving_feed_dict)
    y_hat_val = mytrainer.evaluate(y_hat, mytrainer.saving_feed_dict)
    print("finish evaluating training results")

    plot_training_data(RLT_DIR, hidden_train, obs_train, saving_num=saving_num)
    plot_y_hat(RLT_DIR, y_hat_val, obs_test, saving_num=saving_num)

    if Dx == 2:
        plot_fhn_results(RLT_DIR, Xs_val)
    if Dx == 3:
        plot_lorenz_results(RLT_DIR, Xs_val)

    testing_data_dict = {
        "hidden_test": hidden_test[0:saving_num],
        "obs_test": obs_test[0:saving_num]
    }
    learned_model_dict = {"Xs_val": Xs_val, "y_hat_val": y_hat_val}
    data_dict = {
        "testing_data_dict": testing_data_dict,
        "learned_model_dict": learned_model_dict
    }

    with open(RLT_DIR + "data.p", "wb") as f:
        pickle.dump(data_dict, f)

    plot_MSEs(RLT_DIR, history["MSE_trains"], history["MSE_tests"], print_freq)
    plot_R_square(RLT_DIR, history["R_square_trains"],
                  history["R_square_tests"], print_freq)
    plot_log_ZSMC(RLT_DIR, history["log_ZSMC_trains"],
                  history["log_ZSMC_tests"], print_freq)