Code Example #1
    def __init__(self):
        self.NeuralNetwork = Neural_Network()
        X = np.array(([0, 0], [1, 0], [0, 1], [1, 1]), dtype=float)  # OR-gate inputs
        y = np.array(([0], [1], [1], [1]), dtype=float)  # OR-gate outputs
        y = y / 1  # no-op: labels are already in [0, 1]
        self.TrainNetwork = trainer(self.NeuralNetwork)
        self.TrainNetwork.train(X, y)

        # Initialize and train the neural network

        self.AIMLPs = AIMLParser()
        self.logObj = LogParser()
        # Initialize helper objects

        self.logFile = open("/var/log/apache2/error.log", 'r')  # open the Apache error log

        self.readTime = open("time.txt", 'r')

        self.lastStat = self.readTime.readline()
        self.updatStat = self.lastStat

        self.Xin = self.parseFile(
            self.logFile)  # parse the log file from the current time
        self.Xin = [1.0, 1.0]  # NOTE: overrides the parsed features with fixed values
        self.ResOut = self.NeuralNetwork.forward(self.Xin)

        if 1.0 == round(self.ResOut[0]):
            self.command = self.getCommand('Apache2')
            self.doOperation()
            print("Restart successful!")
        self.logFile.close()
Code Example #2
def choose_learning_rate(data, size_voca, num_classes):
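    """Sweep several learning rates under a mini-batch configuration and plot the resulting loss curves."""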
    learning_rate = [ 1, 0.1, 0.01, 0.001 ]
    kwargs = kwargs_global.copy()

    # full batch: no changes to the default settings are needed

    # mini batch config
    kwargs["batch_size"] = 50
    kwargs["num_epochs"] = 400
    kwargs["save_freq"] = 20
    kwargs["check_freq"] = 4

    # SGD config
    # kwargs["batch_size"] = 1
    # kwargs["num_epochs"] = 100
    # kwargs["save_freq"] = 800
    # kwargs["check_freq"] = 4
    # train models and plot loss
    for lr in learning_rate:
        # train
        model = logistic_model(input_dims=size_voca, class_nums=num_classes, weight_scale=1e-2, reg=1e-4)
        kwargs['learning_rate'] = lr
        Train = trainer(model=model, data=data, kwargs=kwargs.copy())
        _, info = Train.train()
        # plot losses
        x_loss = (np.arange(len(info[1])) + 1) * kwargs['save_freq']
        plt.plot(x_loss, info[1], label=("lr: " + str(lr)))
    
    plt.legend()
    plt.title("Loss")
    plt.xlabel("Step")
    plt.show()
Code Example #3
def main():
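  # Main training loop: forward pass, mean-reduce the loss, backward, clip gradients,
  # step the optimizer and scheduler, and log; a checkpoint is also registered at process exit.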

  #amp_handle = amp.init(enabled=True)

  # save a checkpoint at exit, regardless of the reason
  if pargs.checkpoint:
    atexit.register(trainer.checkpoint, model, optimizer, tag='exit', log=print)

  for (epoch, batch, step, bar), data in trainer(train_dataset, epochs=pargs.epochs, 
    progress='Training', shuffle=pargs.shuffle_training, batch_size=pargs.batch_size):

    trainer.state_dict['step'] = step + 1 # resume starting on the next step

    optimizer.zero_grad()

    if not model.training:
      model.train()

    loss_value, out = model(step, *data)
    loss_value = loss_value.mean() # multi-gpu accumulation
    torch.cuda.synchronize()

    loss_value.backward()
    torch.cuda.synchronize()

    trainer.log(step, loss=loss_value, m=model_obj.logger(step, data, out))
    
    #with amp_handle.scale_loss(loss_value, optimizer) as scaled_loss:
    #  scaled_loss.backward()
    
    # TODO: validation logging isn't working well, but I don't need it right now
    # if step % pargs.validation_frequency == 0 and step != 0:
    #   model.eval()
    #   val_gen = trainer(validation_dataset, batch_size=pargs.batch_size, progress='Validation', grad=False, leave=False)
    #   validation_loss = sum(model(*data)[0].item() for _, data in val_gen) / len(val_gen)
    #   trainer.log(step, validation=validation_loss)

    #   if validation_loss < trainer.state_dict.get('best_validation_loss', validation_loss + 1):
    #     trainer.state_dict['best_validation_loss'] = validation_loss
    #     path = trainer.checkpoint(model, optimizer, tag='best_validation', log=bar.write)

    # if step % pargs.checkpoint_frequency == 0  and step != 0:
    #   path = trainer.checkpoint(model, optimizer, tag='recent', log=bar.write)

    #   trainer.state_dict['running_training_loss'] = .95 * trainer.state_dict.get('running_training_loss', loss_value) + .05 * loss_value
    #   if trainer.state_dict['running_training_loss'] < trainer.state_dict.get('best_training_loss', 1e10):
    #     trainer.state_dict['best_training_loss'] = trainer.state_dict['running_training_loss']
    #     path = trainer.checkpoint(model, optimizer, tag='best_training', log=bar.write)

    #torch.nn.utils.clip_grad_value_(model.parameters(), pargs.max_grad)
    torch.nn.utils.clip_grad_norm_(model.parameters(), pargs.max_grad_norm)
    optimizer.step()

    if scheduler.step(step):
      trainer.log(step, lr=scheduler.lr(step))

    del loss_value
    del out
Code Example #4
def temp():
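    """Train a single logistic model, plot its loss/accuracy curves, and report test accuracy."""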
    # config model
    model = logistic_model(input_dims=size_voca, class_nums=num_classes, weight_scale=1e-2, reg=1e-4)

    # Training
    Train = trainer(model=model, data=data, kwargs=kwargs.copy())
    model, info = Train.train()
    
    # plot
    plot_loss_acc(kwargs.copy(), info)

    pred_test_y, _ = model.loss(test_x)
    test_acc = np.sum(np.argmax(pred_test_y, axis=1) == np.argmax(test_y, axis=1)) / len(test_x)
    print("test acc:", test_acc)
Code Example #5
def training_best_model(data, size_voca, num_classes, test_data, kwargs, learning_rate):
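    """Sweep learning rates, keep the model with the best validation accuracy, then evaluate it on the test set and plot its curves."""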
    best_val_acc = 0
    best_model = None
    best_kwargs = None
    best_info = None
    # train models and plot loss
    for lr in learning_rate:
        # train
        model = logistic_model(input_dims=size_voca, class_nums=num_classes, weight_scale=1e-2, reg=1e-4)
        kwargs['learning_rate'] = lr
        Train = trainer(model=model, data=data, kwargs=kwargs.copy())
        model, info = Train.train()
        if best_val_acc < info[0]:
            best_val_acc = info[0]
            best_model = model
            best_kwargs = kwargs.copy()
            best_info = info
    print("best hyperparameters: ", best_kwargs)
    get_test_acc(test_data, best_model)
    plot_loss_acc(best_kwargs, best_info)
Code Example #6
def main():
    params = NetConfig('config.yaml')
    print('-----------------> preparing DataLoader')
    dataset_args = params.hyperparameters['dataset']
    loader_args = params.hyperparameters['loader']
    batch_size = params.hyperparameters['network']['net']['batch_size']
    name = params.hyperparameters['network']['net']['name']
    description = params.discription  # note: the NetConfig attribute keeps its original spelling
    save_path = params.save_path
    epochs = params.epochs

    train_loader = torch.utils.data.DataLoader(
        dataset=ImageDataset(**dataset_args),
        batch_size=batch_size,
        **loader_args)
    print('-----------------> preparing model: {}'.format(name))
    net = network(params.hyperparameters['network'])
    coach = trainer(net, save_path, name, description)
    print('-----------------> start training')
    coach.train(data_loader=train_loader, epochs=epochs)
Code Example #7
def change_batch_size1(data, size_voca, num_classes):
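    """Sweep batch sizes (rescaling save_freq accordingly) and plot loss against the number of samples processed."""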
    batch_sizes = [ 1, 20, 200, 2000 ]
    kwargs = kwargs_global.copy()
    kwargs['num_epochs'] = 30
    # train models and plot loss
    for bz in batch_sizes:
        # train
        model = logistic_model(input_dims=size_voca, class_nums=num_classes, weight_scale=1e-2, reg=1e-4)
        kwargs['batch_size'] = bz
        kwargs['save_freq'] = kwargs_global['save_freq'] // (1 + np.log(bz))  # save more often (in steps) as the batch size grows
        Train = trainer(model=model, data=data, kwargs=kwargs.copy())
        _, info = Train.train()
        # plot losses
        x_loss = (np.arange(len(info[1])) + 1) * kwargs['save_freq'] * kwargs["batch_size"]
        plt.plot(x_loss, info[1], label=("batch size: " + str(bz)))

    plt.legend()
    plt.title("Loss")
    plt.xlabel("Number of training samples processed")
    plt.ylim(0, 2.5)
    plt.show()
Code Example #8
from modeling.layers.ONet import *
from trainer import *

if __name__ == '__main__':
    net = ONet()
    trainer = Trainer(net,
                      data_path='data/48',
                      save_path='weights/onet.pth',
                      batch_size=512)
    trainer(stop_value=0.001, net='onet')
Code Example #9
from trainer import *

if __name__ == '__main__':
    train = trainer()
    train.train()

Code Example #10
multiheadAttention = MultiHeadAttention(hid_dim=512, n_heads=32)
positionFeedForward = PositionFeedforward(hid_dim=512, feedForward_dim=2048)
sent_length, just_max = liar_dataset_train.get_max_lenghts()
model = arePantsonFire(sentence_encoder=statement_encoder,
                       explanation_encoder=justification_encoder,
                       multihead_Attention=multiheadAttention,
                       position_Feedforward=positionFeedForward,
                       hidden_dim=512,
                       max_length_sentence=sent_length,
                       max_length_justification=just_max,
                       input_dim=200)
trainer(model=model,
        train_dataloader=dataloader_train,
        val_dataloader=dataloader_val,
        num_epochs=101,
        path_to_save='C:/Users/suchi/Downloads/2017B2A70585P/checkpoint',
        checkpoint_path='C:/Users/suchi/Downloads/2017B2A70585P/checkpoint',
        checkpoint=10,
        train_batch=1,
        test_batch=1)
# Do not change module_list , otherwise no marks will be awarded
module_list = [
    liar_dataset_train, liar_dataset_val, dataloader_train, dataloader_val,
    statement_encoder, justification_encoder, multiheadAttention,
    positionFeedForward, model
]
del liar_dataset_val, liar_dataset_train, dataloader_train, dataloader_val

liar_dataset_test = dataset(prep_Data_from='test')
test_dataloader = DataLoader(dataset=liar_dataset_test, batch_size=1)
infer(model=model, dataloader=test_dataloader)
Code Example #11
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--data', required=True, help='directory containing mask annotation')
    parser.add_argument(
        '--path', required=True,
        help='the path that contains raw coco JPEG images')
    parser.add_argument('--traintest', type=int, default=1)
    parser.add_argument('--logdir', required=False, default='./logs')
    parser.add_argument('--checkpoint_dir', required=False, default=False)
    parser.add_argument('--salmodelpath', required=True)
    parser.add_argument('--LAMBDAsal', type=float, required=False, default=25000, help='lambda parameter for saliency loss')
    parser.add_argument('--LAMBDA_r', type=float, required=False, default=10, help='z reconstruction loss')
    parser.add_argument('--LAMBDAFM', type=float, required=False, default=10, help='lambda parameter for FM loss')
    parser.add_argument('--LAMBDAD', type=float, required=False, default=10, help='lambda parameter for critic loss')
    parser.add_argument('--LAMBDA_p', type=float, required=False, default=0.1, help='lambda parameter for perceptual loss')
    parser.add_argument('--LAMBDA_s', type=float, required=False, default=0.1, help='lambda parameter for sparse loss')
    parser.add_argument('--LAMBDA_tv', type=float, required=False, default=10, help='lambda parameter for TV regularization')
    parser.add_argument('--lr', type=float, required=False, default=0.00001)
    parser.add_argument('--lrd', type=float, required=False, default=0.00001)
    parser.add_argument('--maxstep', type=int, required=False, default=400)
    parser.add_argument('--nb_its', type=int, required=False, default=150000)
    parser.add_argument('--batch_size', type=int, required=False, default=4)
    parser.add_argument('--print_freq', type=int, required=False, default=500)
    parser.add_argument('--n_dis', type=int, default=3, help='number of discriminator layers')
    parser.add_argument('--n_scale', type=int, default=3, help='number of scales for discriminator')
    parser.add_argument('--ngf', type=int, default=64, help='number of channels in the generator')
    parser.add_argument('--ndf', type=int, default=64, help='number of channels in the discriminator')
    parser.add_argument('--nb_blocks', type=int, default=9, help='number of residual blocks for the generator')
    parser.add_argument('--G', default='increase', help='which generator to use')
    parser.add_argument('--D', default='MSD_global',  help='which discriminator to use')
    parser.add_argument('--D_dir', default='MSD_global',  help='which discriminator to use')
    parser.add_argument('--trainer', default='adv_increase_sftmx', help='which trainer to use')
    parser.add_argument('--get_data_G', default='get_data_coco', help='which data-loading function to use for the generator')
    parser.add_argument('--get_data_D', default='get_data_mix', help='which data-loading function to use for the discriminator')
    parser.add_argument('--dataset', default='coco', help='name of dataset used')
    parser.add_argument('--dataloader', default='CoCoLoader_rectangle', help='which dataloader to use')
    parser.add_argument('--dataloader_mix', default='Adobe5kCoCoLoader', help='which dataloader to use for D')
    parser.add_argument('--random_s', type=int, default=1, help='random s values or fixed')
    parser.add_argument('--hinge_lb', type=float, default=0.02, help='lower bound on hinge loss')
    parser.add_argument('--hinge_ub', type=float, default=0.1, help='upper bound on hinge loss')
    parser.add_argument('--interpolate', type=int, default=0, help='whether to interpolate (0/1)')
    parser.add_argument('--drop_remainder', type=int, default=1, help='drop remainder or not')
    parser.add_argument('--shape1', type=int, default=240, help='width of image')
    parser.add_argument('--shape2', type=int, default=320, help='height of image')
    parser.add_argument('--nb_gpu', type=int, default=1, help='number of gpus')
    parser.add_argument('--donormG', type=str2bool, default=True, help="Apply instance normalization in the generator")
    parser.add_argument('--donormD', type=str2bool, default=False, help="Apply instance normalization in the discriminator")
    parser.add_argument('--sl', type=float, default=0.3, help='tradeoff hyper-parameter')
    parser.add_argument('--zdim', type=int, default=10, help='length of latent variable z')
    parser.add_argument('--shuffle', type=int, default=0, help='shuffle or not')
    parser.add_argument('--video', type=int, default=0, help='use for video inference')
    parser.add_argument('--resave', type=str2bool, default=False, help='re-save the model')
    parser.add_argument('--zbinsize', type=int, default=11, help='the dimension of zbin')
    parser.add_argument('--startdecay', type=int, default=50, help='start decay at x% of total number of iterations')
    parser.add_argument('--nb_neurons', type=int, default=100, help='number of neurons to predict')
    parser.add_argument('--fc_dim', type=int, default=128, help='number of neurons in dense hidden layers')
    args = parser.parse_args()
    if args.traintest == 1:
        print('starting training')
        training_log = trainer(args, parser)
        print('DONE TRAINING')
        args.batch_size = 1
        args.nb_gpu = 1

        args.dataloader = 'CoCoLoader_rectangle_HR'
        args.get_data_G = 'get_data_coco'
        if not args.resave:
            tf.keras.backend.clear_session()
            print('START TESTING')
            tester(args, training_log)
            print('DONE TESTING')
    elif args.traintest == 0:
        print('starting training')
        trainer(args, parser)
        print('DONE TRAINING')
    elif args.traintest == 2:
        tf.keras.backend.clear_session()
        print('START TESTING')
        tester(args, args.checkpoint_dir)
        print('DONE TESTING')
    tf.keras.backend.clear_session()
Code Example #12
def main():
    """
    Take arguments: data dir, embedding dir, model checkpoint, and which embedding to use.
    """
    parser = argparse.ArgumentParser(description="feed me properly")
    parser.add_argument("--data_dir")
    parser.add_argument("--embed_dir")
    #parser.add_argument("--embed_type")
    parser.add_argument("--do_preprocess", type=str, default="True")
    parser.add_argument("--use_extra_features")
    parser.add_argument("--validate", default=True)
    parser.add_argument("--local_validation", type=str, default="True")
    parser.add_argument("--lower_text")
    parser.add_argument("--max_vocab_words", type=int, default=90000)
    parser.add_argument("--max_seq_len", type=int, default=70)
    parser.add_argument("--filters")
    parser.add_argument("--use_embeddings", default="False")
    parser.add_argument("--load_glove", default=None)
    parser.add_argument("--load_fast", default=None)
    parser.add_argument("--load_para", default=None)
    parser.add_argument("--single_matrix", default=None)
    parser.add_argument("--mean_matrix", default=None)
    parser.add_argument("--concat_matrix", default=None)
    parser.add_argument("--splits", type=int, default=2)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=512)

    parser.add_argument("--save_result", type=str, default="True")  # compared against the string "True" below
    parser.add_argument("--result_dir", type=str, default="./")
    args = parser.parse_args()

    # Load data
    train = pd.read_csv(args.data_dir + "/train.csv")[:10000]  # NOTE: only the first 10,000 rows are kept
    test = pd.read_csv(args.data_dir + "/test.csv")[:100]  # NOTE: only the first 100 rows are kept
    n_test = len(test) * 3

    X_local_test = None
    Y_local_test = None
    train_feats = np.zeros((train.shape[0], 2))
    test_feats = np.zeros((test.shape[0], 2))
    local_test_feats = np.zeros((n_test, 2))

    if (args.do_preprocess == "True"):
        print("--------preprocessing------------")
        preproc = preprocess(train, test, args.use_extra_features,
                             args.lower_text)
        train, test, train_feats, test_feats = preproc.FullPreprocessing()
    print("after preprocess ", train.shape, test.shape)

    #Local Validation
    if (args.local_validation == "True"):
        print("--------preparing cross_validation------------")
        temp = train.copy()
        train = temp.iloc[:-n_test]
        X_local_test = temp.iloc[-n_test:]
        del temp
        Y_local_test = X_local_test.loc[:, "target"].values
        print("in local_val ", train.shape, X_local_test.shape)
        temp = train_feats.copy()
        train_feats = temp[:-n_test]
        local_test_feats = temp[-n_test:]
        del temp
        #train_feats, local_test_feats = (train_feats[:-n_test],
        #	                            train_feats[-n_test:])

    Y_train = train.loc[:, "target"].values

    #Tokenizer
    print("-------tokenizing------------")
    #print("to tokenizer ", train.shape, test.shape, X_local_test.shape)
    if (X_local_test is not None):
        tok = TokenizerBase(train["question_text"], test["question_text"],
                            X_local_test["question_text"], args.lower_text,
                            args.filters, args.max_vocab_words,
                            args.max_seq_len)
    else:
        tok = TokenizerBase(train["question_text"], test["question_text"],
                            None, args.lower_text, args.filters,
                            args.max_vocab_words, args.max_seq_len)
    X_train, X_test, X_local_test, vocab = tok.FullTokenizer()

    #Embedding and Modelling
    if (args.use_embeddings == "True"):
        print("-------------loading embeddings----------------")
        print(args.use_embeddings)
        embedder = Embedder(args.embed_dir)
        embedder.LoadIndexFile(args.load_glove, args.load_fast, args.load_para,
                               args.embed_dir)

        if (args.single_matrix):
            single_matrix = embedder.GetSingleMatrix(args.single_matrix, vocab)
            if (args.use_extra_features == "True"):
                model = ModelWithFeats(300, args.max_vocab_words,
                                       args.max_seq_len, single_matrix)
            else:
                model = Model(300, args.max_vocab_words, args.max_seq_len,
                              single_matrix)
        elif (args.concat_matrix):  # expects two embedding names separated by a space
            embed1, embed2 = args.concat_matrix.split()
            concat_matrix = embedder.GetConcatMatrix(embed1, embed2, vocab)
            if (args.use_extra_features == "True"):
                model = ModelWithFeats(300, args.max_vocab_words,
                                       args.max_seq_len, concat_matrix)
            else:
                model = Model(601, args.max_vocab_words, args.max_seq_len,
                              concat_matrix)
        elif (args.mean_matrix):  # expects two embedding names separated by a space
            embed1, embed2 = args.mean_matrix.split()
            mean_matrix = embedder.GetMeanMatrix(embed1, embed2, vocab)
            if (args.use_extra_features == "True"):
                model = ModelWithFeats(300, args.max_vocab_words,
                                       args.max_seq_len, mean_matrix)
            else:
                model = Model(300, args.max_vocab_words, args.max_seq_len,
                              mean_matrix)
    else:
        print("skipping loading embeddings")
        if (args.use_extra_features == "True"):
            model = ModelWithFeats(300, args.max_vocab_words, args.max_seq_len)
        else:
            model = Model(300, args.max_vocab_words, args.max_seq_len)

    #Load Data
    #getData(test_X, local_test_X, train_X ,train_Y, test_feats, lcoal_test_feats , n_splits = 3)
    #print(X_test.shape, X_local_test.shape, X_train.shape, Y_train.shape, test_feats.shape)
    print("-----------generating data-------------")
    test_loader, local_test_loader, splits, train_preds, test_preds, local_test_preds = GetData(
        X_test, X_local_test, X_train, Y_train, test_feats, local_test_feats,
        args.splits, args.batch_size, args.use_extra_features)

    logger = pd.DataFrame()

    print("-----------starting training--------------")
    train_glove, test_glove, local_test_glove = trainer(
        splits, model, X_train, Y_train, args.epochs, test_loader,
        local_test_loader, train_preds, test_preds, local_test_preds,
        train_feats, args.batch_size, args.validate, args.use_extra_features,
        logger)

    op = threshold_search(Y_train, train_glove)
    logger.loc[logger.shape[0] - args.epochs, "final_train_f1"] = op["f1"]

    best = metrics.f1_score(Y_local_test,
                            local_test_glove.mean(axis=1) > op["threshold"])
    logger.loc[logger.shape[0] - args.epochs, "mean_local_test_f1"] = best

    s = pd.DataFrame(test_glove).corr()
    a = []
    for i in range(s.shape[0]):
        for j in range(s.shape[1]):
            if (i != j):
                a.append(s.iloc[i, j])
    logger.loc[logger.shape[0] - args.epochs, "test_corr"] = np.mean(a)
    logger.loc[logger.shape[0], :] = "-"
    print(best)

    print(logger)

    if (args.save_result == "True"):
        logger.to_csv(args.result_dir + "/glove_only.csv")
        s.to_csv(args.result_dir + "/glove_only_corr.csv")
Code Example #13
File: main.py  Project: Senpai1199/nnfl-bitsf312
from LiarLiar import *
from trainer import *

# Your code goes here.
liar_dataset_train = dataset(prep_Data_from='train')
liar_dataset_val = dataset(prep_Data_from='val')

sentence_length, justification_length = liar_dataset_train.get_max_lenghts()

dataloader_train = DataLoader(dataset=liar_dataset_train, batch_size=50)
dataloader_val = DataLoader(dataset=liar_dataset_val, batch_size=25)

statement_encoder = Encoder(conv_layers=5, hidden_dim=512)
justification_encoder = Encoder(conv_layers=5, hidden_dim=512)

multiheadAttention = MultiHeadAttention(hid_dim=512, n_heads=32)
positionFeedForward = PositionFeedforward(hid_dim=512, feedForward_dim=2048)

model = arePantsonFire(statement_encoder, justification_encoder, multiheadAttention, positionFeedForward, 512, sentence_length, justification_length, liar_dataset_train.embedding_dim)

trainer(model, dataloader_train, dataloader_val, num_epochs=1, train_batch=1, test_batch=1)

# Do not change module_list , otherwise no marks will be awarded
module_list = [liar_dataset_train, liar_dataset_val, dataloader_train, dataloader_val, statement_encoder, justification_encoder, multiheadAttention, positionFeedForward, model]
del  liar_dataset_val, liar_dataset_train, dataloader_train, dataloader_val


liar_dataset_test = dataset(prep_Data_from='test')
test_dataloader = DataLoader(dataset=liar_dataset_test, batch_size=1)
infer(model=model, dataloader=test_dataloader)
Code Example #14
positionFeedForward = PositionFeedforward(512, 2048)
model = arePantsonFire(sentence_encoder=statement_encoder,
                       explanation_encoder=justification_encoder,
                       multihead_Attention=multiheadAttention,
                       position_Feedforward=positionFeedForward,
                       hidden_dim=512,
                       max_length_sentence=sentence_length,
                       max_length_justification=justification_length,
                       input_dim=200,
                       device='cuda:0')
# if num_epochs == 1, the training loop runs for a single epoch
trainer(model,
        dataloader_train,
        dataloader_val,
        1,
        path_to_save='/home/atharva',
        checkpoint_path='/home/atharva',
        checkpoint=100,
        train_batch=1,
        test_batch=1)

# Do not change module_list , otherwise no marks will be awarded
module_list = [
    liar_dataset_train, liar_dataset_val, dataloader_train, dataloader_val,
    statement_encoder, justification_encoder, multiheadAttention,
    positionFeedForward, model
]
del liar_dataset_val, liar_dataset_train, dataloader_train, dataloader_val

liar_dataset_test = dataset(prep_Data_from='test')
test_dataloader = DataLoader(dataset=liar_dataset_test, batch_size=1)
Code Example #15
from modeling.layers.PNet import *
from trainer import *

if __name__ == '__main__':
    net = PNet()
    trainer = Trainer(net, data_path='data/12', save_path='weights/pnet.pth', batch_size=512)
    trainer(stop_value=0.01)
Code Example #16
y = df.iloc[:, 3].to_numpy()  # .as_matrix() was removed in newer pandas; .to_numpy() is the replacement
x = df.drop(df.columns[[0, 1, 2, 3, 4, 5]], axis=1).to_numpy()

testX = x.tolist()[trainSize:]
testY = y.tolist()[trainSize:]
trainX = x.tolist()[:trainSize]
trainY = y.tolist()[:trainSize]

trainX = np.array([i for i in trainX])
testX = np.array([i for i in testX])
trainY = np.array([[i] for i in trainY])
testY = np.array([[i] for i in testY])

NN = NeuralNetwork(8, 1, 11)
T = trainer(NN)
T.train(trainX, trainY)

yHat = NN.forward(testX)
NN.printWeights()
print(yHat)
print(testY)

# Alternative setup from the other merge branch: a 7-input network and a single
# costPrime evaluation instead of full training.
# NN = NeuralNetwork(7, 1, 11)
# NN.costPrime(trainX[0], trainY[0])
Code Example #17
        test_data = np.vstack(test_data)
        test_labels = np.concatenate(test_labels, axis=0)
        break

datasess.run(testset.iterator.initializer)

#model_ = Autoencoder(n_z=32, loss_type='l2', use_conv=True)
#model_.load_weights("./models/conv_noise/model.ckpt")

model = trainer(Autoencoder,
                image,
                label,
                n_z=args.nz,
                num_epoch=30,
                train_size=num_sample,
                datasess=datasess,
                test_data=test_data,
                alpha_r=3.,
                loss_type=args.rec_loss,
                use_conv=True,
                with_noise=False,
                bench_model=None,
                pre_mode=args.pre_mode)
model_dir = './models/conv_log/'
os.makedirs(model_dir, exist_ok=True)  # do not fail if the directory already exists
model.save_weights(model_dir + "model.ckpt")
# for alpha in [1,3,5,10]:
#     # Train a new model
#     model = trainer(Autoencoder, image, label, n_z=args.nz, num_epoch=args.epoch, train_size=num_sample, datasess=datasess, test_data=test_data, alpha_r=alpha,
#                 loss_type=args.rec_loss, use_conv=True, with_noise=True, bench_model=model_)

#     model_dir = './models/nz_{}/alpha_{}'.format(int(args.nz), int(alpha))