Example #1
 def __init__(self,
              obs_dim,
              latent_dim,
              hidden_dim=600,
              bidirectional=True,
              num_layers=2,
              min_var=1e-4,
              dropout_ratio=args.dropout):  # note: default is evaluated at import time, so a module-level args must exist
     super(StateEncoder, self).__init__()
     self.obs_dim = obs_dim
     self.latent_dim = latent_dim
     self.hidden_dim = hidden_dim
     if bidirectional:
         print("Using Bidir in EncoderLSTM")
     self.num_directions = 2 if bidirectional else 1
     self.num_layers = num_layers
     self.lstm = nn.LSTM(obs_dim,
                         hidden_dim // self.num_directions,
                         self.num_layers,
                         batch_first=True,
                         dropout=dropout_ratio,
                         bidirectional=bidirectional)
     self.attention_layer = model.SoftDotAttention(hidden_dim, obs_dim)
     self.post_lstm = nn.LSTM(hidden_dim,
                              hidden_dim // self.num_directions,
                              self.num_layers,
                              batch_first=True,
                              dropout=dropout_ratio,
                              bidirectional=bidirectional)
     self.mean_network = model.MLP(hidden_dim, latent_dim)
     self.log_var_network = model.MLP(hidden_dim, latent_dim)
     # self.min_log_var = Variable(np.log(np.array([min_var])).astype(np.float32))
     self.min_log_var = torch.from_numpy(
         np.log(np.array([min_var])).astype(np.float32)).cuda()
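Several of the PyTorch examples here pass sizes to a model.MLP(input_dim, output_dim) helper whose definition is not included. A minimal sketch of what such a module might look like, assuming a single hidden layer (the hidden width and activation are illustrative, not confirmed by the source):

import torch.nn as nn

class MLP(nn.Module):
    """Hypothetical stand-in for the model.MLP(input_dim, output_dim) helper."""

    def __init__(self, input_dim, output_dim, hidden_dim=256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x):
        return self.net(x)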
Example #2
def gen_models():
    name1 = "10category_top3acc"
    model_name1 = "MLP"
    total_name1 = model_name1 + "_" + name1
    name2 = "8category_top3acc_extend"
    model_name2 = "MLP1"
    total_name2 = model_name2 + "_" + name2
    with tf.name_scope(model_name1) as scope1:
        sess1 = tf.Session()
        model1 = mod.MLP(name=model_name1,
                         sess=sess1,
                         scope=scope1,
                         output_dim=10,
                         input_dim=10)
        model1.init2()
        saver1 = tf.train.Saver(model1.var)
        save_path = abs_path + "/save/" + total_name1 + "/" + total_name1 + ".ckpt"
        saver1.restore(sess1, save_path=save_path)
    with tf.name_scope(model_name2) as scope2:
        sess2 = tf.Session()
        model2 = mod.MLP(name=model_name2,
                         sess=sess2,
                         scope=scope2,
                         output_dim=8,
                         input_dim=5)
        model2.init2()
        saver2 = tf.train.Saver(model2.var)
        save_path1 = abs_path + "/save/" + total_name2 + "/" + total_name2 + ".ckpt"
        saver2.restore(sess2, save_path=save_path1)
    return sess1, model1, sess2, model2
Example #3
 def train(self, dataX, dataY):
     data = dataset.MLPOnlineDataset(dataX=dataX, dataY=dataY)
     self.model_lstm = model.MLP(input_size=3, output_size=1)
     self.experiment = Experiment(config=self.config,
                                  model=self.model_lstm,
                                  dataset=data)
     self.experiment.run()
Example #4
def cross_validation(args):
    for iter in range(1, args.num_models + 1):
        args.lr = 10**np.random.uniform(-5, -1)
        args.weight_decay = 10**np.random.uniform(-5, 1)
        if args.modeltype in ['LR', 'MLP']:
            args.factor = np.random.randint(90, 111)
            model = models.LR(args) if args.modeltype == 'LR'\
                else models.MLP(args)
            print('{}, Model: {}, lr: {:.5f}, wd: {:.5f}, factor: {}'.format(
                iter, args.modeltype, args.lr, args.weight_decay, args.factor))
            with open(args.filepath, 'a') as f:
                f.write('%d, Model: %s, lr: %.5f, wd: %.5f, factor: %d\n' %
                        (iter, args.modeltype, args.lr, args.weight_decay,
                         args.factor))
        elif args.modeltype in ['CNN', 'CNNDeep']:
            args.embed_dim = np.random.randint(90, 111)
            args.kernel_num = np.random.randint(90, 111)
            args.dropout = np.random.uniform(0.1, 0.5)
            model = models.CNN(args) if args.modeltype == 'CNN'\
                else models.CNNDeep(args)
            print('{}, Model: {}, lr: {:.5f}, wd: {:.5f}, embed_dim: {}, '
                  'kernel_num: {}, dropout: {:.5f}'.format(
                      iter, args.modeltype, args.lr, args.weight_decay,
                      args.embed_dim, args.kernel_num, args.dropout))
            with open(args.filepath, 'a') as f:
                f.write('%d, Model: %s, lr: %.5f, wd: %.5f, embed_dim: %d, '
                        'kernel_num: %d, dropout: %.5f\n' %
                        (iter, args.modeltype, args.lr, args.weight_decay,
                         args.embed_dim, args.kernel_num, args.dropout))
        train_and_evaluate(args, model)
Example #5
 def __init__(self,
              obs_dim,
              latent_dim,
              view_num,
              path_len=2,
              hidden_dim=256,
              bidirectional=False,
              num_layers=1,
              dropout_ratio=args.dropout):  # note: default is evaluated at import time, so a module-level args must exist
     super(StateDecoder, self).__init__()
     self.obs_dim = obs_dim
     self.latent_dim = latent_dim
     self.path_len = path_len
     self.hidden_dim = hidden_dim
     self.num_directions = 2 if bidirectional else 1
     self.num_layers = num_layers
     self.view_num = view_num
     self.view_atten = model.SelfAttention(obs_dim + latent_dim, hidden_dim)
     self.fc1 = nn.Linear(obs_dim + latent_dim, hidden_dim)
     self.relu1 = nn.LeakyReLU()
     self.h_size = 1
     if bidirectional:
         self.h_size += 1
     self.h_size *= num_layers
     self.lstm = nn.LSTM(hidden_dim,
                         hidden_dim,
                         num_layers,
                         batch_first=True,
                         dropout=dropout_ratio,
                         bidirectional=bidirectional)
     self.mean_network = model.MLP(hidden_dim, obs_dim * view_num)
     self.log_var_network = model.Parameter(obs_dim * view_num,
                                            init=np.log(0.1))
Example #6
    def load_checkpoint(self, ckpt):

        if os.path.isfile(ckpt):

            ckpt = torch.load(ckpt, map_location=lambda storage, loc: storage)
            # Load model state
            if (self.model.input_size != ckpt['input_size']
                    or self.model.n_hidden != ckpt['n_hidden']
                    or self.model.hidden_size != ckpt['hidden_size']
                    or self.model.dropout_prob != ckpt['dropout_prob']):
                print('Reinstantiating model with correct configuration')
                self.model = model_.MLP(n_in=ckpt['input_size'],
                                        nh=ckpt['n_hidden'],
                                        n_h=ckpt['hidden_size'],
                                        dropout_prob=ckpt['dropout_prob'])
                print(self.model)
            self.model.load_state_dict(ckpt['model_state'])
            # Load optimizer state
            self.optimizer.load_state_dict(ckpt['optimizer_state'])
            # Load history
            self.history = ckpt['history']
            self.total_iters = ckpt['total_iters']
            self.cur_epoch = ckpt['cur_epoch']
            if self.cuda_mode:
                self.model = self.model.to(self.device)

        else:
            print('No checkpoint found at: {}'.format(ckpt))
Example #7
def set_model(model_file, args):

    # Define the Model
    print(args.xvars)
    args.region = args.data_region
    # in_features = (args.nlevs*(len(args.xvars)-3)+3)
    # if not args.train_on_y2:
    #     nb_classes = (args.nlevs*(len(args.yvars)))
    # else:
    #     nb_classes = (args.nlevs*(len(args.yvars2)))
    in_features = args.in_features
    nb_classes = args.nb_classes
    hidden_size = args.hidden_size
    # n_inputs,n_outputs=140,70
    # in_features, nb_classes=(args.nlevs*4+3),(args.nlevs*2)
    # hidden_size = int(1. * in_features + nb_classes)
    mlp = nn_model.MLP(in_features, nb_classes, args.nb_hidden_layers,
                       hidden_size)
    # mlp = nn_model.MLPSkip(in_features, nb_classes, args.nb_hidden_layers, hidden_size)
    # mlp = nn_model.MLPDrop(in_features, nb_classes, args.nb_hidden_layers, hidden_size)
    # Load the save model
    print("Loading PyTorch model: {0}".format(model_file))
    checkpoint = torch.load(model_file, map_location=torch.device('cpu'))
    mlp.load_state_dict(checkpoint['model_state_dict'])
    mlp.eval()
    print("Model's state_dict:")
    for param_tensor in mlp.state_dict():
        print(param_tensor, "\t", mlp.state_dict()[param_tensor].size())
    return mlp
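The checkpoint dictionary read above implies a save call of roughly the following shape. This is a sketch; only the 'model_state_dict' key is confirmed by the loading code, and any other keys are assumptions:

import torch

# Hypothetical save side matching the checkpoint layout read in set_model.
torch.save({'model_state_dict': mlp.state_dict()}, model_file)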
Example #8
def test():
    covs = return_covs()
    with tf.name_scope("hellokugou") as scope:
        if not os.path.exists("models/hellokugou"):
            os.mkdir("models/hellokugou")
        sess = tf.Session()
        model = mod.MLP(name="hellokugou",
                        sess=sess,
                        scope=scope,
                        output_dim=10)
        train_data, train_label, test_data, test_label, test_data_ = prepare_data(
            10, gen_label=True, static_box=True, covs=covs)
        model.init1(train_data, train_label, test_data, test_label)
        ac = []
        # for i in range(0, 20):
        acc, y_, output_label, ground_truth_label, out_pro = model.run1(
            name="with_genre")
        ac.append(acc)
        total = np.asarray(
            np.concatenate(
                (test_data_, np.asmatrix(ground_truth_label).transpose(),
                 np.asmatrix(output_label).transpose(), y_), 1))
        print(total)
        print(acc)
        print(np.mean(ac))
Example #9
def restore():
    with tf.name_scope("hellokugou") as scope:
        covs = return_covs()
        sess = tf.Session()
        model = mod.MLP(name="hellokugou",
                        sess=sess,
                        scope=scope,
                        output_dim=10)
        train_data, train_label, test_data, test_label, test_data_ = prepare_data(
            10, gen_label=True, static_box=True, covs=covs)
        model.init1(train_data, train_label, test_data, test_label)
        acc, y_, output_label, ground_truth_label, out_pro = model.run1(
            name="with_genre", steps=1, is_save=True)
        scaler = pre.MinMaxScaler()
        out_pro = np.asarray(out_pro, dtype=np.float32).transpose()
        out_pro = scaler.fit_transform(out_pro).transpose()
        sums = np.asmatrix(np.sum(out_pro, axis=1))
        out_pro = out_pro / sums.transpose()
        total = np.asarray(
            np.concatenate(
                (test_data_, np.asmatrix(ground_truth_label).transpose(),
                 np.asmatrix(output_label).transpose(),
                 np.asmatrix(out_pro, dtype=np.float32)), 1))
        print(total)
        print(acc)
        # print(out_pro)
        # print(np.sum(out_pro, axis=1))
    return total, out_pro
Example #10
def set_model(args):
    mlp = model.MLP(args.in_features, args.nb_classes, args.nb_hidden_layers,
                    args.hidden_size)
    # mlp = model.MLP_BN(in_features, nb_classes, nb_hidden_layer, hidden_size)
    pytorch_total_params = sum(p.numel() for p in mlp.parameters()
                               if p.requires_grad)
    print("Number of traninable parameter: {0}".format(pytorch_total_params))

    if args.loss == "mae":
        loss_function = torch.nn.functional.l1_loss  #torch.nn.L1Loss()
    elif args.loss == "mse":
        loss_function = torch.nn.functional.mse_loss  #torch.nn.MSELoss()
    elif args.loss == "mink":
        loss_function = minkowski_error
    optimizer, scheduler = configure_optimizers(mlp)

    if args.warm_start:
        # Load the save model
        checkpoint = torch.load(args.locations['model_loc'] + '/' +
                                args.model_name,
                                map_location=args.device)
        mlp.load_state_dict(checkpoint['model_state_dict'])
        mlp.to(args.device)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        loss = checkpoint['loss']
        epoch = checkpoint['epoch']
        args.model_name = args.model_name.replace(
            '.tar', '_{0}.tar'.format(args.region))
    else:
        mlp.to(args.device)
    print("Model's state_dict:")
    for param_tensor in mlp.state_dict():
        print(param_tensor, "\t", mlp.state_dict()[param_tensor].size())

    return mlp, loss_function, optimizer, scheduler
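minkowski_error is referenced for args.loss == "mink" but not defined in this snippet. One plausible implementation, with the exponent p as an assumption:

import torch

def minkowski_error(pred, target, p=1.5):
    # Mean Minkowski-p error; p=1 gives MAE and p=2 gives MSE.
    return torch.mean(torch.abs(pred - target) ** p)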
Example #11
    def train(self, dataN):
        dataY = dataN[self.filterSize:]  # self.dataToClassFunc(dataN[self.filterSize:], self.thresholding)
        dataX = self.featureExtraction(dataN)

        data = dataset.MLPOnlineDataset(dataX=dataX, dataY=dataY)
        self.model_mlp = model.MLP(input_size=40, output_size=1)
        self.experiment = Experiment(config=self.config, model=self.model_mlp, dataset=data)
        self.experiment.run()
Example #12
def load_npz_file_to_model(npz_filename='model.npz'):
    # Create model object first
    model1 = model.MLP()

    # Load the saved parameters into the model object
    chainer.serializers.load_npz(npz_filename, model1)
    print('{} loaded!'.format(npz_filename))

    return model1
Example #13
def load_hdf5_file_to_model(hdf5_filename='model.h5'):
    # Create another model object first
    model2 = model.MLP()

    # Load the saved parameters into the model object
    chainer.serializers.load_hdf5(hdf5_filename, model2)
    print('{} loaded!'.format(hdf5_filename))

    return model2
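Examples #12, #13, and #25 together form a save/load round trip. A minimal sketch, assuming the Chainer model.MLP chain from Example #25:

import chainer
import model  # assumes the Chainer model.MLP from Example #25

mlp = model.MLP()
chainer.serializers.save_npz('model.npz', mlp)       # save parameters
restored = model.MLP()
chainer.serializers.load_npz('model.npz', restored)  # load into a fresh object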
Example #14
def test():
    dataset = data_tool.load(file_path)
    word2id = dataset["dict"]

    embedding = nn.Embedding(len(word2id), gru_hidden_size)
    embedding.load_state_dict(embedding_sd)
    gru = model.BiGRU(gru_hidden_size, embedding, gru_n_layers)
    gru.load_state_dict(gru_sd)
    ffnn = model.MLP(gru_hidden_size * ffnn_init_num * 2, ffnn_hidden_size, 2,
                     ffnn_n_layers)
    ffnn.load_state_dict(ffnn_sd)

    gru = gru.to(device)
    ffnn = ffnn.to(device)

    gru.eval()
    ffnn.eval()

    ref_src = doc2ID(os.path.join(args.reference, "ref.utr"), word2id)
    ref_tgt = doc2ID(os.path.join(args.reference, "ref.rep"), word2id)

    for i in trange(len(ref_src), ncols=0):
        hyp_src = doc2ID(
            os.path.join(args.hypothesis, "candidate_" + str(i) + "_utr.txt"),
            word2id)
        hyp_tgt = doc2ID(
            os.path.join(args.hypothesis, "candidate_" + str(i) + "_rpl.txt"),
            word2id)
        input_var = []
        input_var.append([ref_src[i] for _ in hyp_src])
        input_var.append([ref_tgt[i] for _ in hyp_src])
        input_var.append(hyp_src)
        input_var.append(hyp_tgt)
        input_var = [inputs for inputs in zip(*input_var)]

        dummy_label = [1] * len(input_var)
        pres = []
        probs = []
        data_iterator = data_tool.iterator(input_var,
                                           dummy_label,
                                           batch_size,
                                           random=False)
        for data, length, _ in data_iterator:
            with torch.no_grad():
                data = data_tool.padding(data)
                predictions, probability = modelAll(data, length, gru, ffnn,
                                                    embedding)
                pres += [int(label) for label in predictions]
                probs += [float(prob) for prob in probability[0].cpu()]

        with open(
                os.path.join(args.save_dir,
                             "candidate_" + str(i) + "_predictions.txt"),
                "w") as f:
            for label, prob in zip(pres, probs):
                # maps label {0, 1} to {-1, +1} and prob [0, 1] to [-1, 1];
                # the product is a signed confidence score
                f.write(str((label - 0.5) * 2 * (prob - 0.5) * 2) + "\r\n")
Example #15
def final_run(args):
    if args.modeltype == 'LR':
        model = models.LR(args)
    elif args.modeltype == 'MLP':
        model = models.MLP(args)
    elif args.modeltype == 'CNN':
        model = models.CNN(args)
    elif args.modeltype == 'CNNDeep':
        model = models.CNNDeep(args)
    train_and_evaluate(args, model)
Example #16
    def train(self, dataN):
        dataY = dataN[self.filterSize:]
        dataX = self.featureExtraction(dataN)

        print("dataX",dataX.shape)
        print("dataY",dataY.shape)

        data = dataset.MLPOnlineDataset(dataX=dataX, dataY=dataY)
        self.model_mlp = model.MLP(input_size=11, output_size=1)
        self.experiment = Experiment(config=self.config, model=self.model_mlp, dataset=data)
        self.experiment.run()
Example #17
def init_model(model_type):
    model = []
    if model_type == 'LeNet5':
        model = fedmodel.LeNet5()
    elif model_type == 'MLP':
        model = fedmodel.MLP()
    elif model_type == 'ResNet18':
        model = fedmodel.ResNet18()
    elif model_type == 'CNN':
        model = fedmodel.CNN()
    return model
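A usage sketch for the factory above; because it returns the empty list [] for an unrecognized model_type, a caller may want to validate the result:

net = init_model('MLP')  # dispatches to fedmodel.MLP()
if net == []:
    raise ValueError('unknown model_type')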
Example #18
def main():
    # if gpu is to be used
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # data object
    data = dl.Dataset(DATA_OPTIONS, device)

    # Pytorch model
    mlp = model.MLP()

    print('hi')
Example #19
def restore_model():
    save_path = os.path.normpath('%s/%s' % (abs_path, 'save/hellokugou_'))
    with tf.name_scope("hellokugou") as scope:
        sess = tf.Session()
        model = mod.MLP(name="hellokugou",
                        sess=sess,
                        scope=scope,
                        output_dim=10,
                        input_dim=10)
        model.init2()
        saver = tf.train.Saver()
        name = "with_genre"
        saver.restore(sess, save_path=save_path + name + ".ckpt")
        return sess, model
Example #20
def inference():
    thandler = trainer.handler(args.process_command())

    rt_data = rt()
    data = trainer.load_data(rt_data.data, data_type=rt_data.data_type)
    test_loader = data

    model_ = models.MLP(300, classes=2)
    #print(model_)
    total = sum(p.numel() for p in model_.parameters() if p.requires_grad)
    print('# of para: {}'.format(total))

    model_name = 'MLP.pt'

    predicted = thandler.predict(model_, test_loader, model_name)

    print([np.argmax(np.array(i)) for i in predicted])
Example #21
def main():
    print()

    examples_train, examples_test = dataset.Read_Data("./data/" + pb.source +
                                                      ".csv")

    pb.LABELS = sorted(set([example.label for example in examples_train]))
    print(pb.source, pb.LABELS, len(examples_train), len(examples_test))

    textEmd = representation.Representator(examples_train)

    data_filepath = "./data/" + pb.source + "_bert"
    if os.path.exists(data_filepath):
        [examples_train, examples_test] = pb.Pickle_Read(data_filepath)

    textEmd.Get_Representations(examples_train)
    textEmd.Get_Representations(examples_test)
    # pb.Pickle_Save([examples_train, examples_test], data_filepath)

    textEmd.Get_nGram_Representations(examples_train)
    textEmd.Get_nGram_Representations(examples_test)
    # pb.Pickle_Save([examples_train, examples_test], data_filepath)

    examples_train = dataset.Get_Balanced_Data(examples_train)

    xs_train, gs_train, ys_train = dataset.Get_Encoded_Data(examples_train)
    xs_test, gs_test, ys_test = dataset.Get_Encoded_Data(examples_test)

    # from sklearn import svm
    # clf = svm.SVC()
    # clf.fit(xs_train, ys_train)
    # pre_labels = clf.predict(xs_test)
    # recall, precision, macrof1, microf1, acc = pb.Get_Report(ys_test, pre_labels, pb.LABELS, 2)
    # print("recall:{:.4%}    precision:{:.4%}    macrof1:{:.4%}    microf1:{:.4%}".format(recall, precision, macrof1, microf1))

    print()

    mlp_model = model.MLP(xs_train, gs_train)
    rep_width, add_width, best_ma, best_mi = mlp_model.train(
        xs_train, gs_train, ys_train, xs_test, gs_test, ys_test)

    return rep_width, add_width, best_ma, best_mi
Example #22
def main():
    trainX, trainY, testX, testY = load_mnist()
    print("Shapes: ", trainX.shape, trainY.shape, testX.shape, testY.shape)
    
    epochs = 25
    num_hidden_units = 300 
    minibatch_size = 100  
    regularization_rate = 0.01 
    learning_rate = 0.001 

    mlp = model.MLP(num_hidden_units, minibatch_size, regularization_rate, learning_rate)  # avoid shadowing the model module

    print("Starting training..........")
    mlp.train(trainX, trainY, epochs)
    print("Training complete")

    print("Starting testing..........")
    labels = mlp.test(testX)
    accuracy = np.mean((labels == testY)) * 100.0
    print("\nTest accuracy: %lf%%" % accuracy)
Example #23
File: gui.py Project: NepTuNew/MLP
    def startTrain(self):
        train, label, test, test_label, max = data_model.loadDataset(
            self.filename)
        mlp = model.MLP(float(self.lrEntry.get()), len(train[0]),
                        int(self.hiddenEntry.get()),
                        int(self.layerEntry.get()), label[0].shape[1])
        for epoch in range(int(self.conditionEntry.get())):
            random_index = np.arange(len(train))
            np.random.shuffle(random_index)
            for i in range(len(train)):
                index = random_index[i]
                mlp.forward(train[index])
                mlp.backpropagate(label[index])
        tp = mlp.precision(train, label)
        rmse = mlp.rmse(train, label)
        self.train = train
        self.label = label
        self.test = test
        self.test_label = test_label

        self.messagebox.insert(END, '#')
        self.messagebox.insert(END, self.count)
        self.messagebox.insert(END, '\n')
        self.messagebox.insert(END, 'Train Precision: ')
        self.messagebox.insert(END, tp)
        self.messagebox.insert(END, '\n')

        if len(test) > 0:
            rmse = mlp.rmse(train, label)
            p = mlp.precision(test, test_label)
            self.messagebox.insert(END, 'Test Precision: ')
            self.messagebox.insert(END, p)
            self.messagebox.insert(END, '\n')
        self.messagebox.insert(END, 'RMSE value: ')
        self.messagebox.insert(END, rmse)
        self.messagebox.insert(END, '\n')
        self.mlp = mlp
        self.count += 1
Example #24
def main(args):

    # seed ##############
    np.random.seed(2020)
    torch.manual_seed(1)

    # dataset ##########################################
    if args.dataset == "mnist":
        args.data_path = args.data_path + "mnist/"
        if not os.path.exists(args.data_path):
            os.makedirs(args.data_path)

        test_set = datasets.MNIST(args.data_path, train=False,
                                  download=True,
                                  transform=transforms.Compose([
                                     transforms.Resize([32, 32]),
                                     transforms.ToTensor()]))
        test_loader = torch.utils.data.DataLoader(
            test_set,
            num_workers=32,
            batch_size=args.test_size
        )
    elif args.dataset == "shape":
        test_set = utils.ShapeDataset(data_size=args.test_size)
        test_set.set_seed(2020)
        test_loader = torch.utils.data.DataLoader(
            test_set,
            shuffle=True,
            num_workers=32,
            batch_size=args.test_size
        )
    elif args.dataset == "celeba":
        test_set = utils.ImageFolder(
            args.data_path + '/test/',
            transform=transforms.Compose([transforms.CenterCrop(148),
                                          transforms.Resize([64, 64]),
                                          transforms.ToTensor()]))
        test_loader = torch.utils.data.DataLoader(
            test_set,
            num_workers=32,
            batch_size=args.test_size
        )

    # load model ##########################################

    if args.dataset == "mnist":
        if args.vae:
            enc = model.MNIST_Encoder(args.n * 2)
            dec = model.MNIST_Decoder(args.n, vae=True)
        else:
            enc = model.MNIST_Encoder(args.n)
            dec = model.MNIST_Decoder(args.n)
    elif args.dataset == "celeba":
        if args.vae:
            enc = model.CelebA_Encoder(args.n * 2)
            dec = model.CelebA_Decoder(args.n, vae=True)
        else:
            enc = model.CelebA_Encoder(args.n)
            dec = model.CelebA_Decoder(args.n)
    elif args.dataset == "shape":
        if args.vae:
            enc = model.Shape_Encoder(args.n * 2)
            dec = model.Shape_Decoder(args.n, vae=True)
        else:
            enc = model.Shape_Encoder(args.n)
            dec = model.Shape_Decoder(args.n)

    dec.load_state_dict(torch.load(
        args.checkpoint + "/" + args.dataset + "/dec_" + args.model_name,
        map_location=torch.device('cpu')))
    enc.load_state_dict(torch.load(
        args.checkpoint + "/" + args.dataset + "/enc_" + args.model_name,
        map_location=torch.device('cpu')))
    dec.eval()
    enc.eval()

    if args.l > 0:
        mlp = model.MLP(args.n, args.l)
        mlp.load_state_dict(torch.load(
                args.checkpoint + "/" + args.dataset +
                "/mlp_" + args.model_name,
                map_location=torch.device('cpu')))
        mlp.eval()

    #####################################################

    fig, axs = plt.subplots(args.X, args.Y, figsize=[args.Y, args.X])

    if args.task == "reconstruction":
        yi, _ = next(iter(test_loader))
        if args.vae:
            z_hat = enc(yi)
            mu = z_hat[:, :args.n]
            logvar = z_hat[:, args.n:]
            zi = model.reparametrization(mu, logvar)
        else:
            if args.l > 0:
                zi = mlp(enc(yi))
            else:
                zi = enc(yi)

        y_hat = dec(zi[:args.X * args.Y]).data.numpy()

    elif args.task == "interpolation":
        yi, _ = next(iter(test_loader))

        if args.vae:
            z_hat = enc(yi)
            mu = z_hat[:, :args.n]
            logvar = z_hat[:, args.n:]
            zi = model.reparametrization(mu, logvar)
        else:
            if args.l > 0:
                zi = mlp(enc(yi))
            else:
                zi = enc(yi)

        zs = []
        for i in range(args.X):
            z0 = zi[i*2]
            z1 = zi[i*2+1]

            for j in range(args.Y):
                zs.append((z0 - z1) * j / args.Y + z1)
        zs = torch.stack(zs, axis=0)
        y_hat = dec(zs).data.numpy()

    elif args.task == "mvg":
        z = []
        for yi, _ in test_loader:
            if args.vae:
                z_hat = enc(yi)
                mu = z_hat[:, :args.n]
                logvar = z_hat[:, args.n:]
                zi = model.reparametrization(mu, logvar)
            else:
                if args.l > 0:
                    zi = mlp(enc(yi))
                else:
                    zi = enc(yi)
            z.append(zi.detach().numpy())
        z = np.concatenate(z, axis=0)
        mu = np.average(z, axis=0)
        sigma = np.cov(z, rowvar=False)

        # generate corresponding sample z
        zs = np.random.multivariate_normal(mu, sigma, args.X * args.Y)
        zs = torch.Tensor(zs)

        y_hat = dec(zs).data.numpy()
    elif args.task == "gmm":
        z = []
        for yi, _ in test_loader:
            if args.vae:
                z_hat = enc(yi)
                mu = z_hat[:, :args.n]
                logvar = z_hat[:, args.n:]
                zi = model.reparametrization(mu, logvar)
            else:
                if args.l > 0:
                    zi = mlp(enc(yi))
                else:
                    zi = enc(yi)
            z.append(zi.detach().numpy())
        z = np.concatenate(z, axis=0)
        gmm = mixture.GaussianMixture(
            n_components=args.d, covariance_type='full')
        gmm.fit(z)

        zs, _ = gmm.sample(args.X * args.Y)
        zs = torch.Tensor(zs)
        y_hat = dec(zs).data.numpy()

    elif args.task == "pca":
        z = []
        for yi, _ in test_loader:
            if args.vae:
                z_hat = enc(yi)
                mu = z_hat[:, :args.n]
                logvar = z_hat[:, args.n:]
                zi = model.reparametrization(mu, logvar)
            else:
                if args.l > 0:
                    zi = mlp(enc(yi))
                else:
                    zi = enc(yi)
            z.append(zi.detach().numpy())
        z = np.concatenate(z, axis=0)

        pca = PCA(n_components=args.d)
        pca.fit(z)
        x = pca.transform(z)
        mu = np.average(x, axis=0)
        sigma = np.cov(x, rowvar=False)

        sigma_0 = np.sqrt(sigma[0][0])
        sigma_1 = np.sqrt(sigma[1][1])
        center = mu.copy()
        center[0] -= sigma_0 * 2
        center[1] -= sigma_1 * 2

        zs = []
        for i in range(args.X):
            tmp = []
            x = center.copy()
            x[0] += sigma_0 * i / args.X * 4
            for j in range(args.Y):
                x[1] += sigma_1 / args.Y * 4
                zi = pca.inverse_transform(x)
                tmp.append(zi)
            tmp = np.stack(tmp, axis=0)
            zs.append(tmp)
        zs = np.concatenate(zs, axis=0)
        zs = torch.Tensor(zs)

        y_hat = dec(zs).data.numpy()

    # now plot
    for i in range(args.X):
        for j in range(args.Y):

            if args.dataset == 'mnist':
                im = y_hat[i*args.Y+j][0, :, :]
            else:
                im = np.transpose(y_hat[i*args.Y+j], [1, 2, 0])
            if args.dataset == 'mnist':
                axs[i, j].imshow(1-im, interpolation='nearest', cmap='Greys')
            else:
                axs[i, j].imshow(im, interpolation='nearest')
            axs[i, j].axis('off')

    fig.tight_layout(pad=0.1)

    path = args.save_path + "/" + args.dataset + "/" + args.task
    if not os.path.exists(path):
        os.makedirs(path)
    path += "/" + args.model_name
    plt.savefig(path)
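The "mvg" task above fits a single multivariate Gaussian to the encoded latents and decodes fresh samples from it. The core idea in isolation (the latents here are random stand-ins for illustration):

import numpy as np

z = np.random.randn(1000, 16)                       # stand-in for encoded latents (N, n)
mu = np.average(z, axis=0)                          # latent mean
sigma = np.cov(z, rowvar=False)                     # latent covariance
zs = np.random.multivariate_normal(mu, sigma, 64)   # new codes to pass to the decoder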
Example #25
import chainer
import h5py
import numpy as np

import model

# Create a model object first (named mlp to avoid shadowing the model module)
mlp = model.MLP()


def save_parameters_as_npz(model, filename='model.npz'):
    # Save the model parameters into a NPZ file
    chainer.serializers.save_npz(filename, model)
    print('{} saved!\n'.format(filename))

    # Load the saved NPZ with NumPy and show the parameter shapes
    print('--- The list of saved params in {} ---'.format(filename))
    saved_params = np.load(filename)
    for param_key, param in saved_params.items():
        print(param_key, '\t:', param.shape)
    print('---------------------------------------------\n')


def save_parameters_as_hdf5(model, filename='model.h5'):
    # Save the model parameters into an HDF5 archive
    chainer.serializers.save_hdf5(filename, model)
    print('{} saved!\n'.format(filename))

    # Load the saved HDF5 using h5py
    print('--- The list of saved params in {} ---'.format(filename))
    f = h5py.File(filename, 'r')
Example #26
dataset = RegressionDataset(X, Y)

# Reproducibility
if args.seed is not None:
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

net = MLP()
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.wd)

# Load reference net if defined
if args.repulsive is not None:
    reference_net = model.MLP(dropout_rate=args.dropout_rate)
    reference_net.load_state_dict(torch.load(Path(args.repulsive)))

# Update of the network parameters
train_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False)

# Sample a repulsive bandwidth parameter (log-uniform in [10**alpha, 10**beta])
alpha = -3
beta = -0.5
bandwidth_repulsive = float(10**(alpha + (beta - alpha) * np.random.rand()))

# Preparation of the optimization
if args.repulsive is not None:
    _optimize = partial(optimize,
                        bandwidth_repulsive=bandwidth_repulsive,
                        lambda_repulsive=args.lambda_repulsive)
Example #27
    if os.path.isfile(args.out_path):
        os.remove(args.out_path)
        print(args.out_path + ' Removed')

    print('Cuda Mode is: {}'.format(args.cuda))

    if args.cuda:
        device = get_freer_gpu()
    else:
        device = torch.device('cpu')

    print('Loading model')

    ckpt = torch.load(args.cp_path, map_location=lambda storage, loc: storage)
    model = model_.MLP(n_in=ckpt['input_size'],
                       nh=ckpt['n_hidden'],
                       n_h=ckpt['hidden_size'],
                       dropout_prob=ckpt['dropout_prob'])
    model.load_state_dict(ckpt['model_state'], strict=True)
    model.eval()

    print('Model loaded')

    print('Loading data')

    data = {k.split('-')[0]: m for k, m in read_vec_flt_scp(args.path_to_data)}

    if args.eval:
        test_utts = read_trials(args.trials_path, eval_=args.eval)
    else:
        test_utts, attack_type_list, label_list = read_trials(args.trials_path,
                                                              eval_=args.eval)
Example #28
def MLPClassifier(unsmodel, graphs, indexes, rep_dim, batchsize):
    print('start classification')
    split = int(len(indexes) * 0.9)
    graph_train = []
    graph_test = []

    for i in indexes[0:split]:
        graph_train.append(graphs[i])

    for i in indexes[split:len(indexes)]:
        graph_test.append(graphs[i])

    serializers.load_npz(str(rep_dim) + "_model_ptc.npz", unsmodel)

    hid_dim = 150
    out_dim = 2
    mlp = model.MLP(rep_dim, hid_dim, out_dim)
    classifier = model.SoftmaxClassifier(mlp)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(classifier)
    n_epochs = 30

    # training phase
    best = 0.00
    for epoch in range(n_epochs):
        print("epoch:", epoch)
        perm = np.random.permutation(len(graph_train))
        N_train = len(graph_train)
        sum_loss = 0
        sum_accuracy = 0
        for i in range(0, N_train, batchsize):
            maxid = min(i + batchsize, N_train)
            graphids = []
            adjs = []
            atom_arrays = []
            labels = []
            for id in perm[i:maxid]:
                graphids.append(graph_train[id][0])
                adjs.append(graph_train[id][2])
                atom_arrays.append(graph_train[id][1])
                labels.append(graph_train[id][3])
            graphids = np.asarray(graphids)
            adjs = np.asarray(adjs, dtype=np.float32)
            atom_arrays = np.asarray(atom_arrays, dtype=np.int32)
            labels = np.asarray(labels, dtype=np.int32)
            rep_list, counts = unsmodel.extract_fp(graphids, adjs, atom_arrays)
            y = chainer.Variable(labels)
            x = rep_list
            optimizer.update(classifier, x, counts, y)
            sum_loss += float(classifier.loss.data) * len(y.data)
            sum_accuracy += float(classifier.accuracy.data) * len(y.data)
        print("train acc:", sum_accuracy / N_train, "train loss:",
              sum_loss / N_train)
        if best < sum_accuracy:
            serializers.save_npz(str(rep_dim) + "_nn_ptc.npz", classifier)
            best = sum_accuracy

    # test
    graphids = []
    adjs = []
    atom_arrays = []
    labels = []
    serializers.load_npz(str(rep_dim) + "_nn_ptc.npz", classifier)
    for id in range(len(graph_test)):
        graphids.append(graph_test[id][0])
        adjs.append(graph_test[id][2])
        atom_arrays.append(graph_test[id][1])
        labels.append(graph_test[id][3])
    graphids = np.asarray(graphids)
    adjs = np.asarray(adjs, dtype=np.float32)
    atom_arrays = np.asarray(atom_arrays, dtype=np.int32)
    labels = np.asarray(labels, dtype=np.int32)
    rep_list, counts = unsmodel.extract_fp(graphids, adjs, atom_arrays)

    x = rep_list
    y = chainer.Variable(labels)
    classifier(x, counts, y)  # forward pass to populate loss/accuracy on the test data
    print("test acc:", classifier.accuracy.data)
Example #29
#
# Processing input
#
print('Normalizing...')

normalization = norm.MinMax()
train_x = normalization.fit_and_normalize(train_x)
test_x = normalization.normalize(test_x)
x_test = normalization.normalize(x_test)

#
# Building the model
#
print("Training...")

# arguments: n_inputs, hidden layer sizes, n_outputs, activation (inferred from the call)
mlp = model.MLP(len(columns) - 1, [30], 1, tf.nn.relu)
mlp.optimize(train_x, train_y, steps=10000, x_test=x_test, y_test=y_test)

#
# Testing model
#
print('Predicting...')
test_y_hat = mlp.predict(test_x)

if not is_local_train:
    print("Exporting...")
    result = test_y_hat
    pd.DataFrame({
        'id': test_id,
        'target': result
    }).to_csv('output/prediction/' + strftime("%Y%m%d%H%M%S", gmtime()) +
Example #30
def main(args):

    # use gpu ##########################################
    device = torch.device("cuda" if args.gpu else "cpu")
    torch.manual_seed(0)

    # dataset ##########################################
    if args.dataset == "mnist":
        args.data_path = args.data_path + "mnist/"
        if not os.path.exists(args.data_path):
            os.makedirs(args.data_path)

        train_set = datasets.MNIST(args.data_path,
                                   train=True,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.Resize([32, 32]),
                                       transforms.ToTensor()
                                   ]))
        valid_set = datasets.MNIST(args.data_path,
                                   train=False,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.Resize([32, 32]),
                                       transforms.ToTensor()
                                   ]))
    elif args.dataset == "shape":
        train_set = utils.ShapeDataset(data_size=args.train_size)
        valid_set = utils.ShapeDataset(data_size=args.valid_size)
    elif args.dataset == "celeba":
        train_set = utils.ImageFolder(args.data_path + '/train/',
                                      transform=transforms.Compose([
                                          transforms.CenterCrop(148),
                                          transforms.Resize([64, 64]),
                                          transforms.ToTensor()
                                      ]))
        valid_set = utils.ImageFolder(args.data_path + '/val/',
                                      transform=transforms.Compose([
                                          transforms.CenterCrop(148),
                                          transforms.Resize([64, 64]),
                                          transforms.ToTensor()
                                      ]))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               num_workers=32,
                                               batch_size=args.batch_size)
    valid_loader = torch.utils.data.DataLoader(valid_set,
                                               num_workers=32,
                                               batch_size=args.batch_size)

    # init networks ##########################################

    if args.dataset == "mnist":
        if args.vae:
            enc = model.MNIST_Encoder(args.n * 2)
            dec = model.MNIST_Decoder(args.n, vae=True)
        else:
            enc = model.MNIST_Encoder(args.n)
            dec = model.MNIST_Decoder(args.n)
    elif args.dataset == "celeba":
        if args.vae:
            enc = model.CelebA_Encoder(args.n * 2)
            dec = model.CelebA_Decoder(args.n, vae=True)
        else:
            enc = model.CelebA_Encoder(args.n)
            dec = model.CelebA_Decoder(args.n)
    elif args.dataset == "shape":
        if args.vae:
            enc = model.Shape_Encoder(args.n * 2)
            dec = model.Shape_Decoder(args.n, vae=True)
        else:
            enc = model.Shape_Encoder(args.n)
            dec = model.Shape_Decoder(args.n)

    dec.to(device)
    enc.to(device)

    if not args.vae and args.l > 0:
        mlp = model.MLP(args.n, args.l)
        mlp.to(device)

    # optimizer ##########################################
    if args.l > 0:
        optimizer = optim.Adam([
            {
                'params': dec.parameters(),
                'lr': args.lr
            },
            {
                'params': enc.parameters(),
                'lr': args.lr
            },
            {
                'params': mlp.parameters(),
                'lr': args.lr
            },
        ])
    else:
        optimizer = optim.Adam([
            {
                'params': dec.parameters(),
                'lr': args.lr
            },
            {
                'params': enc.parameters(),
                'lr': args.lr
            },
        ])

    # train ################################################
    save_path = args.checkpoint + "/" + args.dataset

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    for e in range(args.epochs):

        recon_loss = 0

        for yi, _ in tqdm(train_loader):
            enc.train()
            dec.train()
            if args.l > 0:
                mlp.train()

            optimizer.zero_grad()

            yi = yi.to(device)
            z_hat = enc(yi)

            if args.vae:
                mu = z_hat[:, :args.n]
                logvar = z_hat[:, args.n:]
                z_bar = model.reparametrization(mu, logvar)
            else:
                if args.l > 0:
                    z_bar = mlp(z_hat)
                else:
                    z_bar = z_hat

            y_hat = dec(z_bar)
            if args.vae:
                loss = F.binary_cross_entropy(y_hat, yi)
            else:
                loss = F.mse_loss(y_hat, yi)
            recon_loss += loss.item()

            if args.vae:
                loss -= args.beta * torch.mean(1 + logvar - mu.pow(2) -
                                               logvar.exp())

            loss.backward()
            optimizer.step()

        recon_loss /= len(train_loader)
        z_norm = np.average(
            np.sqrt(np.sum(z_hat.detach().cpu().numpy()**2, axis=1)))

        print("epoch " + str(e) + '\ttraining loss = ' + str(recon_loss) +
              '\tz norm = ' + str(z_norm))

        # save model ##########################################
        torch.save(enc.state_dict(), save_path + "/enc_" + args.model_name)
        torch.save(dec.state_dict(), save_path + "/dec_" + args.model_name)

        if args.l > 0:
            torch.save(mlp.state_dict(), save_path + "/mlp_" + args.model_name)

        valid_loss = 0

        for yi, _ in tqdm(valid_loader):
            enc.eval()
            dec.eval()

            if args.l > 0:
                mlp.eval()

            yi = yi.to(device)
            z_eval = enc(yi)

            if args.vae:
                mu = z_eval[:, :args.n]
                logvar = z_eval[:, args.n:]
                z_bar_eval = model.reparametrization(mu, logvar)
            else:
                if args.l > 0:
                    z_bar_eval = mlp(z_eval)
                else:
                    z_bar_eval = z_eval
            y_eval = dec(z_bar_eval)

            eval_loss = F.mse_loss(y_eval, yi)
            valid_loss += eval_loss.item()

        valid_loss /= len(valid_loader)

        print("epoch " + str(e) + '\tvalid loss = ' + str(valid_loss))