Example #1
    def __init__(self, word_embeddings_dim, tag_embeddings_dim, vocabulary_size, tag_uniqueCount, label_uniqueCount, pretrainedWordEmbeddings=None, pretrainedTagEmbeddings=None):
        super().__init__()
        
        self.word_embeddings = nn.Embedding(vocabulary_size, word_embeddings_dim)
        if pretrainedWordEmbeddings is not None:
            assert pretrainedWordEmbeddings.shape == (vocabulary_size, word_embeddings_dim)
            self.word_embeddings.weight.data.copy_(torch.from_numpy(pretrainedWordEmbeddings))
        
        self.tag_embeddings = nn.Embedding(tag_uniqueCount, tag_embeddings_dim)
        if pretrainedTagEmbeddings is not None:
            assert pretrainedTagEmbeddings.shape == (tag_uniqueCount, tag_embeddings_dim)
            self.tag_embeddings.weight.data.copy_(torch.from_numpy(pretrainedTagEmbeddings))
        
        # Save computation time by not training already trained word vectors
#        disableTrainingForEmbeddings(self.word_embeddings, self.tag_embeddings)
        # Now we need to train the embeddings for <root> and <unk>
        
        self.inputSize = word_embeddings_dim + tag_embeddings_dim # The number of expected features in the input x
        self.hiddenSize = self.inputSize #* 2 # 512? is this the same as outputSize?
        self.nLayers = 2
        
        self.biLstm = nn.LSTM(self.inputSize, self.hiddenSize, self.nLayers, bidirectional=True)
        
        self.nDirections = 2
        self.batch = 1 # this is per recommendation
 
        # Input size of the MLP for arcs scores is the size of the output from previous step concatenated with another of the same size
        biLstmOutputSize = self.hiddenSize * self.nDirections
        mlpForScoresInputSize = biLstmOutputSize * 2
        self.mlpArcsScores = MLP(mlpForScoresInputSize, hidden_size=mlpForScoresInputSize, output_size=1)
        
        # MLP for labels
        self.label_uniqueCount = label_uniqueCount
        self.mlpLabels = MLP(mlpForScoresInputSize, hidden_size=mlpForScoresInputSize, output_size=self.label_uniqueCount)
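The MLP class used for the arc and label scorers above is project-specific; a minimal PyTorch sketch compatible with the MLP(input_size, hidden_size=..., output_size=...) calls might look like this (the single hidden layer and the tanh activation are assumptions):

import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # one hidden layer between the BiLSTM features and the score(s)
        self.net = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.Tanh(),  # activation choice is an assumption
            nn.Linear(hidden_size, output_size),
        )

    def forward(self, x):
        return self.net(x)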
Example #2
    def add_models(self,
                   input_dims: list = None,
                   pose_labels: list = None,
                   freeze=False):
        n = len(self.models) + 1
        if pose_labels is not None:
            self.models += [
                MLP(config=self.config,
                    dimensions=[input_dims[i]],
                    pose_labels=pose_labels[i],
                    name="M" + str(i + n),
                    single_module=0) for i in range(len(input_dims))
            ]
        else:
            self.models += [
                MLP(config=self.config,
                    dimensions=[input_dims[i]],
                    name="M" + str(i + n),
                    single_module=0) for i in range(len(input_dims))
            ]
        if freeze:
            for model in self.active_models:
                model.freeze(True)
            self.active_models = self.models[n - 1:]
            self.input_dims = input_dims
        else:
            self.active_models = self.models
            self.input_dims += input_dims

        self.input_slice = [0] + list(accumulate(add, self.input_dims))
    def __init__(self,
                 config: dict = None,
                 Model=None,
                 pose_autoencoder=None,
                 feature_dims=None,
                 input_slicers: list = None,
                 output_slicers: list = None,
                 train_set=None,
                 val_set=None,
                 test_set=None,
                 name="MotionGeneration"):
        super().__init__()

        self.feature_dims = feature_dims
        self.config = config

        self.loss_fn = config.get("loss_fn", nn.functional.mse_loss)
        self.opt = config.get("optimizer", torch.optim.Adam)
        self.scheduler = config.get("scheduler")
        self.scheduler_param = config.get("scheduler_param")
        self.batch_size = config["batch_size"]
        self.learning_rate = config["lr"]

        self.best_val_loss = np.inf
        self.phase_smooth_factor = 0.9

        self.pose_autoencoder = pose_autoencoder if pose_autoencoder is not None else \
            MLP(config=config, dimensions=[feature_dims["pose_dim"]], name="PoseAE")
        self.use_label = pose_autoencoder is not None and pose_autoencoder.use_label

        cost_hidden_dim = config["cost_hidden_dim"]
        self.cost_encoder = MLP(config=config,
                                dimensions=[
                                    feature_dims["cost_dim"], cost_hidden_dim,
                                    cost_hidden_dim, cost_hidden_dim
                                ],
                                name="CostEncoder",
                                single_module=-1)

        self.generationModel = Model(config=config,
                                     dimensions=[
                                         feature_dims["g_input_dim"],
                                         feature_dims["g_output_dim"]
                                     ],
                                     phase_input_dim=feature_dims["phase_dim"])

        self.input_dims = input_slicers
        self.output_dims = output_slicers
        self.in_slices = [0] + list(accumulate(add, input_slicers))
        self.out_slices = [0] + list(accumulate(add, output_slicers))

        self.train_set = train_set
        self.val_set = val_set
        self.test_set = test_set
        self.name = name
Example #4
def experiment_train_val_seq_batch_mlp():
    use_validation_set = False
    case = 1

    [inputs, inputs_labels, input_validation,
     input_validation_labels] = Utils.create_non_linearly_separable_data_2(
         use_validation_set=use_validation_set, case=case)

    num_hidden_nodes_layer_1 = 20
    num_iterations = 1000
    learning_rate = 0.002
    verbose = False

    mlp_batch = MLP(inputs=inputs,
                    inputs_labels=inputs_labels,
                    input_validation=input_validation,
                    input_validation_labels=input_validation_labels,
                    num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                    num_iterations=num_iterations,
                    learning_rate=learning_rate,
                    batch_train=True,
                    verbose=verbose)

    [_, _, mse_batch] = mlp_batch.fit()
    train_batch_mse_batch = mlp_batch.mse
    eval_batch_mse_batch = mlp_batch.validation_mse

    Utils.plot_decision_boundary_mlp(
        inputs, inputs_labels, mlp_batch,
        'MLP with learning rate {0}, iterations {1}, num hidden nodes {2}'.
        format(learning_rate, num_iterations, num_hidden_nodes_layer_1))

    mlp_seq = MLP(inputs=inputs,
                  inputs_labels=inputs_labels,
                  input_validation=input_validation,
                  input_validation_labels=input_validation_labels,
                  num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                  num_iterations=num_iterations,
                  learning_rate=learning_rate,
                  batch_train=False,
                  verbose=verbose)

    [_, _, mse_seq] = mlp_seq.fit()
    train_seq_mse_batch = mlp_seq.mse
    eval_seq_mse_batch = mlp_seq.validation_mse

    mse = [
        train_batch_mse_batch, train_seq_mse_batch, eval_batch_mse_batch,
        eval_seq_mse_batch
    ]
    legend_names = ['train batch', 'train seq', 'eval batch', 'eval seq']
    Utils.plot_error_with_epochs(
        mse,
        legend_names=legend_names,
        num_epochs=num_iterations,
        title='MLP with lr = {0}, iterations = {1}, hidden nodes = {2}'.
        format(learning_rate, num_iterations, num_hidden_nodes_layer_1))
def test(args, device):
    full_data = get_data_loader(args)

    if args.model_type == "CNN":
        from CNN import CNN
        model = CNN(args).to(device)
    elif args.model_type == "MLP":
        from MLP import MLP
        model = MLP(args).to(device)
    elif args.model_type == "LSTM":
        from LSTM import LSTM
        model = LSTM(args).to(device)

    optimiser = optim.Adam(
        model.parameters(), lr=args.learning_rate)

    state = torch.load(args.model_path, map_location=device)
    model.load_state_dict(state['model'])
    optimiser.load_state_dict(state['optimiser'])

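    # accumulate the absolute percentage error per sample; dividing by n at the
    # end yields the mean absolute percentage error (MAPE) over the dataset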
    total_difference = 0
    n = 0

    for batch_num, data in enumerate(full_data):
        x, y = data[0].float().to(device), data[1].float().to(device)
        num_of_predictions = x.shape[0]
        pred = model(x)
        pred = pred.reshape(y.shape)
        total_difference += sum((abs(pred - y)/y) * 100)
        n += num_of_predictions

    return total_difference/n
Example #6
    def __init__(self, env, alpha, gamma, episode_num, target_reward,
                 step_count, minbatch, memory_size, flag):
        self.env = env
        self.alpha = alpha
        self.gamma = gamma
        self.episode_num = episode_num
        self.target_reward = target_reward
        self.step_count = step_count
        # self.test_step=test_step
        self.minbatch = minbatch
        self.memory_size = memory_size
        self.flag = flag
        self.Q = MLP()
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.spaces[
            0].n * env.action_space.spaces[1].n
        # self.action_dim = env.action_space.n

        self.Q.creat2(self.state_dim, env.action_space.spaces[0].n,
                      env.action_space.spaces[1].n)

        self.memory_num = 0
        self.memory = np.zeros((memory_size, self.state_dim * 2 + 4))
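        # each replay row holds (state, next_state) plus 4 extra slots, which
        # presumably cover the two action components, the reward, and a done flag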
        self.optimizer = torch.optim.Adam(self.Q.parameters(), lr=alpha)
        self.loss_func = nn.MSELoss()
def main():
    """
    Run the test case XOR
    """

    print("...Reading dataset")
    dataset = load_dataset("datasets/xor.dat")
    print("...done!")

    print("...Spliting the dataset")
    training_samples, testing_samples, labels_train, labels_test = split_dataset(
        dataset)
    print("...done!")

    print("...Creating the classifier")
    clf = MLP(input_layer=2, hidden=2, output=1)
    print("...done!")

    print("...Fiting the clf")
    clf.fit(training_samples, labels_train, verbose_error=True)
    print("...done!")

    print("...Made a prediction")
    pred = clf.predict(testing_samples)
    print("...done!")

    print('Converged with MSE: {}'.format(clf.error))

    print(clf)

    # DataFrame.from_items was removed in pandas 1.0; a plain dict preserves column order
    print(pd.DataFrame({'Expected': labels_test, 'Obtained': pred}))

    clf.plot_errors()
Example #8
    def __init__(self):
        self.type = None
        self.nn = MLP()
        self.training_method = None
        self.activation_function = None
        self.dropout_rate = 0.0

        self.training = True
        self.learning_rate = 0.1
        self.fitness_threshold = 0.75
        self.epoch_threshold = -1
        self.batch_size = 100
        self.shuffle_rate = 2500
        self.display_step = 1000

        self.epoch = 0

        self.layers = []

        self.data_set = None

        self.bed = BinaryEncoderDecoder()
        self.utils = Utilities()

        self.debug_mode = False

        # Plotting variables
        self.losses = []
        self.fitnesses = []
        self.iterations = []

        self.save_location = './nn/log/'
Example #9
def test_units_accuracy(units, steps, epochs):
    accuracies = []
    units = range(1, units, steps)
    for unit in units:
        mlp = MLP(ReLU, X_train.shape[1], [unit])
        accuracies.append(test_mlp_model(mlp, epochs, 30, plot=False))
    plot_unit_accuracy(accuracies, units)
Example #10
def mlp_test(test_set, Model, n_input=2030, n_output=150, n_hidden=50):
    datasets = load_data(test_set, test_set, test_set)

    test_set_x, test_set_y = datasets[0]
    index = T.lscalar()  # index to a [mini]batch

    x = T.vector('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as a 1D vector of [int] labels

    rng = numpy.random.RandomState(1234)
    # construct the MLP class
    classifier = MLP(rng=rng,
                     input=x,
                     n_in=n_input,
                     n_hidden=n_hidden,
                     n_out=n_output,
                     Model=Model)

    #classifier.hiddenLayer.__setstate__((Model['hidden_W'], Model['hidden_b']))
    #classifier.logRegressionLayer.__setstate__((Model['logRegression_W'], Model['logRegression_b']))

    test_model = theano.function(inputs=[index],
                                 outputs=classifier.predictAll,
                                 givens={
                                     x: test_set_x[index],
                                 })

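    # evaluate the compiled function on the first test sample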
    out = test_model(0).tolist()

    return out
Example #11
def Main():
    x_train = pd.read_csv('Dataset/xtrain_3spirals.txt', sep='\t', header=None)
    x_test = pd.read_csv('Dataset/xtest_3spirals.txt', sep='\t', header=None)

    d_train = pd.read_csv('Dataset/dtrain_3spirals.txt', sep=',', header=None)
    d_test = pd.read_csv('Dataset/dtest_3spirals.txt', sep=',', header=None)

    ## Application of the MLP algorithm
    mlp = MLP(15000, 0.15, 0.000001, [4, 3], 0.5)
    mlp.train(x_train.to_numpy(), d_train.to_numpy())

    new_classes = mlp.application(x_test.to_numpy())

    comparative = np.concatenate((d_test.to_numpy(), new_classes), 1)

    print("Matrix of comparative between classes")
    print(comparative)

    print("------------------------------")

    hit_table = np.zeros((len(new_classes), 1))
    for row in range(len(new_classes)):
        if all(d_test.to_numpy()[row] == new_classes[row]):
            hit_table[row] = 1

    tax_hit = sum(hit_table) / len(new_classes)

    print("------------------------------")

    print("Matrix of hits")
    print(hit_table)

    print("------------------------------")

    print("Tax of hits: " + str(tax_hit))
Example #12
    def __init__(self,
                 config: dict = None,
                 input_dims: list = None,
                 pose_labels=None,
                 train_set=None,
                 val_set=None,
                 test_set=None,
                 name: str = "model",
                 save_period=5,
                 workers=6):

        super(RBF, self).__init__()

        M = len(input_dims)

        self.name = name
        self.input_dims = input_dims
        self.input_slice = [0] + list(accumulate(add, input_dims))

        self.act = nn.ELU
        self.save_period = save_period
        self.workers = workers
        self.pose_labels = pose_labels if pose_labels is not None else [
            None for _ in range(M)
        ]

        self.config = config
        self.basis_func = basis_func_dict()[config["basis_func"]]
        self.hidden_dim = config["hidden_dim"]
        self.keep_prob = config["keep_prob"]
        self.k = config["k"]
        self.learning_rate = config["lr"]
        self.batch_size = config["batch_size"]

        self.loss_fn = config.get("loss_fn", nn.functional.mse_loss)
        self.opt = config.get("optimizer", torch.optim.Adam)
        self.scheduler = config.get("scheduler")
        self.scheduler_param = config.get("scheduler_param")

        self.models = [
            MLP(config=config,
                dimensions=[input_dims[i]],
                pose_labels=self.pose_labels[i],
                name="M" + str(i),
                single_module=0) for i in range(M)
        ]
        self.active_models = self.models

        self.cluster_model = RBF_Layer(in_features=self.k,
                                       out_features=self.k,
                                       basis_func=self.basis_func)

        self.train_set = train_set
        self.val_set = val_set
        self.test_set = test_set

        self.best_val_loss = np.inf
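The (binop, seq) argument order in accumulate(add, input_dims) matches toolz.accumulate rather than itertools.accumulate; a minimal sketch of the slice bookkeeping under that assumption:

from operator import add
from toolz import accumulate

input_dims = [3, 5, 2]                            # hypothetical per-model dims
slices = [0] + list(accumulate(add, input_dims))  # [0, 3, 8, 10]
# model i then reads its features from x[..., slices[i]:slices[i + 1]]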
Example #13
 def __init__(self, x, model_file):
     """
     Sampling works as follows.
     You feed it a model and a dataset, and model_files. 
     The model_files allow you to load in models from different checkpoints.
     A feed through function is compiled that samples from the test dataset. 
     It calculates the error and the output for each element in test dataset. 
     It generates two distributions -- output for signal, and output for background.
     args:
         model: MLP object
         dataset Dataset object
         model_files: list of files corresponding to saved models  
     """
     self.model_file = model_file
     self.param = self.detect_params(self.model_file)
     self.dataset = Dataset(self.param['dataset'])
     self.dataset.set_indexing(self.param['indexing'])
     self.shared_train_x = self.dataset.train_x
     self.shared_train_y = self.dataset.train_y
     self.shared_test_x = self.dataset.test_x
     self.shared_test_y = self.dataset.test_y
     try:
         self.train_labels = self.dataset.train_labels
         self.test_labels = self.dataset.test_labels
     except AttributeError:
         print(
             "You're using a dataset without labels; you won't be able to call gen_labeled_outputs"
         )
     mlp = MLP(x, [self.param['h0'], self.param['h1'], 2],
               np.random.RandomState(1234),
               transfer_func=T.nnet.relu)
     mlp.load_params(self.model_file, mode='hdf5')
     self.model = mlp
     self.predicted = dict()
Example #14
    def load_pretrain_weights(self):
        """Loading weights from trained MLP model & GMF model"""
        config = self.config
        config['latent_dim'] = config['latent_dim_mlp']
        mlp_model = MLP(config)
        if config['use_cuda'] is True:
            mlp_model.cuda()
        resume_checkpoint(mlp_model,
                          model_dir=config['pretrain_mlp'],
                          device_id=config['device_id'])

        self.embedding_user_mlp.weight.data = mlp_model.embedding_user.weight.data
        self.embedding_item_mlp.weight.data = mlp_model.embedding_item.weight.data
        for idx in range(len(self.fc_layers)):
            self.fc_layers[idx].weight.data = mlp_model.fc_layers[
                idx].weight.data

        config['latent_dim'] = config['latent_dim_mf']
        gmf_model = GMF(config)
        if config['use_cuda'] is True:
            gmf_model.cuda()
        resume_checkpoint(gmf_model,
                          model_dir=config['pretrain_mf'],
                          device_id=config['device_id'])
        self.embedding_user_mf.weight.data = gmf_model.embedding_user.weight.data
        self.embedding_item_mf.weight.data = gmf_model.embedding_item.weight.data

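        # NeuMF-style fusion: concatenate the pretrained MLP and GMF output
        # layers and scale by 0.5 so each model contributes equally at init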
        self.affine_output.weight.data = 0.5 * torch.cat(
            [mlp_model.affine_output.weight.data,
             gmf_model.affine_output.weight.data],
            dim=-1)
        self.affine_output.bias.data = 0.5 * (
            mlp_model.affine_output.bias.data +
            gmf_model.affine_output.bias.data)
Example #15
def main(args):
    # create the output directories
    result_path = os.path.join(args.output_path, "final_result")
    check_dir(args.output_path)
    check_dir(result_path)

    # select the GPU
    torch.cuda.set_device(args.gpu_id)

    # load the data
    train_set = MySet(args.txt_path, mode="train")
    val_set = MySet(args.txt_path, mode="val")
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True)
    val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False)

    # network model
    net = MLP(args)
    net.cuda()

    # Adam optimizer and cross-entropy loss
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=args.lr,
                                 weight_decay=1e-4)
    cost = torch.nn.CrossEntropyLoss()
    epoch_loss, epoch_acc, val_loss, val_acc = [], [], [], []

    # epoch loop
    t0 = time.time()

    for epoch in range(args.epoch):
        t_loss, t_acc = train_epoch(net, train_loader, optimizer, cost)
        epoch_loss.append(t_loss)
        epoch_acc.append(t_acc)

        v_loss, v_acc = val_epoch(net, val_loader, cost)
        val_acc.append(v_acc)
        val_loss.append(v_loss)

        # plot train/val loss and accuracy

        plot(epoch_acc, val_acc, result_path, 'train_acc', 'val_acc',
             args.batch_size, 'acc')
        plot(epoch_loss, val_loss, result_path, 'train_loss', 'val_loss',
             args.batch_size, 'loss')

        info = [str(epoch).zfill(3), t_loss, v_acc]
        print("Epoch: {} | train Loss: {:.4f} val ACC: {:.4f}".format(*info))

    t1 = time.time()
    print("Optimization Finished!  Cost time:{:.1f} minutes".format(
        (t1 - t0) / 60))
    print("The final acc=%g" % v_acc)

    # save the final model
    state = net.state_dict()

    torch.save(state, os.path.join(result_path, "Network_final.pth.gz"))
Example #16
def main(client_id):
    while True:
        try:
            response = requests.get(url + 'deepLearning')
            json_data = json.loads(response.text)
            # response_message = response.content().decode('utf-8')
            image_file_index = int(json_data['image_file_index'])
            epoch_number = int(json_data['epoch_number'])
            print('image_index_file: ' + str(image_file_index))
            print('epoch_number: ' + str(epoch_number))
            mode = str(json_data['mode'])
            print('mode: ' + mode)
            if mode == 'stop':
                return

            if mode == 'wait':
                time.sleep(1.5)
                continue

            client = MongoClient(
                'mongodb://*****:*****@ds111529.mlab.com:11529/primre')
            _db = client.primre
            print('start download network')
            try:
                network = _db.Network.find_one({'id': 1})
                l1_w_list = network['l1_list']
                l2_w_list = network['l2_list']
            except Exception:
                _db.GlobalParameters.update_one(
                    {'id': 1}, {'$inc': {
                        'image_file_index': -1
                    }})
                continue
            print('finish download network')
            lin_neural_network_l1 = L.Linear(784, 300)
            lin_neural_network_l2 = L.Linear(300, 10)
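            # copy the downloaded weights element-by-element into the two Chainer links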
            for i in range(300):
                for j in range(784):
                    lin_neural_network_l1.W.data[i][j] = l1_w_list[i][j]
            for i in range(10):
                for j in range(300):
                    lin_neural_network_l2.W.data[i][j] = l2_w_list[i][j]
            mlp = MLP(lin_neural_network_l1, lin_neural_network_l2)
            file_images_name = '~/images_train/image_' + str(image_file_index)
            file_labels_name = '~/labels_train/label_' + str(image_file_index)
            if mode == 'test':
                file_images_name = '~/images_test/images_' + str(
                    image_file_index)
                file_labels_name = '~/labels_test/label_' + str(
                    image_file_index)

            if mode == "train":
                train(_db, client_id, mlp, file_images_name, file_labels_name,
                      l1_w_list, l2_w_list)
            else:
                validate_test(_db, mode, mlp, epoch_number, file_images_name,
                              file_labels_name)
        except Exception:
            continue
Example #17
def test_depth_accuracy(depth, epochs):
    accuracies = []
    depths = range(depth)
    for depth in depths:
        print(f"MLP depth: {depth}")
        mlp = MLP(ReLU, X_train.shape[1], [128] * depth)
        accuracies.append(test_mlp_model(mlp, epochs, 30, plot=False))
    plot_depth_accuracy(accuracies, depths)
def test():
    model_to_be_restored = MLP()
    checkpoint = tf.train.Checkpoint(myAwesomeModel=model_to_be_restored)
    checkpoint.restore(tf.train.latest_checkpoint('./check_point'))
    y_pred = np.argmax(model_to_be_restored.predict(data_loader.test_data),
                       axis=-1)
    print("test accuracy: %f" %
          (sum(y_pred == data_loader.test_label) / data_loader.num_test_data))
Example #19
def tune(n):
    model = MLP((4, ), training_epochs=5000, beta=betas[n], debug=False)

    m = Model(model, transfs, gen_x, gen_y, RMSE)
    window = [1, 4, 12]
    ret = m.expanding_window(X_train, y_train, TRAIN_OFFSET, window, 'dynamic')
    print(betas[n])
    return betas[n], ret[1][3].iloc[-1, 0], ret[1][0], ret[4][0], ret[12][0]
Example #20
 def get_mlp(self):
     try:
         hidden_nodes = int(self.hidden_nodes_input.get())
         learning_rate = float(self.learning_rate_input.get())
         return MLP(input_nodes=784, hidden_nodes=hidden_nodes,
                    output_nodes=10, learning_rate=learning_rate)
     except ValueError:
         return None
Example #21
 def __init__(self, img=None, model_path='my_model.npz'):
     self.origin_img = img
     self.preprocess_img = None
     self.detected_number = None
     self.number_model_path = model_path
     self.net = L.Classifier(MLP(1000, 10))
     self.data_directory = "data"
     self.img_directory = "imgs"  # directory for storing images
     self.setup()
Example #22
def experiment_learning_curves_error():
    train_test = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]

    use_validation_set = True
    num_hidden_nodes_layer_1 = 20
    num_iterations = 1000
    learning_rate = 0.001
    verbose = False

    cases = [1, 2, 3, 4]
    train_MSE = []
    val_MSE = []
    for case in cases:
        [inputs, inputs_labels, input_validation,
         input_validation_labels] = Utils.create_non_linearly_separable_data_2(
             use_validation_set=use_validation_set, case=case)

        print(case)

        current_train = []
        current_validation = []
        for check in train_test:
            X_train, X_test, y_train, y_test = train_test_split(
                inputs.T, inputs_labels, test_size=check, random_state=42)

            mlp_batch = MLP(inputs=X_train.T,
                            inputs_labels=y_train,
                            input_validation=input_validation,
                            input_validation_labels=input_validation_labels,
                            num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                            num_iterations=num_iterations,
                            learning_rate=learning_rate,
                            batch_train=True,
                            verbose=verbose)

            [_, _, mse_batch] = mlp_batch.fit()

            current_train.append(mlp_batch.mse[-1])
            current_validation.append(mlp_batch.validation_mse[-1])

        train_MSE.append(current_train)
        val_MSE.append(current_validation)

    legend_names = [
        'train mse error case 1', 'train mse error case 2',
        'train mse error case 3', 'train mse error case 4',
        'validation mse error case 1', 'validation mse error case 2',
        'validation mse error case 3', 'validation mse error case 4'
    ]

    Utils.plot_learning_curves(
        train_MSE,
        legend_names=legend_names,
        train_size=train_test,
        title='Learning curve with lr = {0}, iterations = {1} '.format(
            learning_rate, num_iterations),
        loss=val_MSE)
Example #23
def generateModel(learningRate):
    layer0 = Layer(4, 0, 4, ActivationFunction.linear)
    layer1 = Layer(3, 1, 4, ActivationFunction.sigmoid)
    layer2 = Layer(3, 2, 3, ActivationFunction.sigmoid)
    layers = [layer0, layer1, layer2]
    return MLP(layers, learningRate)
Example #24
def tune_learning_rate():
    accuracies = []
    lrs = [0.001, 0.01, 0.1]  # Figure10, 11, 12
    for lr in lrs:
        print("\n\nMLP with 2 hidden layers of 128 ReLU units")
        print(f"minibatch sizes: 30, learning_rate={lr}")
        mlp = MLP(ReLU, X_train.shape[1], [128, 128], lr=lr)
        accuracies.append(test_mlp_model(mlp, 30, 30))
    plot_vs_accuracy(accuracies, lrs, "Learning rate")
Example #25
def tune_batchsize():
    accuracies = []
    bsizes = [10, 100, 1000]  # Fig15, 16, 17
    for bsize in bsizes:
        print("\n\nMLP with 2 hidden layers of 128 ReLU units")
        print(f"minibatch sizes: {bsize}, learning_rate={0.1}")
        mlp = MLP(ReLU, X_train.shape[1], [128, 128])
        accuracies.append(test_mlp_model(mlp, 30, bsize))
    plot_vs_accuracy(accuracies, bsizes, "Batch size")
Example #26
def initialize_model(args):
    if args.model == 'ridge':
        return Ridge()
    elif args.model == 'mlp':
        return MLP(n_input=x.shape[-1], n_mid=args.n_mid, lr=args.lr, n_output=t.shape[-1], batch_size=500,
                    shuffle_batches=args.shuffle, drop=args.drop, n_epochs=args.n_epochs, toplot=False)
    elif args.model == 'lstm':
        return LSTM(n_input=x.shape[-1], n_mid=args.n_mid, lr=args.lr, w_decay=0., n_output=t.shape[-1],
                      batch_size=200, n_back=args.n_back, drop=args.drop, n_epochs=args.n_epochs, toplot=False)
    else:
        raise Exception('Unknown model')
Example #27
def experiment_train_validation_nodes():
    use_validation_set = True

    num_iterations = 1000
    learning_rate = 0.002
    verbose = False

    nodes = [1, 5, 10, 20, 25]
    cases = [1, 2, 3, 4]
    train_MSE = []
    val_MSE = []

    for case in cases:
        print(case)
        [inputs, inputs_labels, input_validation,
         input_validation_labels] = Utils.create_non_linearly_separable_data_2(
             use_validation_set=use_validation_set, case=case)

        current_mse = []
        current_val_mse = []
        for node in nodes:
            mlp_batch = MLP(inputs=inputs,
                            inputs_labels=inputs_labels,
                            input_validation=input_validation,
                            input_validation_labels=input_validation_labels,
                            num_nodes_hidden_layer=node,
                            num_iterations=num_iterations,
                            learning_rate=learning_rate,
                            batch_train=True,
                            verbose=verbose)

            [_, _, mse_batch] = mlp_batch.fit()

            current_mse.append(mlp_batch.mse[-1])
            current_val_mse.append(mlp_batch.validation_mse[-1])

        train_MSE.append(current_mse)
        val_MSE.append(current_val_mse)

    legend_names = [
        'train mse error case 1', 'train mse error case 2',
        'train mse error case 3', 'train mse error case 4',
        'validation mse error case 1', 'validation mse error case 2',
        'validation mse error case 3', 'validation mse error case 4'
    ]

    Utils.plot_error_hidden_nodes(
        train_MSE,
        legend_names=legend_names,
        hidden_nodes=nodes,
        title='MLP with learning rate {0}, iterations {1} '.format(
            learning_rate, num_iterations),
        loss=val_MSE)
Example #28
    def __init__(self,
                 nfeat,
                 nhid,
                 nlayer,
                 nclasses,
                 mlpPos,
                 loss="kl",
                 useDropout=False,
                 keepProb=0.5,
                 useBatchNorm=False,
                 layerNorm=False,
                 detach=False):
        super(GIN_pyg, self).__init__()
        self.nlayer = nlayer
        self.mlpPos = mlpPos
        self.useDropout = useDropout
        self.keepProb = keepProb
        self.useBatchNorm = useBatchNorm
        self.loss = loss
        self.layerNorm = layerNorm
        self.detach = detach

        self.GinMlps = nn.ModuleList()
        self.bns = nn.ModuleList()
        #         self.rnns = nn.ModuleList()
        for i in range(nlayer):
            self.GinMlps.append(
                MLP([nhid, nhid, nhid], useDropout, keepProb, useBatchNorm))
            if self.useBatchNorm:
                self.bns.append(nn.BatchNorm1d(nhid))
#             self.rnns.append(torch.nn.GRUCell(nhid, nhid, bias=True))

        self.mlp1 = MLP([nfeat, nhid], useDropout, keepProb, useBatchNorm)
        if self.useBatchNorm:
            self.bn1 = nn.BatchNorm1d(nhid)
        self.mlps = nn.ModuleList()
        for i in range(len(mlpPos)):
            self.mlps.append(
                MLP([nhid, nhid, nclasses[i]], useDropout, keepProb,
                    useBatchNorm))
Example #29
    def __init__(self, 
                 enc_past_size, 
                 enc_dest_size, 
                 enc_latent_size, 
                 dec_size, 
                 predictor_size, 
                 fdim, 
                 zdim, 
                 sigma,
                 past_length, 
                 future_length, 
                 verbose):
        '''
        Args:
            size parameters: Dimension sizes
            sigma: Standard deviation used for sampling N(0, sigma)
            past_length: Length of past history (number of timesteps)
            future_length: Length of future trajectory to be predicted
        '''
        super(PECNet, self).__init__()

        self.zdim = zdim
        self.sigma = sigma

        # takes in the past
        self.encoder_past = MLP(input_dim = past_length*2, output_dim = fdim, hidden_size=enc_past_size)
        self.encoder_dest = MLP(input_dim = 2, output_dim = fdim, hidden_size=enc_dest_size)
        self.encoder_latent = MLP(input_dim = 2*fdim, output_dim = 2*zdim, hidden_size=enc_latent_size)
        self.decoder = MLP(input_dim = fdim + zdim, output_dim = 2, hidden_size=dec_size)
        self.predictor = MLP(input_dim = 2*fdim, output_dim = 2*(future_length-1), hidden_size=predictor_size)

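        # read each sub-network's layer widths off its nn.Linear modules,
        # used for the verbose architecture printout below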
        architecture = lambda net: [l.in_features for l in net.layers] + [net.layers[-1].out_features]

        if verbose:
            print("Past Encoder architecture : {}".format(architecture(self.encoder_past)))
            print("Dest Encoder architecture : {}".format(architecture(self.encoder_dest)))
            print("Latent Encoder architecture : {}".format(architecture(self.encoder_latent)))
            print("Decoder architecture : {}".format(architecture(self.decoder)))
            print("Predictor architecture : {}".format(architecture(self.predictor)))
Example #30
File: RL.py  Project: Tarkof/DRL
    def __init__(self, episode_size=150):
        self.model = MLP((SCREEN_HEIGHT_g, SCREEN_WIDTH_g), 300)
        #self.load("models/model_1185.npz")
        self.activations = []
        self.frames = []
        self.states_alive = []

        self.episode_size = episode_size
        self.episode_decisions = np.zeros((8))

        self.episodes_wins = 0
        self.episodes_nb = 0
        self.iter = 0