Example #1
    def __init__(self, args):
        self.args = args

        comm = MPI.COMM_WORLD
        self.comm = comm
        self.n_workers = comm.size
        self.rank = comm.rank

        if args.clean:
            self.clean_model_dir()

        env = gym.make(args.env_name)
        if args.monitor and self.rank == 0:
            env = gym.wrappers.Monitor(env, args.env_name + '_monitor', force=True)
        self.env = env

        self.n_ptbs = self.n_workers

        self.p = eslib.Perturbation()
        n_actions = env.action_space.n
        self.model = mlp.MLP(n_actions)

        optimizer = O.Adam(alpha=1e-2)
        optimizer.setup(self.model)
        optimizer.add_hook(GradientClipping(1e2))
        optimizer.add_hook(WeightDecay(0.0005))
        self.optimizer = optimizer

        eslib.fix_model(self.model, gym.make(args.env_name).reset())
Example #2
    def train_learner_opt(self, X, Y, X_test, Y_test, learner_params=None):
        """Train the learner with the chosen optimal hyper-parameters."""
        #        self.logging.info('Standardizing data!')
        #        Y_test = np.array(Y_test)
        #        scaler = StandardScaler()
        #        X = scaler.fit_transform(X)
        #        X_test = scaler.transform(X_test)
        #        self.logging.info('X size is: %s and Y size is: %s and X_test size is: %s and Y_test size is: %s',
        #                           '_'.join(map(str,X.shape)), str(len(Y)), '_'.join(map(str,X_test.shape)), str(len(Y_test)))

        X, Y, X_test, Y_test = self.scale_all_data(X, Y, X_test, Y_test)

        clf = None
        if self.method == 'classification':
            clf = mlp.MLP(learner_params['l_rate'], learner_params['l1'],
                          learner_params['l2'], learner_params['n_hidden'],
                          learner_params['best_score'])
            self.logging.info('optimal MLP classifier trained')
        elif self.method == 'regression':
            pass
#            clf = self.learner()
#            self.logging.info('optimal linear regressor trained with alpha = %s!', str(learner_params['C']))
        if clf is None:
            raise NotImplementedError('only classification is supported here')

        clf.fit((X, X_test), (np.array(Y), np.array(Y_test)))

        self.fit_opt_learner(X, Y, X_test, Y_test, clf)
Example #3
def classify(fig):

    # Standard MNIST format is 28x28 pixels and grayscale.
    height, width, depth = 28, 28, 1

    # Process image (c.f. fig_to_misc.py) and convert to numpy array
    X = mnist_treat(fig)
    X_array = np.array(X)

    # Flatten image
    X_flat = X_array.reshape(1, height * width)

    # Normalize to 1 = white.
    X_flat = X_flat.astype('float32')
    X_flat /= 255

    net = mlp.MLP()

    # load latest model
    path = os.path.join(os.curdir, 'models/*')
    files = sorted(glob.iglob(path), key=os.path.getctime, reverse=True)

    net.load(files[0])

    # Run prediction
    prediction = net.predict(X_flat)

    return prediction[0]
Example #4
def test_mlp(args):
    """Tests MLP."""

    torch.set_default_dtype(torch.double)
    X_train = np.loadtxt(f'{args.Output_path}X_train.csv', delimiter=',')
    X_test_ID = np.loadtxt(f'{args.Output_path}X_test_ID.csv', delimiter=',')
    X_test_OOD = np.loadtxt(f'{args.Output_path}X_test_OOD.csv', delimiter=',')
    notear_model = torch.load(f'{args.Output_path}notear.pt')

    model, ISL_id_train_mse, ISL_id_test_mse = mlp(X_train[:, 1:args.num_xy],
                                                   X_train[:, 0],
                                                   X_test_ID[:, 1:args.num_xy],
                                                   X_test_ID[:, 0],
                                                   epoches=200)
    # model_OOD, ISL_ood_train_mse, ISL_ood_test_mse = mlp(
    #  X_train[:,1:args.num_xy], X_train[:,0], X_test_OOD[:, 1:args.num_xy],
    #  X_test_OOD[:,0], epoches=200)
    torch.save(model.state_dict(), f'{args.Output_path}_induced_mlp.pt')
    model = MLP.MLP(n_feature=args.num_xy - 1,
                    n_hidden=100,
                    n_output=1,
                    activation='Sigmoid')

    model.load_state_dict(torch.load(f'{args.Output_path}_induced_mlp.pt'))
    for name, data in [('ID', X_test_ID), ('OOD', X_test_OOD)]:
        ISL_pred = model(torch.tensor(data[:, 1:args.num_xy]))
        notear_model.eval()
        notear_pred = notear_model(torch.tensor(data))
        ISL_mse = mean_squared_loss(data.shape[0], data[:, 0],
                                    np.array(ISL_pred.detach().cpu())[:, 0])
        notear_mse = mean_squared_loss(
            data.shape[0], data[:, 0],
            np.array(notear_pred.detach().cpu())[:, 0])
        print(f'{name}_ISL_pred', ISL_mse)
        print(f'{name}_notear_pred', notear_mse)
Example #5
def main():
    parser = argparse.ArgumentParser(description='okaeri kanojo trainer')
    args = get_args(parser)

    model = L.Classifier(mlp.MLP(N_IN, args.unit, N_OUT))
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the sprecog dataset
    train_data = dataset.TrainingDataset(TRAIN_FILE)
    train = train_data.convert_to_dataset()
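    # NOTE: no held-out data; the training set is reused as the evaluation set below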
    test = train

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot at each epoch
    trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Print selected entries of the log to stdout
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()

    # Save the model
    chainer.serializers.save_npz(MODEL_FILE, model)
Example #6
def main():
    n_nodes = int(input('Enter the number of nodes: '))
    model = mlp.MLP(n_nodes)
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    modes = ['Calculation', 'Learning', 'Exit']

    while True:
        draw_modes()
        select = int(input('Enter the number of mode: '))
        selected_mode = modes[select-1]

        if (selected_mode == 'Exit'):
            sys.exit()

        if (selected_mode == 'Calculation'):
            calc(model)

        if (selected_mode == 'Learning'):
            epoch = int(input('Enter the number of epoch: '))
            digit = input('Enter the number of learning digit: ')

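            # train on random pairs at the chosen order of magnitude; the target is their sum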
            for _ in range(epoch):
                a = np.random.rand(2).reshape(1, 2).astype(np.float32) * float('1e+' + digit)
                x = chainer.Variable(a)
                x = model(x)
                t = chainer.Variable(a.sum().reshape(1, 1))

                model.cleargrads()
                loss = F.mean_squared_error(x, t)
                loss.backward()
                optimizer.update()

                print('loss: ' + str(loss.data))
Example #7
    def __init__(self, inSize, outSize):
        super().__init__()
        attn = attentionEncoder.AttentionEncoder(inputSize=inSize,
                                                 outputSize=1024,
                                                 attentionHidSize=512,
                                                 attentionRowSize=3)
        mlp = M.MLP([1024, 256, 512, outSize], 'tanh', noActFinal=True)

        self.add_link('attn', attn)
        self.add_link('mlp', mlp)
Example #8
def main():
    training_data, validation_data, test_data = mnist.load()
    net = mlp.MLP([784, 36, 10])
    epochs = 500
    mini_batch_size = 10
    learning_rate = 0.5
    lmbda = 5.0
    drop_prob = 0.5
    net.sgd(training_data, epochs, mini_batch_size, test_data, learning_rate,
            lmbda, drop_prob)
Example #9
    def train_learner_cv(self, X, Y, optimal=False):
        """ """
        self.logging.info('Standardizing data!')
        assert self.result_path != ''
        X = np.asarray(X, dtype=np.float32, order='F')
        Y = np.asarray(Y, dtype=np.short, order='F')
        scaler = StandardScaler()

        X = scaler.fit_transform(X)

        self.logging.info('X size is: %s and Y size is: %s',
                          '_'.join(map(str, X.shape)),
                          '_'.join(map(str, Y.shape)))

        for i in range(self.config.configuration["number_of_cvs"]):
            self.logging.info('iteration number %s for cross validation',
                              str(i))
            X_new, Y_new = shuffle(X, Y, random_state=i)
            cv = StratifiedKFold(
                y=Y_new,
                n_folds=self.config.configuration["number_of_cv_folds"])

            for param_ind in range(len(self.scores)):
                print(self.param_grid[param_ind])
                accs = np.zeros(
                    self.config.configuration["number_of_cv_folds"])
                fold_number = 0
                l_rate = self.param_grid[param_ind][0]
                l1 = self.param_grid[param_ind][1]
                l2 = self.param_grid[param_ind][2]
                n_hidden = self.param_grid[param_ind][3]
                self.learner = mlp.MLP(l_rate, l1, l2, n_hidden)

                for train_index, test_index in cv:
                    #                    print("TRAIN:", train_index, "TEST:", test_index)
                    X_train, X_test = X_new[train_index], X_new[test_index]
                    y_train, y_test = Y_new[train_index], Y_new[test_index]
                    datasets = [X_train, y_train, X_test, y_test]
                    accs[fold_number] = self.learner.fit((X_train, X_test),
                                                         (y_train, y_test))
                    #                     accs[fold_number] = mlp.apply_mlp(l_rate, l1, l2, n_hidden, datasets)
                    fold_number += 1

                self.scores[param_ind, i] = np.mean(accs)

        min_ind = np.argmin(np.mean(self.scores, axis=1))

        self.logging.info('Writing the results to file!')
        with open(self.result_path, 'w') as res_file:
            print(np.mean(self.scores), file=res_file)

            print(dict(l_rate=self.param_grid[min_ind][0],
                       l1=self.param_grid[min_ind][1],
                       l2=self.param_grid[min_ind][2],
                       n_hidden=self.param_grid[min_ind][3]),
                  file=res_file)
            print(np.std(self.scores, axis=1), file=res_file)
Example #10
def main():
    words = 900
    training_data, validation_data, test_data = reuters.load(words)
    net = mlp.MLP([words, 80, 46])
    epochs = 200
    mini_batch_size = 16
    learning_rate = 0.5
    lmbda = 1.0
    drop_prob = 0.5
    net.sgd(training_data, epochs, mini_batch_size, test_data, learning_rate,
            lmbda, drop_prob)
Example #11
    def set_params_dict(self, learner_params):

        if self.method == 'classification':
            self.learner = mlp.MLP(learner_params['learning_rate'],
                                   learner_params['l1'], learner_params['l2'],
                                   learner_params['n_hidden'],
                                   learner_params['best_error'])

        elif self.method == 'regression':

            sys.exit('Error: regression not implemented yet')
Example #12
    def set_params_list(self, learner_params, i):

        learning_rate = learner_params[0]
        l1 = learner_params[1]
        l2 = learner_params[2]
        n_hidden = learner_params[3]

        if self.method == 'classification':
            self.learner = mlp.MLP(learning_rate, l1, l2, n_hidden)

        elif self.method == 'regression':

            sys.exit('Error: regression not implemented yet')
Example #13
def run_mlp(bounds):
    _mlp = mlp.MLP(dense1=bounds[0],
                   dense2=bounds[1],
                   drop1=bounds[2],
                   drop2=bounds[3],
                   batch_size=bounds[4],
                   activation=bounds[5],
                   opt=bounds[6])

    mnist_evaluation = _mlp.mlp_evaluate()
    print(mnist_evaluation)

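    # returned as a 1-tuple, as fitness-style optimizer APIs (e.g. DEAP) typically expect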
    return mnist_evaluation[0],
Example #14
def mlp(X_train,
        Y_train,
        X_test,
        Y_test,
        epoches=100,
        hidden=100,
        lr=0.01,
        batch_size=64):
    """MLP training."""

    input_feature_dim = X_train.shape[1]
    output_feature_dim = 1
    x_train = torch.tensor(X_train)
    y_train = torch.tensor(Y_train)
    x_test = torch.tensor(X_test)
    y_test = torch.tensor(Y_test)
    train_dataset = MLP.CustomDataset(x_train, y_train)
    test_dataset = MLP.CustomDataset(x_test, y_test)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    train_loader = DataLoader(train_dataset, batch_size=batch_size)

    model = MLP.MLP(n_feature=input_feature_dim,
                    n_hidden=hidden,
                    n_output=output_feature_dim,
                    activation='Sigmoid')

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
    loss_func = torch.nn.MSELoss()

    for epoch in range(1, epoches + 1):
        loss = MLP.train(model, epoch, train_dataset, train_loader, optimizer,
                         loss_func)
        train_loss = MLP.test(model, train_loader, loss_func)
        test_loss = MLP.test(model, test_loader, loss_func)
        if epoch % 10 == 0:
            print(
                'Epoch: {:03d}, Loss: {:.5f}, train_loss: {:.5f}, test_loss: {:.5f}'
                .format(epoch, loss, train_loss, test_loss))

    model.eval()
    output = model(x_test)
    mse = mean_squared_loss(x_test.shape[0], np.array(y_test.detach()),
                            np.array(output[:, 0].detach()))
    train_output = model(x_train)
    train_mse = mean_squared_loss(x_train.shape[0], np.array(y_train.detach()),
                                  np.array(train_output[:, 0].detach()))
    print(mse)
    return model, train_mse, mse
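
A minimal call sketch for the mlp() helper above, on synthetic data. It assumes the MLP helper module and mean_squared_loss used in the snippet are importable; the data and hyper-parameters are purely illustrative:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(256, 4))           # 256 samples, 4 features
y = X.sum(axis=1) + 0.1 * rng.normal(size=256)
model, train_mse, test_mse = mlp(X[:200], y[:200], X[200:], y[200:], epoches=50)
print(train_mse, test_mse)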
Example #15
    def __init__(self, n_objects, in_features, unary, binary, terminal,
                 nonlinear, triu):
        super(RN, self).__init__()
        self.n_objects = n_objects
        self.in_features = in_features

        if unary:
            assert unary[0] == in_features
            assert unary[-1] * 2 == binary[0]
        else:
            assert binary[0] == in_features * 2
        assert binary[-1] == terminal[0]

        self.unary = mlp.MLP(unary, nonlinear) if unary else None
        self.binary = mlp.MLP(binary, nonlinear)
        self.terminal = mlp.MLP(terminal, nonlinear)
        self.nonlinear = nonlinear

        if triu:
            self.mask = th.triu(th.ones(self.n_objects, self.n_objects), 1)
            self.mask = self.mask.view(1, self.n_objects * self.n_objects, 1)
        else:
            self.mask = None
Example #16
def logic_test():
    or_perceptron = mlp.MLP(2, alpha=0.9, eta=0.01)
    or_perceptron.add_layer(2)
    or_perceptron.add_layer(2)
    or_perceptron.add_layer(1)

    and_perceptron = mlp.MLP(2, alpha=0.9, eta=0.01)
    and_perceptron.add_layer(2)
    and_perceptron.add_layer(1)

    xor_perceptron = mlp.MLP(2, alpha=0.9, eta=0.05)
    xor_perceptron.add_layer(2)
    xor_perceptron.add_layer(2)
    xor_perceptron.add_layer(1)

    # X = np.array([ np.random.randint(0, 2, 2) for i in range(0,10) ])
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

    T_or = np.reshape(np.array([(x[0] | x[1]) for x in X]), (-1, 1))
    T_and = np.reshape(np.array([(x[0] & x[1]) for x in X]), (-1, 1))
    T_xor = np.reshape(np.array([(x[0] ^ x[1]) for x in X]), (-1, 1))

    or_iterations = or_perceptron.train(X, T_or)
    xor_iterations = xor_perceptron.train(X, T_xor)
    and_perceptron.train(X, T_and)

    o_or = or_perceptron.recall(X)
    o_xor = xor_perceptron.recall(X)
    o_and = and_perceptron.recall(X)

    print "Or (" + str(or_iterations) + " iterations)"
    print T_or
    print o_or
    print "Xor (" + str(xor_iterations) + " iterations)"
    print T_xor
    print o_xor
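
    # Hedged check, assuming recall() returns real-valued activations in [0, 1]:
    # thresholding at 0.5 should reproduce the truth tables (XOR -> 0, 1, 1, 0).
    print((o_xor > 0.5).astype(int).ravel())
    print((o_and > 0.5).astype(int).ravel())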
Example #17
 def load_weights(self, weights, x, y, index):
     self.members = []
     self.weights = []
     raise "needs fixing"
     for w in weights:
         rng = numpy.random.RandomState(self.params.random_seed)
         m = mlp.MLP(params=self.params,
                     rng=rng,
                     input=x,
                     index=index,
                     x=x,
                     y=y)
         m.set_weights(w)
         self.members.append(m)
     return self.members
Example #18
def gauss_test():

    # Plots
    P_train = np.array([(12 * np.random.rand(1, 2)[0] - 6)
                        for i in range(0, 200)])
    P_train = np.transpose(P_train)
    X_train = P_train[0]
    Y_train = P_train[1]
    Z_train = np.reshape(np.exp(-(X_train**2 + Y_train**2) / 10), (-1, 1))

    p = mlp.MLP(2, alpha=0.9, eta=0.01, activation="sigmoid")
    p.add_layer(8)
    p.add_layer(4)
    p.add_layer(1)
    P_train = np.transpose(P_train)

    iterations = p.train(P_train, Z_train)

    print "iterations: " + str(iterations)

    P_test = np.array([(12 * np.random.rand(1, 2)[0] - 6)
                       for i in range(0, 200)])
    P_test = np.transpose(P_test)
    X_test = P_test[0]
    Y_test = P_test[1]
    # Z_test = np.exp(-(X_test ** 2+ Y_test **2)/10) - 0.5
    P_test = np.transpose(P_test)

    Z_recall = p.recall(P_test)

    GRID_POINTS = 100

    # Surface grid
    g = np.linspace(-6, 6, GRID_POINTS)

    G_X = np.array([g for i in range(0, GRID_POINTS)])
    G_Y = np.array([i * np.ones(GRID_POINTS) for i in g])
    G_Z = np.exp(-(G_Y**2 + G_X**2) / 10)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Plot a basic wireframe.
    ax.plot_wireframe(G_X, G_Y, G_Z, rstride=5, cstride=5, color="#bbaaaa")

    ax.scatter(X_test, Y_test, Z_recall, color="#00ff00")

    plt.show()
Example #19
def MLP(k, df, num_perceptrons_in_layer, max_iteration, error_threshold, learning_rate, batch_size):
    
    num_data_per_fold = round(len(df.index) / k)
    models = []
    models_accuracy = []
    
    # Creating models and counting accuracy
    for i in range(k):
        first_idx_val = i * num_data_per_fold
        last_idx_val = (i+1) * num_data_per_fold - 1

        if (first_idx_val != 0 and last_idx_val != (len(df.index) - 1)):
            df_training_top = df.iloc[:first_idx_val, :].reset_index(drop = True)
            df_training_bottom = df.iloc[(last_idx_val + 1):, :].reset_index(drop = True)
            df_training = pandas.concat([df_training_top, df_training_bottom])
        elif (first_idx_val == 0 and last_idx_val != (len(df.index) - 1)):
            df_training = df.iloc[(last_idx_val + 1):, :].reset_index(drop = True)
        elif (first_idx_val != 0 and last_idx_val == (len(df.index) - 1)):
            df_training = df.iloc[:first_idx_val, :].reset_index(drop = True)
        else:
            df_training = pandas.DataFrame()

        df_validation = df.iloc[first_idx_val:(last_idx_val + 1), :].reset_index(drop = True)  # last_idx_val is inclusive

        print("-------------------- MODEL", i+1, "--------------------")

        result_model = mlp.MLP(df_training, num_perceptrons_in_layer, max_iteration, error_threshold, learning_rate, batch_size)
        models.append(result_model)

        result_accuracy = mlp.count_accuracy(result_model, df_validation)
        models_accuracy.append(result_accuracy)
    
        # Printing each model and its accuracy
        
        models[i].print_model()
        print("ACCURACY:", models_accuracy[i])
        print("")

    best_model_idx = 0
    best_accuracy = models_accuracy[0]
    for i in range(k):
        if (models_accuracy[i] > best_accuracy):
            best_model_idx = i
            best_accuracy = models_accuracy[i]

    return models[best_model_idx]
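
A hedged call sketch for the k-fold helper above. iris_df stands for a hypothetical pre-shuffled DataFrame of feature columns plus a label column, and the layer specification is illustrative:

best_model = MLP(k=5, df=iris_df, num_perceptrons_in_layer=[4, 3],
                 max_iteration=100, error_threshold=0.01,
                 learning_rate=0.1, batch_size=16)
best_model.print_model()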
Example #20
def counter_exp(args):
    """Experiments with a counter."""

    torch.set_default_dtype(torch.double)
    X_train = np.loadtxt(f'{args.Output_path}X_train.csv', delimiter=',')
    X_test_ID = np.loadtxt(f'{args.Output_path}X_test_ID.csv', delimiter=',')
    X_test_OOD = np.loadtxt(f'{args.Output_path}X_test_OOD.csv', delimiter=',')

    model_ID = MLP.MLP(n_feature=2,
                       n_hidden=100,
                       n_output=1,
                       activation='Sigmoid')
    model_ID.load_state_dict(torch.load(f'{args.Output_path}_induced_mlp.pt'))
    notear_model = torch.load(f'{args.Output_path}notear.pt')

    counter = dict()
    model_ID.eval()

    data = X_train.copy()
    for counter_index in range(1, args.num_xy + args.num_s, 1):

        # counterfactual on training data
        data[:, counter_index] = np.random.normal(loc=5,
                                                  scale=10,
                                                  size=data.shape[0])
        # make prediction on the modified data
        data[:, 0] = sigmoid(np.sum(data[:, 1:args.num_xy], axis=1))

        ISL_pred = model_ID(torch.tensor(data[:, 1:args.num_xy]))
        notear_model.eval()
        notear_pred = notear_model(torch.tensor(data))
        ISL_mse = mean_squared_loss(data.shape[0], data[:, 0],
                                    np.array(ISL_pred.detach().cpu())[:, 0])
        notear_mse = mean_squared_loss(
            data.shape[0], data[:, 0],
            np.array(notear_pred.detach().cpu())[:, 0])
        print(f'counteron{counter_index}_ISL_pred', ISL_mse)
        print(f'counteron{counter_index}notear_pred_train', notear_mse)

        counter[f'counteron{counter_index}_ISL_pred'] = ISL_mse
        counter[f'counteron{counter_index}notear_pred_train'] = notear_mse

    with open(f'{args.Output_path}counter_u=5s=0_mse.json', 'w+') as f:
        json.dump(counter, f, indent=4)
Example #21
def mlpMethod(candidateSet, k):
    mlp_model = mlp.MLP(isReadModel=True, isRun=True)
    mlp_model.train_model(emb=200, epoch=20)
    candidateSet = [list(i) for i in candidateSet]
    area = candidateSet[0][0]
    context = "computer_science"
    candidate = []
    for tmpcats in candidateSet[1:]:
        candidate.extend(tmpcats)

    candidate = list(set(candidate))

    scores = mlp_model.predict(area, candidate, context)
    ranked_scores = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    #print(ranked_scores)
    ranked_scores = ranked_scores[:k]
    return ranked_scores
Example #22
def select_ai(cl, data, targets, original_import):
    """ Select an AI. Create in instance of the needed class and return it. """
    ai = None

    # MLP
    if cl == "MLP":
        ai = mlp.MLP(data,
                     targets,
                     hidden_nodes=MLP_HIDDEN_NODES,
                     beta=MLP_BETA,
                     momentum=MLP_MOMENTUM)
        print "MLP created."

    # RBF
    elif cl == "RBF":
        # Adjust target format for RBF: [win, draw, loss]
        targets_rbf = ny.zeros((len(targets), 3))
        for i in range(len(targets)):
            win = 1 if targets[i] == WIN else 0
            draw = 1 if targets[i] == DRAW else 0
            loss = 1 if targets[i] == LOSS else 0
            targets_rbf[i] = [win, draw, loss]

        ai = rbf.RBF(data,
                     targets_rbf,
                     sigma=RBF_SIGMA,
                     rbfs_amount=RBF_NODES,
                     use_kmeans=RBF_KMEANS,
                     normalize=RBF_NORMALIZE)

        print "RBF created."

    # DTree
    elif cl == "DTree":
        data = []
        for value in original_import:
            zipped = zip(DATA_ATTRIBUTES, value)
            data.append(dict(zipped))
        ai = dtree.DTree(data, DATA_ATTRIBUTES, DT_TARGET_ATTRIBUTE)

        print "DTree created."

    return ai
Example #23
 def __init__(self, config):
     """ Initialize default values """
     #self.config = config
     # initialize the sbpca subsystem
     self.sbpca = sbpca.SbPca(config)
      # initialize the mlp subsystem
     self.net = mlp.MLP(config['wgt_file'], config['norms_file'])
     # parameters specific to SAcC part
     self.ptchtab = np.r_[0, np.loadtxt(config['pcf_file'])]
     self.hmm_vp = config['hmm_vp']
      # optional settings, overridable via config
      self.n_s = config.get('n_s', 10.0)
      self.start_utt = config.get('start_utt', 0)
      self.write_rownum = config.get('write_rownum', False)
      self.write_time = config.get('write_time', False)
      self.write_sbac = config.get('write_sbac', False)
      self.write_sbpca = config.get('write_sbpca', False)
      self.write_posteriors = config.get('write_posteriors', False)
      self.write_pitch = config.get('write_pitch', True)
      self.write_pvx = config.get('write_pvx', True)
      # added 2014-04-10
      self.dither_level = config.get('dither_level', 1e-3)
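
For orientation, a hedged sketch of the config dict this constructor reads. The paths are placeholders, the values illustrative, and only the keys consumed in __init__ are shown (sbpca.SbPca may expect more):

config = {
    'wgt_file': 'path/to/net.wgt',        # placeholder
    'norms_file': 'path/to/norms.txt',    # placeholder
    'pcf_file': 'path/to/pitch_map.txt',  # placeholder
    'hmm_vp': 0.9,                        # illustrative value
    'write_posteriors': True,             # any of the optional flags above
}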
Example #24
def train(data, args):

    X_train, Y_train, X_dev, Y_dev, X_test, Y_test, U_train, U_dev, U_test, classLatMedian, classLonMedian, userLocation = data
    if args.model == 'mlp':
        classifier = mlp.MLP(n_epochs=args.epochs,
                             batch_size=args.batch,
                             complete_prob=False,
                             add_hidden=True,
                             regul_coef=args.reg,
                             save_results=False,
                             hidden_layer_size=args.hid,
                             drop_out_coef=args.drop,
                             early_stopping_max_down=5)
        classifier.fit(X_train, Y_train, X_dev, Y_dev)
        y_pred_dev = classifier.f_predict(X_dev)
        y_pred_test = classifier.f_predict(X_test)
    elif args.model == 'lr':
        classifier = SGDClassifier(loss='log',
                                   penalty='elasticnet',
                                   alpha=args.reg,
                                   n_iter=5,
                                   l1_ratio=0.9,
                                   n_jobs=5,
                                   random_state=77)
        classifier.fit(X_train, Y_train)
        y_pred_dev = classifier.predict(X_dev)
        y_pred_test = classifier.predict(X_test)
    else:
        logging.error(
            'model should be either lr or mlp, current model is {}'.format(
                args.model))
        return

    logging.info('dev results')
    mean, median, acc_at_161 = geo_eval(Y_dev, y_pred_dev, U_dev,
                                        classLatMedian, classLonMedian,
                                        userLocation)
    logging.info('test results')
    mean, median, acc_at_161 = geo_eval(Y_test, y_pred_test, U_test,
                                        classLatMedian, classLonMedian,
                                        userLocation)
Example #25
def MLP(df, num_perceptrons_in_layer, max_iteration, error_threshold,
        learning_rate, batch_size):
    # split into training and testing data with a 9:1 ratio
    separator_iris = round((9 / 10) * len(df.index))
    train_iris = df.iloc[:separator_iris, :].reset_index(drop=True)
    test_iris = df.iloc[separator_iris:, :].reset_index(drop=True)

    # train on the training data
    result_model = mlp.MLP(train_iris, num_perceptrons_in_layer, max_iteration,
                           error_threshold, learning_rate, batch_size)

    # compute performance
    result_accuracy = mlp.count_accuracy(result_model, test_iris)

    # display the resulting model
    result_model.print_model()
    print('Accuracy:')
    print(result_accuracy)
    print('Confusion matrix for Virginica, Versicolor, Setosa:')
    print(ConfusionMatrixMLP(result_model, test_iris))

    return result_model
Example #26
def train_model(train_file, hidden_neurons, mode, eta, threshold, saved_model):
    dataset = np.loadtxt(train_file, delimiter=',', skiprows=1)
    print('loaded training file!')
    sys.stdout.flush()
    X = np.round(dataset[:, 1:] / 255)
    Y = dataset[:, 0]
    Y = np.array([Number.from_number(int(y)).array for y in Y])
    if mode == 'recover' and os.path.isfile(saved_model):
        model = pickle.load(open(saved_model, 'rb'))
    else:
        model = mlp.MLP(input_layer_neurons=X.shape[1],
                        hidden_layer_neurons=hidden_neurons,
                        output_layer_neurons=10)
        if not os.path.isdir('trained'):
            os.mkdir('trained')

    print('training network with', hidden_neurons, 'hidden neurons:')
    sys.stdout.flush()
    model.learn(X, Y, eta, threshold, saved_model)
    if (mode != 'predict'):
        print('trained!')
        sys.stdout.flush()
Example #27
def expt_001():
    train = load_sonar('data/sonar.train')
    dev = load_sonar('data/sonar.dev')
    test = load_sonar('data/sonar.test')

    gamma = 1  # the learning rate
    epochs = 100  # the maximum number of epochs
    records = {}  # accuracy history per logging round

    layers = [12]  # one hidden layer with 12 hidden units
    din = len(train[0][0])
    dout = len(train[0][1])

    m = mlp.MLP(layers, din, dout)

    lossfn = loss.sum_squares

    savefreq = 0  # store the model every savefreq steps
    logfreq = 1  # print updates every logfreq steps
    step = 0
    tsum = 0
    tbegin = time()
    for epoch in range(epochs):
        if ((epoch % logfreq) == 0):
            tloss = time()
            lss_train = m.loss(train, loss.zeroone)
            lss_dev = m.loss(dev, loss.zeroone)
            tloss = time() - tloss

            counter, tacc, dacc = epoch + 1, 1 - lss_train, 1 - lss_dev
            records[counter] = records.get(counter, {})
            records[counter]["train"] = records[counter].get("train",
                                                             []) + [tacc]
            records[counter]["dev"] = records[counter].get("dev", []) + [dacc]

            print('Accuracy after %d rounds is %f train, %f dev ' %
                  (epoch, tacc, dacc))
        m.gd_step(train, lossfn, gamma=gamma)
Example #28
def main():

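    # autoencoder-shaped network: input and output are both sizeX * sizeY, with a bottleneck in between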
    mlp = MLP.MLP(sizeX * sizeY, bottleneck, sizeX * sizeY)
    if (path_weights != ""):
        mlp.load(path_weights)
        print("Weights init from {}".format(path_weights))

    dataset = Dataset.Dataset((sizeX, sizeY))
    # dataset.build("../dataset/matthieu2.txt", from_vid="../dataset/full-07_11_2019-001.avi")
    # dataset.build("../dataset/mb.txt", from_vid="../dataset/bout.avi")
    # dataset.build("../dataset/mbl.txt", from_vid="../dataset/louis.avi")
    # dataset.build("../dataset/mblr.txt", from_vid="../dataset/remi.avi")
    dataset.load(path_to_dataset)
    x_train, y_train, x_test, y_test, x_valid, y_valid = dataset.split_and_process(
        delta_time, test_ratio, valid_ratio)

    err = learnDataset(mlp, x_train, y_train, x_valid, y_valid, nb_epochs)

    datasetTest = Dataset.Dataset((sizeX, sizeY))
    datasetTest.load("../dataset/test.txt")
    x, y = datasetTest.process(5)
    test_activity(mlp, x, y)

    save_session_parameters()
Example #29
def encode(eta, hidden, inputs, epochs):
    #Generate inputs
    X = gen_inputs(inputs)

    #Create the MLP
    MLP = mlp.MLP(eta, 1, hidden)

    #Initialize the MLP
    MLP.learn(X, X)

    #Stop condition
    not_converged = True

    #counter until convergence
    i = 0

    while not_converged:
        #Continue learning
        MLP.continue_learning(epochs)
        error = np.round(MLP.predict(X, X) - X)
        if np.all(error == 0):
            not_converged = False
        i += epochs
    return MLP, i
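
A hedged usage sketch for encode(). The hyper-parameters are illustrative, and gen_inputs() is assumed to build the input patterns as in the original module:

net, steps = encode(eta=0.5, hidden=3, inputs=8, epochs=100)
print('converged after', steps, 'epochs')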
Example #30
def train_algo(params):
    """
        Fetch the total data to train the model on
    """
    dataset = data.DataReader(params['data']['type'])
    
    if params['data']['type'] == 'sql':
        dataset.fetch_data(con=dbconfig.connections_[params['data']['source']](), query=params['data']['detail'])
    elif params['data']['type'] == 'file':
        dataset.fetch_data(file=params['data']['source'])
    
    data_ = data.TrainingData(dataset, target='Put', data_split=params['data']['data_split'])

    """
        Construct the MLP and optimizer arguments
    """

    mlp_args = {'data':data_, **params['algo']}

    """
        Define and train the network
    """
    mdl = mlp.MLP(**mlp_args)

    # train the network
    train_history = mdl.sgd_train(**params['train'], optim_options=params['optim'])
    
    # save the model
    mdl.save(params['results']['model_path'])

    # test the network
    test_results = mdl.test()

    # show results
    mdl.test_scatter(test_results, save_fig=params['results']['test_scatter'])
    mdl.plot_train_valid_curve(train_history, save_fig=params['results']['train_curve'])
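
For orientation, a hedged sketch of the params structure train_algo() reads. Every top-level key is referenced in the code above, but the values, and the contents of 'algo', 'train', and 'optim', are placeholders:

params = {
    'data': {'type': 'file', 'source': 'prices.csv', 'detail': None,
             'data_split': (0.7, 0.15, 0.15)},   # split format is an assumption
    'algo': {'hidden_sizes': [64, 64]},          # hypothetical mlp.MLP kwargs
    'train': {'epochs': 100, 'batch_size': 32},  # hypothetical sgd_train kwargs
    'optim': {'lr': 1e-3},                       # hypothetical optimizer options
    'results': {'model_path': 'model.pt',
                'test_scatter': 'scatter.png',
                'train_curve': 'curve.png'},
}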