Example no. 1
def main(data, train_data, test_data):
    # Load the train/test splits, using the article text as input and a
    # composite (source, source_index) identifier, then train the classifier.
    train_text, train_labels, test_text, test_labels = load_data(
        data,
        train_data,
        test_data,
        text_key='article',
        id_key=['source', 'source_index'])
    train(train_text, train_labels, test_text, test_labels)
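load_data itself is not defined in these snippets. As a rough sketch only, a loader compatible with the call sites in Examples no. 1 and no. 2 might look like the following, assuming data, train_data and test_data are JSON file paths and that every record carries a 'label' field (none of this is confirmed by the original code):

import json


def load_data(data_path, train_ids_path, test_ids_path,
              text_key='text', id_key='__index__'):
    # Hypothetical loader: data_path holds a JSON list of records, each with
    # a text field (text_key) and a 'label' field; the other two files list
    # which record ids belong to each split.
    with open(data_path) as f:
        records = json.load(f)

    def record_id(record, index):
        # id_key can be a single field name, a list of field names forming a
        # composite key, or the magic value '__index__' (position in the file).
        if id_key == '__index__':
            return index
        if isinstance(id_key, list):
            return tuple(record[k] for k in id_key)
        return record[id_key]

    by_id = {record_id(r, i): r for i, r in enumerate(records)}

    def split(ids_path):
        with open(ids_path) as f:
            ids = json.load(f)
        # JSON arrays become lists; convert composite ids back to tuples.
        ids = [tuple(i) if isinstance(i, list) else i for i in ids]
        texts = [by_id[i][text_key] for i in ids]
        labels = [by_id[i]['label'] for i in ids]
        return texts, labels

    train_text, train_labels = split(train_ids_path)
    test_text, test_labels = split(test_ids_path)
    return train_text, train_labels, test_text, test_labels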
Example no. 2
def main(data, train_data, test_data):
    # Labels to keep for training.
    CLASSES_TO_TRAIN = [0, 1, 2, 3, 4]

    # load the data
    train_text, train_labels, test_text, test_labels = load_data(
        data, train_data, test_data, id_key='__index__', text_key='text')
    # fix the data
    train_text, train_labels = fix_data(train_text, train_labels)
    test_text, test_labels = fix_data(test_text, test_labels)
    # train on the data
    train(train_text, train_labels, test_text, test_labels, CLASSES_TO_TRAIN)
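The fix_data helper is likewise not shown. Purely as an illustration of the kind of cleanup it might perform, a hypothetical version that drops empty documents and normalizes labels could look like this (the real fix_data may do something entirely different):

def fix_data(texts, labels):
    # Hypothetical cleanup: skip records with missing or blank text and
    # coerce labels to int, keeping the two lists aligned.
    fixed_texts, fixed_labels = [], []
    for text, label in zip(texts, labels):
        if not text or not str(text).strip():
            continue  # drop empty / whitespace-only documents
        fixed_texts.append(str(text).strip())
        fixed_labels.append(int(label))
    return fixed_texts, fixed_labels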
Example no. 3
    @classmethod
    def setUpClass(cls):

        # Number of points to train and test
        cls.npoints = 100
        cls.nfuture = 20

        # Set up the data used for the model: a sampled sine wave
        x = np.linspace(0, 10 * np.pi, cls.npoints + cls.nfuture)
        y = np.sin(x)

        # The test split starts at the last training point, so the two
        # splits deliberately overlap by one sample.
        y_train, y_test = y[:cls.npoints], y[cls.npoints - 1:]

        cls.trainY = y_train
        cls.testY = y_test

        cls.input_file = 'input_test.json'

        if os.path.exists(cls.input_file):
            os.remove(cls.input_file)

        with open(cls.input_file, 'w') as f:
            json.dump(list(cls.trainY), f)

        cls.model = Models.RNN(1, 32, 1)
        cls.output_file = 'model_test.pt'

        if os.path.exists(cls.output_file):
            os.remove(cls.output_file)

        ml_utils.train(cls.model,
                       ml_utils.parse_input_list(y_train),
                       lr=0.003,
                       epochs=300,
                       teacher_forcing_ratio=1,
                       verbose=False)

        ml_utils.saveModel(cls.model, cls.output_file)
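The surrounding unittest.TestCase is not shown in this fragment. Purely as an illustration, companion methods for the same class could look like the following; the method names and assertions are hypothetical, and only unittest, os, json and numpy are assumed beyond what the fragment already uses:

    @classmethod
    def tearDownClass(cls):
        # Remove the artifacts written by setUpClass.
        for path in (cls.input_file, cls.output_file):
            if os.path.exists(path):
                os.remove(path)

    def test_input_file_round_trips(self):
        # The JSON written in setUpClass should reproduce the training series.
        with open(self.input_file) as f:
            loaded = json.load(f)
        np.testing.assert_allclose(loaded, self.trainY)

    def test_model_file_written(self):
        # saveModel should have produced the serialized model on disk.
        self.assertTrue(os.path.exists(self.output_file))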
Example no. 4
    dataset = Autopo(data_folder, path, y_select)

    train_loader, val_loader, test_loader = split_balance_data(
        dataset, y_select[:3] == 'cls', batch_size)

    # Presumed GNN input sizes: node-feature dimension, edge-feature
    # dimension, and nodes per graph; model index 0 uses 6-dim edge features.
    nf_size = 4
    ef_size = 3
    nnode = 4
    if args.model_index == 0:
        ef_size = 6

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data = dataset[0].to(device)

    model = initialize_model(args.model_index, args.gnn_nodes,
                             args.predictor_nodes, args.gnn_layers, nf_size,
                             ef_size, device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.01,
                                 weight_decay=5e-4)
    criterion = MSELoss(reduction='mean').to(device)

    # ========================= Train & Test ==========================#

    model = train(train_loader, val_loader, model, n_epoch, batch_size, nnode,
                  device, args.model_index, optimizer)

    test(test_loader, model, n_epoch, batch_size, nnode, args.model_index,
         y_select[:3] == 'cls', device, optimizer, th)
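The train helper called above is not included in this snippet. As a rough sketch only, one epoch of it might look like the loop below, under the assumptions that the loaders yield torch_geometric-style batches exposing y and num_graphs, and that the model is called directly on a batch (the actual signatures are not confirmed by the original code):

def train_one_epoch(loader, model, criterion, optimizer, device):
    # Generic supervised regression step with the Adam optimizer and the
    # mean-squared-error criterion configured above.
    model.train()
    total_loss = 0.0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        pred = model(batch)              # forward pass on the graph batch
        loss = criterion(pred, batch.y)  # MSE against the selected target
        loss.backward()                  # backpropagate
        optimizer.step()                 # Adam update with weight decay
        total_loss += loss.item() * batch.num_graphs
    return total_loss / len(loader.dataset)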
Example no. 5
import ml_utils
import Models

if __name__ == '__main__':

    train_array_file = '../api/sample_data/ml_input_foo.json'
    output_model_file = '../api/sample_data/ml_model_foo.pt'

    y = ml_utils.parse_input_file(train_array_file)

    input_dimension = 1
    hidden_dimension = 32
    output_dimension = 1

    model = Models.RNN(input_dimension, hidden_dimension, output_dimension)

    ml_utils.train(model,
                   y,
                   lr=0.001,
                   epochs=500,
                   teacher_forcing_ratio=1,
                   verbose=True)
    ml_utils.saveModel(model, output_model_file)
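ml_utils itself is not shown in these examples. Tying Example no. 3 (which writes the input file with json.dump of a plain list) to this script, a plausible but entirely hypothetical reading of the two parse helpers is sketched below; the tensor shape is an assumption about what the RNN expects:

import json

import torch


def parse_input_list(values):
    # Hypothetical stand-in for ml_utils.parse_input_list: turn a plain
    # sequence of floats into a (seq_len, 1) float32 tensor.
    return torch.tensor(list(values), dtype=torch.float32).view(-1, 1)


def parse_input_file(path):
    # Hypothetical stand-in for ml_utils.parse_input_file: the input file is
    # assumed to be a JSON list, so read it and reuse parse_input_list.
    with open(path) as f:
        return parse_input_list(json.load(f))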