Code Example #1
File: train.py Project: prahladanand17/DocEmbed
    def __init__(self):
        # Build dataloaders, vocabulary, and numericalized texts
        self.databunch = TextClasDataBunch.from_csv(args.data, bs=10,
                                                    csv_name='data.csv',
                                                    pad_first=True, pad_idx=1)

        '''
        Build the word_to_idx mapping from the dataset vocabulary's
        idx_to_word list (vocab.itos)
        '''

        def build_word_to_idx(idx_to_word):
            return {word: i for i, word in enumerate(idx_to_word)}
        idx_to_word = self.databunch.vocab.itos
        word_to_idx = build_word_to_idx(idx_to_word)

        models = {}

        models['LSTM'] = LSTM(vocab_size=len(idx_to_word), embedding_dim=300,
                              hidden_size=300, word_to_idx=word_to_idx,
                              glove_path=args.embedding)
        models['GloVe'] = Word_Vector_Model(vocab_size=len(idx_to_word),
                                            embedding_dim=300,
                                            word_to_idx=word_to_idx,
                                            glove_path=args.embedding)
        models['GRU'] = GRU(vocab_size=len(idx_to_word), embedding_dim=300,
                            hidden_size=300, word_to_idx=word_to_idx,
                            glove_path=args.embedding)

        self.model = models[args.model]
        #self.model = nn.DataParallel(self.model)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

        self.train_dataloader = self.databunch.train_dl
        self.valid_dataloader = self.databunch.valid_dl

        self.epochs = 20
        self.learning_rate = 0.0001
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        self.loss_function = nn.CrossEntropyLoss()
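
A minimal sketch of how a training step might consume these attributes; the train method name and the (inputs, labels) batch format are assumptions, not part of the original project:

    def train(self):
        # Hypothetical loop over the attributes built in __init__;
        # assumes each batch yields (inputs, labels) tensors.
        self.model.train()
        for epoch in range(self.epochs):
            for inputs, labels in self.train_dataloader:
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                self.optimizer.zero_grad()
                loss = self.loss_function(self.model(inputs), labels)
                loss.backward()
                self.optimizer.step()
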
Code Example #2
    def init_model(self):
        '''
        Build the model selected by self.opts.model. Supported values:
        pooling, cnn, multi_channel_cnn, multi_layer_cnn, char_cnn,
        lstm, gru, lstm_cnn, treelstm, cnn_treelstm, lstm_treelstm
        :return:
        '''
        if self.opts.model == 'pooling':
            self.model = Pooling(opts=self.opts,
                                 vocab=self.vocab,
                                 label_vocab=self.label_vocab)
        elif self.opts.model == 'cnn':
            self.model = CNN(opts=self.opts,
                             vocab=self.vocab,
                             label_vocab=self.label_vocab)
        elif self.opts.model == 'multi_channel_cnn':
            self.model = Multi_Channel_CNN(opts=self.opts,
                                           vocab=self.vocab,
                                           label_vocab=self.label_vocab)
        elif self.opts.model == 'multi_layer_cnn':
            self.model = Multi_Layer_CNN(opts=self.opts,
                                         vocab=self.vocab,
                                         label_vocab=self.label_vocab)
        elif self.opts.model == 'char_cnn':
            self.char = True
            self.model = Char_CNN(opts=self.opts,
                                  vocab=self.vocab,
                                  char_vocab=self.char_vocab,
                                  label_vocab=self.label_vocab)
        elif self.opts.model == 'lstm':
            self.model = LSTM(opts=self.opts,
                              vocab=self.vocab,
                              label_vocab=self.label_vocab)
        elif self.opts.model == 'gru':
            self.model = GRU(opts=self.opts,
                             vocab=self.vocab,
                             label_vocab=self.label_vocab)
        elif self.opts.model == 'lstm_cnn':
            self.model = LSTM_CNN(opts=self.opts,
                                  vocab=self.vocab,
                                  label_vocab=self.label_vocab)
        elif self.opts.model == 'treelstm':
            self.tree = True
            self.model = BatchChildSumTreeLSTM(opts=self.opts,
                                               vocab=self.vocab,
                                               label_vocab=self.label_vocab)
        elif self.opts.model == 'cnn_treelstm':
            self.tree = True
            self.model = CNN_TreeLSTM(opts=self.opts,
                                      vocab=self.vocab,
                                      label_vocab=self.label_vocab)
        elif self.opts.model == 'lstm_treelstm':
            self.tree = True
            self.model = LSTM_TreeLSTM(opts=self.opts,
                                       vocab=self.vocab,
                                       label_vocab=self.label_vocab)
        else:
            raise RuntimeError('unknown model: {}'.format(self.opts.model))

        if self.opts.use_cuda:
            self.model = self.model.cuda()
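
The elif chain can also be driven by a lookup table; a sketch assuming the same constructors (the char_cnn and tree variants, which need extra flags or arguments, are left to explicit branches):

MODEL_CLASSES = {
    'pooling': Pooling,
    'cnn': CNN,
    'multi_channel_cnn': Multi_Channel_CNN,
    'multi_layer_cnn': Multi_Layer_CNN,
    'lstm': LSTM,
    'gru': GRU,
    'lstm_cnn': LSTM_CNN,
}

def init_model_from_table(self):
    # Table-driven alternative to the elif chain above (a sketch).
    if self.opts.model not in MODEL_CLASSES:
        raise RuntimeError('unknown model: {}'.format(self.opts.model))
    self.model = MODEL_CLASSES[self.opts.model](opts=self.opts,
                                                vocab=self.vocab,
                                                label_vocab=self.label_vocab)
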
Code Example #3
File: TextRNN.py Project: aaronwwy/NLP-Pytorch
    def __init__(self, embedding_dim, output_dim, hidden_size, num_layers,
                 bidirectional, dropout, pretrained_embeddings):
        super(TextRNN, self).__init__()

        self.embedding = nn.Embedding.from_pretrained(pretrained_embeddings,
                                                      freeze=False)
        self.rnn = LSTM(embedding_dim, hidden_size, num_layers, bidirectional,
                        dropout)

        # both directions' final states feed the classifier, hence hidden_size * 2
        self.fc = Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
Code Example #4
    def __init__(self, vocab_size, embedding_dim, output_dim, hidden_size,
                 num_layers, bidirectional, dropout, pad_idx):
        super(TextRNN, self).__init__()

        self.embedding = nn.Embedding(vocab_size,
                                      embedding_dim,
                                      padding_idx=pad_idx)
        self.rnn = LSTM(embedding_dim, hidden_size, num_layers, bidirectional,
                        dropout)

        self.fc = nn.Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
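
Neither TextRNN variant shows its forward pass. A plausible sketch, assuming the custom LSTM wrapper mirrors nn.LSTM's return convention and that the last forward and backward hidden states are concatenated (both assumptions):

    def forward(self, text):
        # text: (batch, seq_len) token ids
        embedded = self.dropout(self.embedding(text))
        output, (hidden, cell) = self.rnn(embedded)
        # join the final forward (hidden[-2]) and backward (hidden[-1]) states
        hidden = self.dropout(torch.cat((hidden[-2], hidden[-1]), dim=1))
        return self.fc(hidden)
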
Code Example #5
def get_model(model):
    """
    Return the model instance matching `model`.
    """
    assert model in ['CHAR', 'WIDE', 'VDCNN', 'LSTM', 'MULTI_LSTM']

    if model == 'CHAR':
        return Char_CNN(config, conv_layers, fc_layers)
    elif model == 'WIDE':
        return Wide_CNN(config, wconv_layers)
    elif model == 'VDCNN':
        return VDCNN2(config)
    elif model == 'LSTM':
        return LSTM(config, fc_layers)
    elif model == 'MULTI_LSTM':
        return MULTI_LSTM(config, fc_layers, rnn_layers)
Code Example #6
def initialize_model_and_trainer(model_properties, training_properties,
                                 datasetloader, device):
    logger.info("Model type is %s", training_properties["learner"])
    if training_properties["learner"] == "text_cnn":
        model = TextCnn(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "gru":
        model = GRU(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "lstm":
        model = LSTM(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "char_cnn":
        model = CharCNN(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "vdcnn":
        model = VDCNN(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "conv_deconv_cnn":
        model = ConvDeconvCNN(model_properties)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "transformer_google":
        model = TransformerGoogle(model_properties).model.to(device)
        trainer = Trainer.trainer_factory("single_model_trainer",
                                          training_properties, datasetloader,
                                          device)
    elif training_properties["learner"] == "lstmcrf":
        assert training_properties["task"] == "ner"
        model = LSTMCRF(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_ner_trainer",
                                          training_properties, datasetloader,
                                          device)
    else:
        raise ValueError(
            "Model is not defined! Available learner values are: 'text_cnn', 'char_cnn', 'vdcnn', 'gru', "
            "'lstm', 'conv_deconv_cnn', 'transformer_google' and 'lstmcrf'")

    return model, trainer
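
Hypothetical usage, assuming model_properties and datasetloader are already constructed; only the "learner" key (and "task" for the CRF branch) is documented by the snippet itself:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
training_properties = {"learner": "lstm"}  # other keys depend on the trainer
model, trainer = initialize_model_and_trainer(model_properties,
                                              training_properties,
                                              datasetloader, device)
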
Code Example #7
    def __init__(self, embedding_dim, output_dim, hidden_size, num_layers, bidirectional, dropout, pretrained_embeddings):
        super(LSTMATT, self).__init__()

        self.embedding = nn.Embedding.from_pretrained(
            pretrained_embeddings, freeze=False)
        self.rnn = LSTM(embedding_dim, hidden_size,
                        num_layers, bidirectional, dropout)

        self.fc = nn.Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
        
        self.W_w = nn.Parameter(torch.Tensor(hidden_size * 2, hidden_size * 2))
        self.u_w = nn.Parameter(torch.Tensor(hidden_size * 2, 1))

        nn.init.uniform_(self.W_w, -0.1, 0.1)
        nn.init.uniform_(self.u_w, -0.1, 0.1)
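
W_w and u_w are the projection matrix and context vector of word-level attention (in the style of Hierarchical Attention Networks). A sketch of the attention step these parameters imply, where H is the (batch, seq_len, hidden_size * 2) RNN output:

    def attention(self, H):
        # H: (batch, seq_len, hidden_size * 2) RNN outputs
        u = torch.tanh(torch.matmul(H, self.W_w))  # (batch, seq_len, hidden*2)
        scores = torch.matmul(u, self.u_w)         # (batch, seq_len, 1)
        alpha = torch.softmax(scores, dim=1)       # weights over time steps
        return torch.sum(alpha * H, dim=1)         # (batch, hidden*2) context
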
Code Example #8
    def __init__(self, word_dim, char_dim, output_dim, hidden_size, num_layers,
                 bidirectional, dropout, word_emb, char_emb, highway_layers):
        super(TextRNNHighway, self).__init__()

        self.char_embedding = nn.Embedding.from_pretrained(char_emb,
                                                           freeze=False)
        self.word_embedding = nn.Embedding.from_pretrained(word_emb,
                                                           freeze=False)

        self.text_embedding = Embedding(highway_layers, word_dim, char_dim)

        self.rnn = LSTM(word_dim + char_dim, hidden_size, num_layers,
                        bidirectional, dropout)

        self.fc = Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
Code Example #9
def initialize_model_and_trainer(model_properties, training_properties, datasetloader, device):
    logger.info("Model type is %s", training_properties["learner"])
    if training_properties["learner"] == "text_cnn":
        model = TextCnn(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
    elif training_properties["learner"] == "gru":
        model = GRU(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
    elif training_properties["learner"] == "lstm":
        model = LSTM(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
    elif training_properties["learner"] == "char_cnn":
        model = CharCNN(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
    elif training_properties["learner"] == "vdcnn":
        model = VDCNN(model_properties).to(device)
        trainer = Trainer.trainer_factory("single_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
    elif training_properties["learner"] == "conv_deconv_cnn":
        convDeconveCNN = ConvDeconvCNN(model_properties)
        encoderCNN = convDeconveCNN.encoder.to(device)
        decoderCNN = convDeconveCNN.decoder.to(device)
        classifier = convDeconveCNN.classifier.to(device)
        trainer = Trainer.trainer_factory("multiple_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
        model = [encoderCNN, decoderCNN, classifier]
    elif training_properties["learner"] == "transformer_google":
        model = TransformerGoogle(model_properties).model.to(device)
        trainer = Trainer.trainer_factory("single_model_trainer", training_properties, datasetloader.train_iter,
                                          datasetloader.val_iter, datasetloader.test_iter, device)
    elif training_properties["learner"] == "crf":
        model = ConditionalRandomField().to(device)
    else:
        raise ValueError("Model is not defined! Available learner values are : 'text_cnn', 'char_cnn', 'vdcnn', 'gru', "
                         "'lstm', 'conv_deconv_cnn' and 'transformer_google'")

    return model, trainer
Code Example #10
    def __init__(self, word_dim, char_dim, output_dim, hidden_size, num_layers,
                 bidirectional, dropout, word_emb, char_emb, highway_layers):
        super(LSTMATTHighway, self).__init__()

        self.char_embedding = nn.Embedding.from_pretrained(char_emb,
                                                           freeze=False)
        self.word_embedding = nn.Embedding.from_pretrained(word_emb,
                                                           freeze=False)

        self.text_embedding = Embedding(highway_layers, word_dim, char_dim)

        self.rnn = LSTM(word_dim + char_dim, hidden_size, num_layers,
                        bidirectional, dropout)

        self.fc = nn.Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

        self.W_w = nn.Parameter(torch.Tensor(hidden_size * 2, hidden_size * 2))
        self.u_w = nn.Parameter(torch.Tensor(hidden_size * 2, 1))

        nn.init.uniform_(self.W_w, -0.1, 0.1)
        nn.init.uniform_(self.u_w, -0.1, 0.1)
Code Example #11
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)

# Workaround (kept disabled) for imdb.load_data failing under NumPy >= 1.16.3,
# where np.load defaults to allow_pickle=False:
# np_load_old = np.load
# np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
num_words = 20000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)

epoch = 5
batch_size = 64

word_to_index = imdb.get_word_index()
# shift indices by 3 so ids 0-2 can hold the special tokens below
word_to_index = {key: (value + 3) for key, value in word_to_index.items()}
word_to_index["<PAD>"] = 0
word_to_index["<START>"] = 1
word_to_index["<UNK>"] = 2
index_to_word = {value: key for key, value in word_to_index.items()}

def print_sentence(id_list):
    print(' '.join([index_to_word[id] for id in id_list if id != 0]))

print("Train-set size: ", len(x_train))
print("Test-set size:  ", len(x_test))

x_train, x_test, max_tokens = utils.pad_sequences(train_sequences=x_train, test_sequences=x_test)

lstm = LSTM(num_words, max_tokens)
lstm.build()
lstm.train(epochs=epoch, x_train=x_train, y_train=y_train, batch_size=batch_size)
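
utils.pad_sequences is not shown. A plausible sketch of what it might do, assuming both splits are zero-padded to a length derived from the training data (the exact policy is an assumption):

from tensorflow.keras.preprocessing.sequence import pad_sequences as keras_pad

def pad_sequences(train_sequences, test_sequences):
    # Sketch: pad/truncate both splits to the longest training sequence.
    max_tokens = max(len(seq) for seq in train_sequences)
    x_train = keras_pad(train_sequences, maxlen=max_tokens)
    x_test = keras_pad(test_sequences, maxlen=max_tokens)
    return x_train, x_test, max_tokens
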
Code Example #12
def main():
    parser = argparse.ArgumentParser()
    # parameters
    parser.add_argument("--epoch",
                        default=100,
                        type=int,
                        help="the number of epoches needed to train")
    parser.add_argument("--lr",
                        default=2e-5,
                        type=float,
                        help="the learning rate")
    parser.add_argument("--classifier",
                        default='lstm',
                        type=str,
                        help="the classifier, such as LSTM, CNN ...")
    parser.add_argument("--hidden_size",
                        default=64,
                        type=int,
                        help="the hidden size")
    parser.add_argument("--output_size",
                        default=1,
                        type=int,
                        help="the output label size")
    parser.add_argument("--early_stopping",
                        default=15,
                        type=int,
                        help="Tolerance for early stopping (# of epochs).")
    parser.add_argument("--load_model",
                        default=None,
                        type=str,
                        help="load pretrained model for testing")

    parser.add_argument('--n_models', nargs='+')

    args = parser.parse_args()

    path = 'D:/MyDocument/Project/CarSalesPrediction/'
    T = 12
    data = dataset.read_data(path)
    df, feature_list, y_list = dataset.get_feature(data, T)
    train_x, train_y, valid_x, valid_y, test_x, model2index = dataset.get_Xy(
        df, T, feature_list, y_list)

    X_train, X_val, X_test = dataset.normalization(train_x, valid_x, test_x)
    input_size = X_train.shape[2]
    print("feature_dim: ", input_size)

    model = LSTM(args.output_size, [args.hidden_size], input_size)

    X_train = torch.from_numpy(X_train.astype(np.float32))
    y_train = torch.from_numpy(train_y.astype(np.float32))
    X_val = torch.from_numpy(X_val.astype(np.float32))
    y_val = torch.from_numpy(valid_y.astype(np.float32))
    X_test = torch.from_numpy(X_test.astype(np.float32))

    y_mean = []
    train_y = []
    valid_y = []
    for step in range(4):
        yy = y_train[:, step].view(-1, 1)
        train_y.append(yy)
        mean_y = yy.mean()
        y_mean.append(mean_y)
        valid_y.append(y_val[:, step])

    # print(X_val)

    if args.load_model:
        evaluation_public = pd.read_csv(
            path + 'test2_dataset/evaluation_public.csv')[[
                'id', 'regMonth', 'forecastVolum'
            ]]
        id_list = evaluation_public['id'].values
        evaluation_result = DataFrame({'id': id_list})
        # forecastVolum = []
        for ii, model_name in enumerate(args.n_models):
            print(model_name)
            model.load_state_dict(
                torch.load(args.load_model + '/' + model_name))
            model.eval()
            val_prediction = model(X_val)
            score = get_score(
                np.expm1(val_prediction.detach().numpy() + y_mean[ii].numpy()),
                np.expm1(valid_y[ii].numpy()), model2index)
            logger.info('Valid Score:%.4f', score)
            test_prediction = model(X_test)
            test_prediction = np.expm1(test_prediction.detach().numpy() +
                                       y_mean[ii].numpy()).reshape(
                                           -1).tolist()
            evaluation_public.loc[(evaluation_public.regMonth == ii + 1),
                                  'forecastVolum'] = test_prediction
            # forecastVolum.extend(test_prediction)
            del model
            model = LSTM(args.output_size, [args.hidden_size], input_size)
        # evaluation_result['forecastVolum'] = forecastVolum
        evaluation_public[['id', 'forecastVolum']].round().astype(int).to_csv(
            'evaluation_public.csv', index=False)
        exit()

    for i in range(4):
        train_model(model, X_train, train_y[i], y_mean[i], X_val, valid_y[i],
                    args.epoch, model2index, args.lr, i)
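
The expm1/y_mean bookkeeping implies the sales targets were log1p-transformed and mean-centered before training; the inverse transform in isolation (the forward transform itself is inferred, not shown above):

import numpy as np

def invert_target(pred_centered, target_mean):
    # Predictions are residuals around the mean in log1p space:
    # add the mean back, then expm1 to recover raw volumes.
    return np.expm1(pred_centered + target_mean)
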
Code Example #13
File: main.py Project: Mizar77/ABSA1
args.embed_num = len(text_field.vocab)
args.class_num = len(label_field.vocab) - 1  # 4: positive, negative, neutral, conflict
args.text_field = text_field

args.cuda = (not args.no_cuda) and torch.cuda.is_available()
del args.no_cuda

print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
    print("\t{}={}".format(attr.upper(), value))

models = [
    TD_LSTM(args),
    TC_LSTM(args),
    LSTM(args),
    AE_LSTM(args),
    ATAE_LSTM(args),
    IAN(args),
    RAM(args)
]
models_name = [
    "TD_LSTM", "TC_LSTM", "LSTM", "AE_LSTM", "ATAE_LSTM", "IAN", "RAM"
]

if args.cuda:
    torch.cuda.set_device(args.device)
    for i in range(len(models)):
        models[i] = models[i].cuda()

# train and test for all models
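
The loop itself is cut off at this point; a hedged sketch of what "train and test for all models" might look like (train and evaluate are hypothetical helpers, not shown in the project):

for model, name in zip(models, models_name):
    print("===== {} =====".format(name))
    train(model, args)     # hypothetical training routine
    evaluate(model, args)  # hypothetical evaluation routine
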
Code Example #14
File: test_LSTM.py Project: ezosa/DL2021-project
log_file = open(save_path + model_name + "_test_logs.txt", 'a+')

# prepare test loader for the test set
test_file = args.data_path + args.test_file
test_data = ArticlesDataset(csv_file=test_file, vocab=vocab, label2id=label2id)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)

scores_dict = {'f1': [], 'recall': [], 'precision': [], 'confidence': []}

for run_num in range(args.num_runs):
    model_run_name = model_name + "_run" + str(run_num + 1)
    print("-" * 10, "Run", run_num + 1, "-" * 10)
    print("Model name:", model_run_name)
    print("Loading model from", save_path + model_run_name + ".pt")

    best_model = LSTM(lstm_args=lstm_args, mlp_args=mlp_args).to(device)

    optimizer = torch.optim.Adam(best_model.parameters(), lr=0.005)
    load_checkpoint(save_path + model_run_name + ".pt", best_model, optimizer,
                    device, log_file)

    results = evaluate(best_model, test_loader)
    scores_dict['f1'].append(results['f1'])
    scores_dict['recall'].append(results['recall'])
    scores_dict['precision'].append(results['precision'])

    # if args.save_confidence is True:
    #     scores_dict['confidence'].append(results['confidence'])
    #     scores_dict['labels'].append(results['labels'])
    #     scores_dict['content'].append(results['content'])
    #     sentence_encodings = results['sentence_encodings']
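
A natural next step is to aggregate the per-run scores; a sketch (the reporting format is an assumption):

import numpy as np

# Mean and standard deviation of each metric across runs.
for metric in ('f1', 'recall', 'precision'):
    values = scores_dict[metric]
    print("{}: mean={:.4f} std={:.4f}".format(metric, np.mean(values), np.std(values)))
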
Code Example #15
File: CSL_Skeleton_LSTM.py Project: Simon-CSU/SLR
hidden1 = 512
drop_p = 0.0

# Train with Skeleton+LSTM
if __name__ == '__main__':
    # Load data
    transform = None # TODO
    train_set = CSL_Skeleton(data_path=data_path, label_path=label_path, frames=sample_duration,
        num_classes=num_classes, selected_joints=selected_joints, train=True, transform=transform)
    val_set = CSL_Skeleton(data_path=data_path, label_path=label_path, frames=sample_duration,
        num_classes=num_classes, selected_joints=selected_joints, train=False, transform=transform)
    logger.info("Dataset samples: {}".format(len(train_set)+len(val_set)))
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    # Create model
    model = LSTM(lstm_input_size=lstm_input_size, lstm_hidden_size=lstm_hidden_size, lstm_num_layers=lstm_num_layers,
        num_classes=num_classes, hidden1=hidden1, drop_p=drop_p).to(device)
    # Run the model parallelly
    if torch.cuda.device_count() > 1:
        logger.info("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    # Create loss criterion & optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Start training
    logger.info("Training Started".center(60, '#'))
    for epoch in range(epochs):
        # Train the model
        train_epoch(model, criterion, optimizer, train_loader, device, epoch, logger, log_interval, writer)

        # Validate the model
Code Example #16
def main(args):
    best_er1 = 0

    # Check if CUDA is enabled
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Load data
    root = args.datasetPath

    files = []
    train_ids = []
    test_ids = []

    if os.path.isfile('file_list.txt'):
        print("File exists")
    else:
        print("File does not exist")
        print("Prepare files")
        files = [
            f for f in os.listdir(root)
            if os.path.isfile(os.path.join(root, f))
        ]

        idx = np.random.permutation(len(files))
        idx = idx.tolist()
        files = [files[i] for i in idx[:]]

        with open('file_list.txt', "w") as myfile:
            for f in files:
                myfile.write("%s\n" % f)

    with open("file_list.txt") as file2:
        files = [line.rstrip("\n") for line in file2]

    chunk = int(len(files) / 10)

    train_ids = []
    test_ids = []

    print(len(files))
    for i in range(10):
        if i == int(args.fold):
            test_ids = files[i * chunk:i * chunk + chunk]
            print("test: " + str(i * chunk) + ":" + str(i * chunk + chunk))
            continue

        train_ids += files[i * chunk:i * chunk + chunk]
        print("train: " + str(i * chunk) + ":" + str(i * chunk + chunk))

    all_data = utils.Qm9(
        root,
        files,
        edge_transform=datasets.qm9_edges,
        e_representation="raw_distance",
    )

    data_train = utils.Qm9(
        root,
        train_ids,
        edge_transform=datasets.qm9_edges,
        e_representation="raw_distance",
    )

    data_test = utils.Qm9(
        root,
        test_ids,
        edge_transform=datasets.qm9_edges,
        e_representation="raw_distance",
    )

    # Define model and optimizer
    print("Define model")
    # Select one graph
    g_tuple, l = data_train[1]
    g, h_t, e = g_tuple

    print("\tStatistics")
    stat_dict = datasets.get_graph_stats(all_data,
                                         ["target_mean", "target_std"])

    # Identify atoms in all files

    data_train.set_target_transform(lambda x: datasets.normalize_data(
        x, stat_dict["target_mean"], stat_dict["target_std"]))

    data_test.set_target_transform(lambda x: datasets.normalize_data(
        x, stat_dict["target_mean"], stat_dict["target_std"]))

    # Data Loader
    train_loader = torch.utils.data.DataLoader(
        data_train,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=datasets.collate_g,
        num_workers=args.prefetch,
        pin_memory=True,
    )

    test_loader = torch.utils.data.DataLoader(
        data_test,
        batch_size=args.batch_size,
        collate_fn=datasets.collate_g,
        num_workers=args.prefetch,
        pin_memory=True,
    )

    print("\tCreate model")
    in_n = [len(h_t[0]), len(list(e.values())[0])]
    hidden_state_size = 73
    message_size = 73
    n_layers = 3
    l_target = len(l)
    type = "regression"
    if args.model == "MPNNv2":
        model = MPNNv2(in_n, [5, 15, 15], [10, 20, 20], l_target, type=type)
    elif args.model == "MPNNv3":
        model = MPNNv3([1, 2, 3, 4],
                       in_n, [5, 15, 15],
                       30,
                       l_target,
                       type=type)
    elif args.model == "LSTM":
        model = LSTM(in_n,
                     hidden_state_size,
                     message_size,
                     n_layers,
                     l_target,
                     type=type)
    else:
        model = MPNN(in_n,
                     hidden_state_size,
                     message_size,
                     n_layers,
                     l_target,
                     type=type)

    del in_n, hidden_state_size, message_size, n_layers, l_target, type

    print("Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    criterion = nn.MSELoss()

    # evaluation metric: mean relative absolute error
    evaluation = lambda output, target: torch.mean(
        torch.abs(output - target) / torch.abs(target))

    print("Logger")
    logger = Logger(args.logPath)

    lr_step = (args.lr - args.lr * args.lr_decay) / (
        args.epochs * args.schedule[1] - args.epochs * args.schedule[0])

    # get the best checkpoint if available without training
    if args.resume:
        checkpoint_dir = args.resume
        best_model_file = os.path.join(checkpoint_dir, "model_best.pth")
        if not os.path.isdir(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        if os.path.isfile(best_model_file):
            print("=> loading best model '{}'".format(best_model_file))
            checkpoint = torch.load(best_model_file)
            args.start_epoch = checkpoint["epoch"]
            best_acc1 = checkpoint["best_er1"]
            model.load_state_dict(checkpoint["state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            print("=> loaded best model '{}' (epoch {})".format(
                best_model_file, checkpoint["epoch"]))
        else:
            print("=> no best model found at '{}'".format(best_model_file))

    print("Check cuda")
    if args.cuda:
        print("\t* Cuda")
        model = model.cuda()
        criterion = criterion.cuda()

    # Epoch for loop
    for epoch in range(0, args.epochs):

        if (epoch > args.epochs * args.schedule[0]
                and epoch < args.epochs * args.schedule[1]):
            args.lr -= lr_step
            for param_group in optimizer.param_groups:
                param_group["lr"] = args.lr

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, evaluation,
              logger)

        # evaluate on test set
        #er1 = validate(valid_loader, model, criterion, evaluation, logger)

        #is_best = er1 > best_er1
        #best_er1 = min(er1, best_er1)

        is_best = True
        best_er1 = 1

        datasets.save_checkpoint(
            {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "best_er1": best_er1,
                "optimizer": optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume,
        )

        # Logger step
        logger.log_value("learning_rate", args.lr).step()

    # get the best checkpoint and test it with test set
    if args.resume:
        checkpoint_dir = args.resume
        best_model_file = os.path.join(checkpoint_dir, "model_best.pth")
        if not os.path.isdir(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        if os.path.isfile(best_model_file):
            print("=> loading best model '{}'".format(best_model_file))
            checkpoint = torch.load(best_model_file)
            args.start_epoch = checkpoint["epoch"]
            best_acc1 = checkpoint["best_er1"]
            model.load_state_dict(checkpoint["state_dict"])
            if args.cuda:
                model.cuda()
            optimizer.load_state_dict(checkpoint["optimizer"])
            print("=> loaded best model '{}' (epoch {})".format(
                best_model_file, checkpoint["epoch"]))
        else:
            print("=> no best model found at '{}'".format(best_model_file))

    # For testing
    validate(test_loader, model, criterion, evaluation)
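
The lr_step expression implements a linear decay from args.lr down to args.lr * args.lr_decay, applied only between the two schedule fractions; a self-contained illustration with made-up numbers:

# lr decays linearly from 0.001 toward 0.001 * 0.1
# between 20% and 80% of 100 epochs (made-up values).
lr, lr_decay, epochs, schedule = 0.001, 0.1, 100, [0.2, 0.8]
lr_step = (lr - lr * lr_decay) / (epochs * schedule[1] - epochs * schedule[0])
for epoch in range(epochs):
    if epochs * schedule[0] < epoch < epochs * schedule[1]:
        lr -= lr_step
print(lr)  # close to 0.0001, the decayed floor
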
Code Example #17

#method = "train"
method = "read"



cnn = CNN()
cnn.data(train_data, train_labels, test_data, test_labels)
cnn.createModel()
cnn.findBestWeights(method)
cnn.testModel()

print()

#method = "train"
method = "read"


lstm = LSTM()
lstm.data(train_data, train_labels, test_data, test_labels)
lstm.createModel()
lstm.findBestWeights(method)
lstm.testModel()