Code example #1
def main_training(comment=''):

    data = Data(V.images_train_dir, V.labels_train_txt)
    net = model.attention_network(data)

    validation_set = Data(V.images_valid_dir, V.labels_valid_txt)
    validation_data = validation_set.generator(4000).__next__()  # (x_val, y_val)

    now = datetime.datetime.now().replace(microsecond=0)
    name = datetime.date.today().isoformat() + '-' + now.strftime("%H-%M-%S")
    os.makedirs(V.experiments_folder + "/keras/" + name + '/weights/')
    #comment = input("Enter (or not) a comment: ")

    with open(V.experiments_folder + "/keras/" + name + "/comment.txt", "w") as f:
        f.write('   # init ')
        f.write(comment)

    train_model(net, data, name,
                validation_data=validation_data,
                learning_rate=0.001,
                loss='categorical_crossentropy',
                batch_size=8,
                epoch=22,
                steps_per_epoch=10000)
    net = load_xp_model(name)
    test_model(net, name)
Code example #2
File: run.py Project: naryrelation/TRFR
def gen_y_test(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Dataset functions
    envityvectorpath = args.ev
    relationvectorpath = args.rv
    entityvector = loadvector(envityvectorpath)
    relationvector = loadvector(relationvectorpath)
    vector = dict(entityvector, **relationvector)
    print('Loading vectors.')
    input_vocab = Vocabulary(args.invocab, vector, padding=args.padding)
    output_vocab_entity = Vocabulary(args.evocab,
                                     vector, padding=args.padding)
    output_vocab_relation = Vocabulary(args.revocab,
                                       vector, padding=args.padding)

    print('Loading datasets.')
    #save y_test 
    test2 = Data(args.test_data, input_vocab, output_vocab_entity, output_vocab_relation)
    test2.load()
    target_list1 = test2.targets1
    #target_list2 = test2.targets2
    path = './results/y_test'
    with open(path, 'w') as f:
        for i in range(len(target_list1)):
            #f.write(str(i) + '\t'+target_list1[i]+'\t'+target_list2[i]+'\n')
            f.write(str(i) + '\t' + target_list1[i]  + '\n')
    print('y_test written to file')
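The loadvector helper used above is not shown on this page. A minimal sketch, assuming a GloVe-style text file with one "<token> <v1> <v2> ..." entry per line; the actual implementation in naryrelation/TRFR may differ:

def loadvector(path):
    # Read whitespace-separated embeddings into a {token: [floats]} dict.
    vectors = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip().split()
            if parts:
                vectors[parts[0]] = [float(x) for x in parts[1:]]
    return vectors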
Code example #3
def see_animation(name):
    data = Data(V.images_train_dir, V.labels_train_txt)

    net = load_xp_model(name)
    net_attention = create_net_attention_maps(net, name)

    images, labels = data.generator(1).__next__()

    preds = data.decode_labels(data.pred2OneHot(net.predict(images)))
    atts = net_attention.predict(images)

    im_index = 0
    image = images[im_index, :, :, 0]
    pred = preds[im_index]
    att = atts[im_index, :, :, 0]

    ratio = image.shape[0] / att.shape[0]
    list_images_att = []
    for a in att:
        image_att = np.copy(image)
        for x in range(image.shape[0]):
            image_att[x, :] = image_att[x, :] * a[int(x / ratio)]

        list_images_att.append(
            [imshow(np.rot90(image_att, k=1), animated=True)])
    list_images_att.append(
        [imshow(np.rot90(np.zeros((384, 28)), k=1), animated=True)])

    fig = figure()
    ani = animation.ArtistAnimation(fig,
                                    list_images_att,
                                    interval=800,
                                    blit=True,
                                    repeat_delay=6e5)
    show()
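To keep the animation rather than only display it, matplotlib can write it to disk; a short sketch, placed before show() and assuming the Pillow writer is installed:

# Optional: persist the attention animation as a GIF (requires Pillow).
ani.save('attention_animation.gif', writer='pillow', fps=1)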
Code example #4
    def _build_dataset(self):
        self.start_id = start_id(self.output_vocab)
        self.end_id = end_id(self.output_vocab)
        data_file = ("./data/validation.csv"
                     if self.opts.infer else "./data/training.csv")
        data = Data(data_file, self.input_vocab, self.output_vocab)
        data.load()
        transform(data)
        vocab = (self.input_vocab, self.output_vocab)
        self.generator = DataGenerator(data, vocab, self.opts, self.start_id,
                                       self.end_id)
        items = next(self.generator)
        output_types = {i: tf.dtypes.as_dtype(items[i].dtype) for i in items}
        output_shapes = {i: tf.TensorShape(items[i].shape) for i in items}
        total_bytes = 0
        for i in items:
            total_bytes += items[i].nbytes
        dataset = tf.data.Dataset.from_generator(self.generator,
                                                 output_types=output_types,
                                                 output_shapes=output_shapes)
        infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset,
                                                       "InfeedQueue",
                                                       replication_factor=1)
        data_init = infeed_queue.initializer

        return dataset, infeed_queue, data_init, vocab
Code example #5
def test_model(net, name):  # the body uses a single net, not separate enc/dec
    print(" ")
    print("_____testing %s______" % name)

    data_test = Data(V.images_test_dir, V.labels_test_txt)
    gen_test = data_test.generator(3000)
    inputs, labels = adapt_data_format(*gen_test.__next__())
    outputs = net(inputs)

    loss, label_error, word_error = errors(outputs, labels, data_test)

    path = '%s/pytorch/%s/Test/' % (V.experiments_folder, name)  # no leading slash: keep the path relative, as in train_model_1
    path_loss = path + 'loss.csv'
    path_prediction = path + 'prediction.csv'
    save_in_files(path_loss, path_prediction, loss, label_error, word_error,
                  outputs, labels, data_test)
Code example #6
def test_model(net, name):
    print(' ------ testing ------')
    os.makedirs(V.experiments_folder + "/keras/" + name + '/Test/')

    data = Data(V.images_test_dir, V.labels_test_txt)
    images, labels = data.generator(4450).__next__()
    decoded_label = data.decode_labels(labels, depad=True)

    prediction = net.predict(images)
    argmax_prediction = data.pred2OneHot(prediction)
    decoded_prediction = data.decode_labels(argmax_prediction, depad=True)

    with open(V.experiments_folder + "/keras/" + name + '/Test/predictions.csv', 'w') as f:
        fieldnames = ['label', 'prediction', 'error']
        writer = csv.DictWriter(f, fieldnames=fieldnames)

        writer.writeheader()

        for l, p in zip(decoded_label, decoded_prediction):
            writer.writerow({'label': l, 'prediction': p, 'error': CER(l, p)})

    cross_val = net.evaluate(images, labels, verbose=False)
    label_error = [CER(l, p) for l, p in zip(decoded_label, decoded_prediction)]
    label_error_mean = np.mean(label_error)
    word_error = [0 if cer == 0 else 1 for cer in label_error]
    word_error_mean = np.mean(word_error)

    with open(V.experiments_folder + "/keras/" + name + '/Test/loss.csv', 'w') as f:
        fieldnames = ['name', 'value']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow({'name': 'cross-entropy', 'value': cross_val})
        writer.writerow({'name': 'label error', 'value': label_error_mean})
        writer.writerow({'name': 'word error', 'value': word_error_mean})
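The CER function used above is not defined in this snippet. A minimal sketch, assuming it is the character-level Levenshtein distance normalized by the label length:

def CER(label, prediction):
    # Edit-distance DP: prev[j] holds the distance between the first i-1
    # label characters and the first j prediction characters.
    prev = list(range(len(prediction) + 1))
    for i, lc in enumerate(label, 1):
        curr = [i]
        for j, pc in enumerate(prediction, 1):
            curr.append(min(prev[j] + 1,                # deletion
                            curr[j - 1] + 1,            # insertion
                            prev[j - 1] + (lc != pc)))  # substitution
        prev = curr
    return prev[-1] / max(len(label), 1)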
Code example #7
def maps(name):
    data = Data(V.images_train_dir, V.labels_train_txt)

    net = load_xp_model(name)
    net_attention = create_net_attention_maps(net, name)

    images, labels = data.generator(10).__next__()

    preds = data.decode_labels(data.pred2OneHot(net.predict(images)))
    atts = net_attention.predict(images)

    panel = np.zeros((35, 35, 3), dtype=np.uint8)  # np.int was removed from NumPy; cv2 expects 8-bit images
    att = atts[0, :, :, 0]

    x = 4
    y = 2
    panel[x:x + att.shape[0], y:y + att.shape[1], 1] = att * 255 / np.max(att)
    panel = cv2.resize(panel, (500, 500))

    cv2.imwrite('experiments/' + name + '/panel.jpg', panel)

    print('done')
Code example #8
File: run.py Project: naryrelation/TRFR
def testmodel(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Dataset functions
    envityvectorpath = args.ev
    relationvectorpath = args.rv
    entityvector = loadvector(envityvectorpath)
    relationvector = loadvector(relationvectorpath)
    vector = dict(entityvector, **relationvector)
    print('Loading vectors.')
    input_vocab = Vocabulary(args.invocab, vector, padding=args.padding)
    output_vocab_entity = Vocabulary(args.evocab,
                                     vector, padding=args.padding)
    output_vocab_relation = Vocabulary(args.revocab,
                                       vector, padding=args.padding)

    print('Loading datasets.')
    test = Data(args.test_data, input_vocab, output_vocab_entity, output_vocab_relation)
    test.load()
    test.transform(vector)

    print('Test Datasets Loaded.')

    model = load_model('./savemodel/model1.h5',
                       custom_objects={'AttentionLayer': AttentionLayer})
    print('Model Loaded. Start test.')
    #prediction = model.predict([test.inputs1, test.inputs2,test.inputs3,test.inputs4, test.inputs5])
    prediction = model.predict([test.inputs1, test.inputs2, test.inputs3])

    #/result/y_pre
    p_prediction1 = list(prediction.flatten())
    #p_prediction2 = list(prediction[1].flatten())
    #num_entity = output_vocab_entity.size()
    num_relation = output_vocab_relation.size()
    # for m in range(int(len(p_prediction)/num)):
    #     prediction_list.append('')
    prediction_list1 = [[0 for col in range(num_relation)]
                        for row in range(int(len(p_prediction1) / num_relation))]
    #prediction_list2 = [[0 for col in range(num_entity)] for row in range(int(len(p_prediction2) / num_entity))]
    for i in range(len(p_prediction1)):
        j = int(i / num_relation)
        k = i % num_relation
        prediction_list1[j][k] = [k, p_prediction1[i]]
    # for i in range(len(p_prediction2)):
    #     j = int(i / num_entity)
    #     k = i % num_entity
    #     prediction_list2[j][k]=[k,p_prediction2[i]]
    pretarget1 = []
    pretarget2 = []
    for i in range(len(prediction_list1)):
        templist1 = prediction_list1[i]
        templist1.sort(key=takeSecond, reverse=True)
        templist11 = output_vocab_relation.int_to_string(templist1)
        pretarget1.append(templist11[:5])
        pretarget2.append(templist1)
    listinfile(pretarget1, './results/y_pre1')
    listinfile(pretarget2, './results/y_pre2')
    print('y_pre1 and y_pre2 written to file')
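The takeSecond and listinfile helpers are not shown on this page. Plausible minimal sketches, assuming takeSecond is the sort key for the [index, score] pairs built above and listinfile writes one indexed row per line; the real TRFR implementations may differ:

def takeSecond(pair):
    # Sort key: the prediction score in an [index, score] pair.
    return pair[1]

def listinfile(rows, path):
    # Write each row as "<row index>\t<row>".
    with open(path, 'w') as f:
        for i, row in enumerate(rows):
            f.write(str(i) + '\t' + str(row) + '\n')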
Code example #9
File: run.py Project: theSage21/keras-attention
def main(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Dataset functions
    input_vocab = Vocabulary('./data/human_vocab.json', padding=args.padding)
    output_vocab = Vocabulary('./data/machine_vocab.json',
                              padding=args.padding)

    print('Loading datasets.')

    training = Data(args.training_data, input_vocab, output_vocab)
    validation = Data(args.validation_data, input_vocab, output_vocab)
    training.load()
    validation.load()
    training.transform()
    validation.transform()

    print('Datasets Loaded.')
    print('Compiling Model.')
    model = simpleNMT(pad_length=args.padding,
                      n_chars=input_vocab.size(),
                      n_labels=output_vocab.size(),
                      embedding_learnable=False,
                      encoder_units=256,
                      decoder_units=256,
                      trainable=True,
                      return_probabilities=False)

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', all_acc])
    print('Model Compiled.')
    print('Training. Ctrl+C to end early.')

    try:
        kwargs = dict(generator=training.generator(args.batch_size),
                      steps_per_epoch=100,
                      validation_data=validation.generator(args.batch_size),
                      validation_steps=100,
                      callbacks=[cp],
                      workers=1,
                      verbose=1,
                      epochs=args.epochs)
        model.fit_generator(**kwargs)

    except KeyboardInterrupt:
        print('Model training stopped early.')

    print('Model training complete.')

    run_examples(model, input_vocab, output_vocab)
Code example #10
from data.reader import Data
from data.vars import Vars

import numpy as np

V = Vars()

train = Data(V.images_train_dir, V.labels_train_txt, normalize_pixels=False)
test = Data(V.images_test_dir, V.labels_test_txt, normalize_pixels=False)

gen_train = train.generator(4000)
gen_test = test.generator(4000)

images_train, _ = gen_train.__next__()
images_test, _ = gen_test.__next__()

print("mean train %f" % np.mean(images_train))
print("mean test %f" % np.mean(images_test))
print("var train %f" % np.std(images_train))
print("var test %f" % np.std(images_test))
Code example #11
    import matplotlib.pyplot as plt

    # the name of the experiment folder is the date in format year-month-day-hour-minute-second
    now = datetime.datetime.now().replace(microsecond=0)
    name = datetime.date.today().isoformat() + '-' + now.strftime("%H-%M-%S")

    # experiment setup
    nb_epoch = 10
    batch_size = 8
    nb_batch = int(len(os.listdir(V.images_train_dir)) / batch_size)
    lr = 1e-3
    optimizer = 'Adam'
    attention_method = 'gen'  # alternative: 'dot'
    loss_function = 'cross_entropy'

    data_train = Data(V.images_train_dir, V.labels_train_txt)
    data_valid = Data(V.images_valid_dir, V.labels_valid_txt)

    input_dim = [data_train.im_length, data_train.im_height]
    feature_map_dim = 224
    dec_state_size = 256
    output_vocab_size = data_train.vocab_size

    net = EncoderDecoder(input_dim, feature_map_dim, dec_state_size, output_vocab_size)

    net = train_model_1(net, name,
                        data_train=data_train, data_valid=data_valid,
                        nb_epoch=nb_epoch, nb_batch=nb_batch, batch_size=batch_size,
                        lr=lr, optimizer=optimizer,
                        attention_method=attention_method, loss_function=loss_function)
Code example #12
File: run.py Project: naryrelation/TRFR
def main(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Dataset functions
    envityvectorpath = args.ev
    relationvectorpath = args.rv
    entityvector = loadvector(envityvectorpath)
    relationvector = loadvector(relationvectorpath)
    vector = dict(entityvector, **relationvector)
    print('Loading vectors.')
    input_vocab = Vocabulary(args.invocab, vector, padding=args.padding)
    output_vocab_entity = Vocabulary(args.evocab,
                                     vector, padding=args.padding)
    output_vocab_relation = Vocabulary(args.revocab,
                                       vector, padding=args.padding)

    print('Loading datasets.')

    training = Data(args.training_data, input_vocab, output_vocab_entity,
                    output_vocab_relation)
    validation = Data(args.validation_data, input_vocab, output_vocab_entity,
                      output_vocab_relation)
    test = Data(args.test_data, input_vocab, output_vocab_entity,
                output_vocab_relation)
    training.load()
    validation.load()
    test.load()
    training.transform(vector)
    validation.transform(vector)
    test.transform(vector)

    print('Datasets Loaded.')
    print('Compiling Model.')
    model = simpleNMT2(pad_length=args.padding,
                       n_chars=100,
                       entity_labels=output_vocab_entity.size(),
                       relation_labels=output_vocab_relation.size(),
                       dim=100,
                       embedding_learnable=False,
                       encoder_units=args.units,
                       decoder_units=args.units,
                       trainable=True,
                       return_probabilities=False)

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    print('Model Compiled.')
    print('Training. Ctrl+C to end early.')

    try:
        hist = model.fit([training.inputs1, training.inputs2, training.inputs3,
                          training.inputs4, training.inputs5],
                         [training.targets1],
                         epochs=args.epochs,
                         batch_size=args.batch_size,
                         validation_split=0.05)

    except KeyboardInterrupt:
        print('Model training stopped early.')
    model.save('./savemodel/model1.h5')
    print('Model training complete.')
Code example #13
def train_model_1(net, name, nb_epoch=1, nb_batch=1, batch_size=1, lr=1e-4):
    print(' ')
    print("_____training_____")

    optimizer = optim.Adam(net.parameters(), lr=lr)
    data_train = Data(V.images_train_dir, V.labels_train_txt)
    data_valid = Data(V.images_valid_dir, V.labels_valid_txt)
    best_loss_valid = float('inf')  # any first validation loss becomes the new best
    learning_data = []
    loss_train_cumulate = []

    for epoch in range(1, nb_epoch + 1):  # inclusive upper bound, so all nb_epoch epochs run
        gen_train = data_train.generator(batch_size)
        print(' ')
        print('epoch : %d' % epoch)

        inputs, labels = gen_train.__next__()
        batch = 1
        while inputs.shape[0] != 0:
            # The Data class was first developed for a Keras model and needs
            # adjusting before it can be used with the torch one.
            inputs, _, labels = adapt_data_format(inputs, labels)
            optimizer.zero_grad()

            outputs = net(inputs)

            loss = nn.functional.cross_entropy(outputs, labels)
            loss_train_cumulate.append(loss.item())
            loss.backward()
            optimizer.step()

            if batch % 100 == 99:
                print(' ')
                print(' epoch  %d/%d,    batch  %d/%d' %
                      (epoch, nb_epoch, batch, nb_batch))

                gen_valid = data_valid.generator(300)
                inputs, _, labels = adapt_data_format(*gen_valid.__next__())

                outputs = net(inputs)

                loss_train = np.mean(loss_train_cumulate)
                loss_train_cumulate = []
                loss_valid, label_error, word_error = errors(
                    outputs, labels, data_valid)
                learning_data.append({
                    'epoch': epoch,
                    'batch': batch,
                    'loss_valid': loss_valid,
                    'loss_train': loss_train,
                    'label_error': label_error,
                    'word_error': word_error
                })
                learning_summary = pd.DataFrame(learning_data)
                learning_summary.to_csv(
                    path_or_buf='%s/pytorch/%s/learning_summary.csv' %
                    (V.experiments_folder, name))

                path = "%s/pytorch/%s/Training/e%d-b%d/" % (
                    V.experiments_folder, name, epoch, batch)
                os.makedirs(path)
                save_in_files(path, loss_valid, label_error, word_error,
                              outputs, labels, data_valid)

                print('     loss_valid    %f' % loss_valid)
                print('     loss_train    %f' % loss_train)
                print('     label_error   %f' % label_error)
                print('     word_error    %f' % word_error)

                if loss_valid < best_loss_valid:
                    print('     *  new best loss valid    %f' % loss_valid)
                    torch.save(
                        net, '%s/pytorch/%s/Weights/net-e%d-b%d' %
                        (V.experiments_folder, name, epoch, batch))
                    best_loss_valid = loss_valid

            inputs, labels = gen_train.__next__()
            batch += 1

    return net
Code example #14
File: run.py Project: FrancescoZ/Chemception
def main(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Dataset functions
    input_vocab = Vocabulary('./data/human_vocab.json', padding=args.padding)
    output_vocab = Vocabulary('./data/machine_vocab.json',
                              padding=args.padding)

    print('Loading datasets.')

    training = Data(args.training_data, input_vocab, output_vocab)
    validation = Data(args.validation_data, input_vocab, output_vocab)
    training.load()
    validation.load()
    training.transform()
    validation.transform()

    print('Datasets Loaded.')
    print('Compiling Model.')
    model = simpleNMT(pad_length=args.padding,
                      n_chars=input_vocab.size(),
                      n_labels=output_vocab.size(),
                      embedding_learnable=False,
                      encoder_units=256,
                      decoder_units=256,
                      trainable=True,
                      return_probabilities=False)

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', all_acc])
    print('Model Compiled.')
    print('Training. Ctrl+C to end early.')

    try:
        model.fit_generator(generator=training.generator(args.batch_size),
                            steps_per_epoch=100,
                            validation_data=validation.generator(args.batch_size),
                            validation_steps=100,
                            callbacks=[cp],
                            workers=1,
                            verbose=1,
                            epochs=args.epochs)

    except KeyboardInterrupt:
        print('Model training stopped early.')

    print('Model training complete.')

    run_examples(model, input_vocab, output_vocab)