Example #1
def main(save_path, params):
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    ent_setup = params['ent_setup']  # ent, ent-anonym, no-ent
    data_path = params['data_path']
    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_chars = char_dim > 0

    if dataset == "clicr":
        dp = DataPreprocessor.DataPreprocessorClicr()
        data = dp.preprocess(
            #"/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/bmj_case_reports_data/dataset_json_concept_annotated/",
            data_path,
            ent_setup=ent_setup,
            no_training_set=False,
            use_chars=use_chars)
    elif dataset == "clicr_novice":
        dp = DataPreprocessor.DataPreprocessorNovice()
        data = dp.preprocess(data_path,
                             ent_setup=ent_setup,
                             no_training_set=False,
                             use_chars=use_chars)
    else:
        dp = DataPreprocessor.DataPreprocessor()
        data = dp.preprocess(data_path,
                             no_training_set=False,
                             use_chars=use_chars)

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training,
                                                         BATCH_SIZE,
                                                         sample=1.0)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation,
                                                       BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = GAReader.Model(nlayers, data.vocab_size, data.num_chars, W_init,
                       nhidden, embed_dim, dropout, train_emb, char_dim,
                       use_feat, gating_fn)

    print("training ...")
    num_iter = 0
    max_acc = 0.
    deltas = []

    logger = open(save_path + '/log', 'a')

    if os.path.isfile('%s/best_model.p' % save_path):
        print('loading previously saved model')
        m.load_model('%s/best_model.p' % save_path)
    else:
        print('saving init model')
        m.save_model('%s/model_init.p' % save_path)
        print('loading init model')
        m.load_model('%s/model_init.p' % save_path)

    for epoch in range(NUM_EPOCHS):
        estart = time.time()
        new_max = False

        for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_train:
            loss, tr_acc, probs = m.train(dw, dt, qw, qt, c, a, m_dw, m_qw, tt,
                                          tm, m_c, cl)

            message = "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" % (
                epoch, loss, tr_acc, time.time() - estart)
            print(message)
            logger.write(message + '\n')

            num_iter += 1
            if num_iter % VALIDATION_FREQ == 0:
                total_loss, total_acc, n, n_cand = 0., 0., 0, 0.

                for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_val:
                    outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm,
                                      m_c, cl)
                    loss, acc, probs = outs[:3]

                    bsize = dw.shape[0]
                    total_loss += bsize * loss
                    total_acc += bsize * acc
                    n += bsize
                val_acc = total_acc / n
                if val_acc > max_acc:
                    max_acc = val_acc
                    m.save_model('%s/best_model.p' % save_path)
                    new_max = True
                message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f" % (
                    epoch, total_loss / n, val_acc, max_acc)
                print(message)
                logger.write(message + '\n')

        # m.save_model('%s/model_%d.p'%(save_path,epoch))
        message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (
            epoch, tr_acc, val_acc)
        print(message)
        logger.write(message + '\n')

        # learning schedule
        if epoch >= 2:
            m.anneal()
        # stopping criterion
        if not new_max:
            break

    logger.close()
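
A minimal invocation sketch for the trainer above; every value is hypothetical, and
BATCH_SIZE, NUM_EPOCHS and VALIDATION_FREQ are assumed to come from config.py
(the save directory must already exist so config.py can be copied into it):

params = {'nhidden': 128, 'dropout': 0.1, 'word2vec': 'embeddings/word2vec.bin',
          'dataset': 'clicr', 'nlayers': 3, 'train_emb': 1, 'char_dim': 0,
          'use_feat': 0, 'gating_fn': 'T.mul', 'ent_setup': 'ent',
          'data_path': 'data/clicr/'}
main('experiments/ga_clicr', params)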
Example #2
def main(save_path, params):

    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    out = 'out'

    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_chars = char_dim > 0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, no_training_set=False, use_chars=use_chars)
    word_dictionary = data.dictionary[0]
    the_index = word_dictionary['the']
    #print('the index : {}'.format(word_dictionary['the']))

    idx_to_word = dict([(v, k) for (k, v) in word_dictionary.items()])
    words = [idx_to_word[i] for i in sorted(idx_to_word.keys())]

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training,
                                                         BATCH_SIZE,
                                                         sample=1.0)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation,
                                                       BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    #print('the embedding : {}'.format(W_init[the_index]))
    #print(W_init[0:5])

    print("running GAReader ...")

    m = GAReader.Model(nlayers, data.vocab_size, data.num_chars, W_init,
                       nhidden, embed_dim, dropout, train_emb, char_dim,
                       use_feat, gating_fn, words).build_network()
    m.compile(optimizer=tf.keras.optimizers.Adam(lr=LEARNING_RATE,
                                                 clipnorm=GRAD_CLIP),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=[tf.keras.metrics.categorical_accuracy])
    #tf.enable_eager_execution(config=tf.ConfigProto(allow_soft_placement = True))
    with tf.Graph().as_default():
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            K.set_session(sess)
            #with tf.device('/gpu:0:'):
            tensorboard = TensorBoardCustom(log_dir="logs", words=words)
            modelcheckpoint = tf.keras.callbacks.ModelCheckpoint(
                'output/weights.{epoch:02d}-{val_loss:.2f}.hdf5')
            writer = tf.summary.FileWriter("logs")

            def schedule(epoch, lr):

                if epoch >= 3:
                    return lr * 0.5
                else:
                    return lr

            lrate = LearningRateScheduler(schedule, verbose=1)

            for epoch in range(NUM_EPOCHS):
                for (inputs, a) in batch_loader_train:
                    [dw, qw, m_dw, m_qw, c, m_c, cl] = inputs
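                    # Note: a fresh model is built and compiled for every
                    # minibatch below, so weights do not persist across batches.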
                    m = GAReader.Model(nlayers, data.vocab_size,
                                       data.num_chars, W_init, nhidden,
                                       embed_dim, dropout, train_emb, char_dim,
                                       use_feat, gating_fn,
                                       words).build_network()
                    m.compile(optimizer=tf.keras.optimizers.Adam(
                        lr=LEARNING_RATE, clipnorm=GRAD_CLIP),
                              loss=tf.keras.losses.categorical_crossentropy,
                              metrics=[tf.keras.metrics.categorical_accuracy])
                    #print(dw.shape)
                    #print('dw : {}'.format(dw))
                    #print('qw : {}'.format(qw))
                    #print('m_dw : {}'.format(m_dw))
                    #print('m_qw : {}'.format(m_qw))
                    #print('c : {}'.format(c))
                    #print([idx_to_word[i] for i in dw[0, :, 0].tolist()])
                    train_summary = m.train_on_batch(
                        inputs,
                        to_categorical(a, batch_loader_train.max_num_cand))
                    print(m.get_weights()[0])
                    print('epoch: {}, train loss: {}, train acc: {}'.format(
                        epoch, train_summary[0], train_summary[1]))
                    lr = tf.summary.scalar('learning_rate', LEARNING_RATE)
                    summary = tf.summary.merge_all()
                    s = sess.run(summary)
                    writer.add_summary(s)
            writer.close()
Example #3
def train(
        network_backbone,
        pre_trained_model=None,
        trainset_filename='data/datasets/VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt',
        valset_filename='data/datasets/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt',
        images_dir='data/datasets/VOCdevkit/VOC2012/JPEGImages/',
        labels_dir='data/datasets/VOCdevkit/VOC2012/SegmentationClass/',
        trainset_augmented_filename='data/datasets/SBD/train_noval.txt',
        images_augmented_dir='data/datasets/SBD/benchmark_RELEASE/dataset/img/',
        labels_augmented_dir='data/datasets/SBD/benchmark_RELEASE/dataset/cls/',
        model_dir=None,
        log_dir='data/logs/deeplab/'):

    if not model_dir:
        model_dir = 'data/models/deeplab/{}_voc2012/'.format(network_backbone)
    num_classes = 21
    ignore_label = 255
    num_epochs = 1000
    minibatch_size = 8  # Unable to do minibatch_size = 12 :(
    random_seed = 0
    learning_rate = 1e-5
    weight_decay = 5e-4
    batch_norm_decay = 0.99
    image_shape = [513, 513]

    # validation_scales = [0.5, 1, 1.5]
    validation_scales = [1]

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Prepare datasets
    train_dataset = Dataset(dataset_filename=trainset_filename,
                            images_dir=images_dir,
                            labels_dir=labels_dir,
                            image_extension='.jpg',
                            label_extension='.png')
    valid_dataset = Dataset(dataset_filename=valset_filename,
                            images_dir=images_dir,
                            labels_dir=labels_dir,
                            image_extension='.jpg',
                            label_extension='.png')

    # Calculate image channel means
    channel_means = save_load_means(
        means_filename='channel_means.npz',
        image_filenames=train_dataset.image_filenames,
        recalculate=False)

    voc2012_preprocessor = DataPreprocessor(channel_means=channel_means,
                                            output_size=image_shape,
                                            min_scale_factor=0.5,
                                            max_scale_factor=2.0)

    # Prepare dataset iterators
    train_iterator = Iterator(dataset=train_dataset,
                              minibatch_size=minibatch_size,
                              process_func=voc2012_preprocessor.preprocess,
                              random_seed=random_seed,
                              scramble=True,
                              num_jobs=1)
    valid_iterator = Iterator(dataset=valid_dataset,
                              minibatch_size=minibatch_size,
                              process_func=voc2012_preprocessor.preprocess,
                              random_seed=None,
                              scramble=False,
                              num_jobs=1)

    # Prepare augmented dataset
    train_augmented_dataset = Dataset(
        dataset_filename=trainset_augmented_filename,
        images_dir=images_augmented_dir,
        labels_dir=labels_augmented_dir,
        image_extension='.jpg',
        label_extension='.mat')

    channel_augmented_means = save_load_means(
        means_filename='channel_augmented_means.npz',
        image_filenames=train_augmented_dataset.image_filenames,
        recalculate=False)

    voc2012_augmented_preprocessor = DataPreprocessor(
        channel_means=channel_augmented_means,
        output_size=image_shape,
        min_scale_factor=0.5,
        max_scale_factor=2.0)
    train_augmented_iterator = Iterator(
        dataset=train_augmented_dataset,
        minibatch_size=minibatch_size,
        process_func=voc2012_augmented_preprocessor.preprocess,
        random_seed=random_seed,
        scramble=True,
        num_jobs=1)

    model = DeepLab(network_backbone,
                    num_classes=num_classes,
                    ignore_label=ignore_label,
                    batch_norm_momentum=batch_norm_decay,
                    pre_trained_model=pre_trained_model,
                    log_dir=log_dir)

    best_mIoU = 0

    for i in range(num_epochs):

        print('Epoch number: {}'.format(i))

        print('Start validation...')

        valid_loss_total = 0
        num_pixels_union_total = np.zeros(num_classes)
        num_pixels_intersection_total = np.zeros(num_classes)

        # Multi-scale inputs prediction
        for _ in trange(valid_iterator.dataset_size):
            image, label = valid_iterator.next_raw_data()
            image = subtract_channel_means(image=image,
                                           channel_means=channel_means)

            output, valid_loss = multiscale_single_validate(
                image=image,
                label=label,
                input_scales=validation_scales,
                validator=model.validate)
            valid_loss_total += valid_loss

            prediction = np.argmax(output, axis=-1)
            num_pixels_union, num_pixels_intersection = count_label_prediction_matches(
                labels=[np.squeeze(label, axis=-1)],
                predictions=[prediction],
                num_classes=num_classes,
                ignore_label=ignore_label)

            num_pixels_union_total += num_pixels_union
            num_pixels_intersection_total += num_pixels_intersection

            # validation_single_demo(image=image, label=np.squeeze(label, axis=-1), prediction=prediction, demo_dir=os.path.join(results_dir, 'validation_demo'), filename=str(_))

        mean_IOU = mean_intersection_over_union(
            num_pixels_union=num_pixels_union_total,
            num_pixels_intersection=num_pixels_intersection_total)

        valid_loss_ave = valid_loss_total / valid_iterator.dataset_size

        print('Validation loss: {:.4f} | mIoU: {:.4f}'.format(
            valid_loss_ave, mean_IOU))

        if mean_IOU > best_mIoU:
            best_mIoU = mean_IOU
            model_savename = '{}_{:.4f}.ckpt'.format(network_backbone,
                                                     best_mIoU)
            print('New best mIoU achieved, model saved as {}.'.format(
                model_savename))
            model.save(model_dir, model_savename)

        print('Start training...')

        train_loss_total = 0
        num_pixels_union_total = np.zeros(num_classes)
        num_pixels_intersection_total = np.zeros(num_classes)

        print('Training using VOC2012...')
        for _ in trange(
                np.ceil(train_iterator.dataset_size /
                        minibatch_size).astype(int)):
            images, labels = train_iterator.next_minibatch()
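            # Scale the weight decay by the fraction of pixels that are not the
            # ignore label, so sparsely labelled minibatches are regularized less.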
            balanced_weight_decay = weight_decay * np.sum(
                labels != ignore_label) / labels.size
            outputs, train_loss = model.train(
                inputs=images,
                labels=labels,
                target_height=image_shape[0],
                target_width=image_shape[1],
                learning_rate=learning_rate,
                weight_decay=balanced_weight_decay)
            train_loss_total += train_loss

            predictions = np.argmax(outputs, axis=-1)
            num_pixels_union, num_pixels_intersection = count_label_prediction_matches(
                labels=np.squeeze(labels, axis=-1),
                predictions=predictions,
                num_classes=num_classes,
                ignore_label=ignore_label)

            num_pixels_union_total += num_pixels_union
            num_pixels_intersection_total += num_pixels_intersection

            # validation_demo(images=images, labels=np.squeeze(labels, axis=-1), predictions=predictions, demo_dir=os.path.join(results_dir, 'training_demo'), batch_no=_)
        train_iterator.shuffle_dataset()

        print('Training using SBD...')
        for _ in trange(
                np.ceil(train_augmented_iterator.dataset_size /
                        minibatch_size).astype(int)):
            images, labels = train_augmented_iterator.next_minibatch()
            balanced_weight_decay = weight_decay * np.sum(
                labels != ignore_label) / labels.size
            outputs, train_loss = model.train(
                inputs=images,
                labels=labels,
                target_height=image_shape[0],
                target_width=image_shape[1],
                learning_rate=learning_rate,
                weight_decay=balanced_weight_decay)
            train_loss_total += train_loss

            predictions = np.argmax(outputs, axis=-1)
            num_pixels_union, num_pixels_intersection = count_label_prediction_matches(
                labels=np.squeeze(labels, axis=-1),
                predictions=predictions,
                num_classes=num_classes,
                ignore_label=ignore_label)

            num_pixels_union_total += num_pixels_union
            num_pixels_intersection_total += num_pixels_intersection

            # validation_demo(images=images, labels=np.squeeze(labels, axis=-1), predictions=predictions, demo_dir=os.path.join(results_dir, 'training_demo'), batch_no=_)
        train_augmented_iterator.shuffle_dataset()

        mIoU = mean_intersection_over_union(
            num_pixels_union=num_pixels_union_total,
            num_pixels_intersection=num_pixels_intersection_total)
        train_loss_ave = train_loss_total / (
            train_iterator.dataset_size +
            train_augmented_iterator.dataset_size)
        print('Training loss: {:.4f} | mIoU: {:.4f}'.format(
            train_loss_ave, mIoU))

    model.close()
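
A minimal invocation sketch; the backbone name and checkpoint path below are hypothetical,
and the default arguments above assume the VOC2012/SBD directory layout they point to:

train(network_backbone='resnet_101',
      pre_trained_model='data/models/pretrained/resnet_101/resnet_101.ckpt')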
Example #4
def main(load_path, params, mode='test'):

    regularizer = params['regularizer']
    rlambda = params['lambda']
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    subsample = params['subsample']
    base_model = params['model']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']

    # load settings
    shutil.copyfile('%s/config.py' % load_path, 'config.py')

    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset)
    inv_vocab = data.inv_dictionary

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE, data.dictionary)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE, data.dictionary)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
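    # params['model'] names the reader variant as a string; eval() resolves it
    # to the corresponding module so its Model class can be constructed below.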
    m = eval(base_model).Model(nlayers, data.vocab_size, data.num_chars,
                               W_init, regularizer, rlambda, nhidden,
                               embed_dim, dropout, train_emb, subsample,
                               char_dim, use_feat, data.dictionary[4])
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    total_loss, total_acc, n = 0., 0., 0
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl,
                          match_feat, use_char, use_char_q)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention

        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        pr[n:n + bsize, :] = probs
        fids += fnames
        n += bsize

    logger = open(load_path + '/log', 'a', 0)
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pkl.dump(attns, open('%s/%s.attns' % (load_path, mode), 'w'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()
Example #5
def main(load_path, params, mode='test'):

    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['data']
    nlayers = params['nlayers']
    sub2vec = params['sub2vec']
    train_emb = params['train_emb']
    sub_dim = params['sub_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    use_subs = sub_dim > 0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, no_training_set=True, use_subs=use_subs)
    inv_vocab = data.inv_dictionary

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    S_init, sub_dim = Helpers.load_sub_embeddings(data.dictionary[1], sub2vec)
    m = model.Model(nlayers,
                    data.vocab_size,
                    data.num_chars,
                    W_init,
                    S_init,
                    nhidden,
                    embed_dim,
                    dropout,
                    train_emb,
                    sub_dim,
                    use_feat,
                    gating_fn,
                    save_attn=True)
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    total_loss, total_acc, n = 0., 0., 0
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention
        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc
        fids += fnames
        n += bsize
        print("step" + str(n) + ",acc" + str(acc))

    logger = open(load_path + '/log', 'a', 0)
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pkl.dump(attns, open('%s/%s.attns' % (load_path, mode), 'w'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()
Example #6
def main(load_path, params, mode='test'):
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    ent_setup = params['ent_setup']
    data_path = params['data_path']
    # save settings
    shutil.copyfile('config.py', '%s/config_test.py' % load_path)
    use_chars = char_dim > 0

    if dataset == "clicr":
        dp = DataPreprocessor.DataPreprocessorClicr()
        #dataset_path = "/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/bmj_case_reports_data/dataset_json_concept_annotated/"
        #dataset_path = "data/"
        data = dp.preprocess(data_path,
                             ent_setup=ent_setup,
                             no_training_set=True)
    elif dataset == "clicr_novice":
        dp = DataPreprocessor.DataPreprocessorNovice()
        data = dp.preprocess(data_path,
                             ent_setup=ent_setup,
                             no_training_set=True)
    else:
        dp = DataPreprocessor.DataPreprocessor()
        data = dp.preprocess(data_path, no_training_set=True)
    inv_vocab = data.inv_dictionary

    assert os.path.exists(params["test_file"] if mode == "test"
                          else params["validation_file"])

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE)
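    # Map each question's file name (the last field of a question tuple) to its
    # candidate answer token-id lists (field 3), used to decode predictions below.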
    f_to_cand = {i[-1]: i[3] for i in batch_loader_test.questions}

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = GAReader.Model(nlayers,
                       data.vocab_size,
                       data.num_chars,
                       W_init,
                       nhidden,
                       embed_dim,
                       dropout,
                       train_emb,
                       char_dim,
                       use_feat,
                       gating_fn,
                       save_attn=False)
    print("model load path")
    print('%s/best_model.p' % load_path)
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    pred_ans = {}
    total_loss, total_acc, n = 0., 0., 0
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention

        for f in range(len(fnames)):
            pred_cand = probs[f].argmax()
            pred_a_ids = f_to_cand[fnames[f]][pred_cand]
            pred_a = " ".join([inv_vocab[i] for i in pred_a_ids])
            if ent_setup == "ent-anonym" and (dataset == "clicr"
                                              or dataset == "clicr_novice"):
                relabeling_dicts = data.test_relabeling_dicts if mode == 'test' else data.val_relabeling_dicts
                pred_a = relabeling_dicts[fnames[f]][pred_a]
            pred_ans[fnames[f]] = pred_a

        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        pr[n:n + bsize, :] = probs
        fids += fnames
        n += bsize

    if (params["dataset"] == "clicr" or params["dataset"] == "clicr_plain" or params["dataset"] == "clicr_novice") \
            and (mode == 'test' or mode == 'validation'):
        print("writing predictions")
        preds_data = utils.to_output_preds(pred_ans)
        preds_filepath = load_path + '/{}.preds'.format(mode)
        utils.write_preds(preds_data, file_name=preds_filepath)
        utils.external_eval(preds_filepath,
                            preds_filepath + ".scores",
                            params["test_file"]
                            if mode == "test" else params["validation_file"],
                            extended=True)
    logger = open(load_path + '/log.test', 'a')
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pickle.dump(attns, open('%s/%s.attns' % (load_path, mode), 'wb'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()
Example #7
def main(save_path, params):

    regularizer = params['regularizer']
    rlambda = params['lambda']
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    subsample = params['subsample']
    base_model = params['model']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    train_cut = params['train_cut']
    gating_fn = params['gating_fn']

    # save settings
    shutil.copyfile('config.py','%s/config.py'%save_path)

    use_chars = char_dim>0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, use_chars=use_chars)

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training, BATCH_SIZE, data.dictionary,
            sample=train_cut, max_qry_len=85)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation, BATCH_SIZE, data.dictionary, max_qry_len=85)
    batch_loader_test = MiniBatchLoader.MiniBatchLoader(data.test, BATCH_SIZE, data.dictionary)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(data.dictionary[0], word2vec)
    m = eval(base_model).Model(nlayers, data.vocab_size, data.num_chars, W_init,
                               regularizer, rlambda, nhidden, embed_dim, dropout,
                               train_emb, subsample, char_dim, use_feat,
                               data.dictionary[4])

    print("training ...")
    num_iter = 0
    max_acc = 0.
    deltas = []
    test_acc = 0.

    logger = open(save_path+'/log','a',0)

    # if os.path.isfile('%s/best_model.p'%save_path):
    #     print('loading previously saved model')
    #     m.load_model('%s/best_model.p'%save_path)
    # else:
    #     print('saving init model')
    #     m.save_model('%s/model_init.p'%save_path)
    #     print('loading init model')
    #     m.load_model('%s/model_init.p'%save_path)

    for epoch in range(NUM_EPOCHS):
        estart = time.time()
        new_max = False

        for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_train:
            loss, tr_acc, probs = m.train(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl, match_feat, use_char, use_char_q)

            # message = "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" % (
            #         epoch, loss, tr_acc, time.time()-estart)
            # print message
            # logger.write(message+'\n')

            if num_iter % VALIDATION_FREQ == 0:
                total_loss, total_acc, n, n_cand = 0., 0., 0, 0.

                for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_val:
                    outs = m.validate(dw, dt, qw, qt, c, a, 
                            m_dw, m_qw, tt, tm, m_c, cl, match_feat, use_char, use_char_q)
                    loss, acc, probs = outs[:3]

                    bsize = dw.shape[0]
                    total_loss += bsize*loss
                    total_acc += bsize*acc
                    n += bsize

                val_acc = total_acc/n
                if val_acc > max_acc:
                    max_acc = val_acc
                    m.save_model('%s/best_model.p'%save_path)

                    temp_acc, temp_n = 0.0, 0

                    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_test:
                        outs = m.validate(dw, dt, qw, qt, c, a, 
                            m_dw, m_qw, tt, tm, m_c, cl, match_feat, use_char, use_char_q)
                        _, acc, _ = outs[:3]
                        bsize = dw.shape[0]
                        temp_acc += bsize * acc
                        temp_n += bsize

                    test_acc = temp_acc / temp_n

                    new_max = True
                message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f test=%.4f" % (
                    epoch, total_loss/n, val_acc, max_acc, test_acc)
                print(message)
                logger.write(message+'\n')

            num_iter += 1

        m.save_model('%s/model_%d.p'%(save_path,epoch))
        message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (epoch, tr_acc, val_acc)
        print(message)
        logger.write(message+'\n')
        
        # learning schedule
        if epoch >= 2:
            m.anneal()
        # stopping criterion
        if not new_max:
            break

    logger.close()
Example #8
def main(save_path, params):
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    sub2vec = params['sub2vec']
    subdict = params['subdic']
    dataset = params['data']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    sub_dim = params['sub_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']

    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_subs = sub_dim > 0
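    # Sub-word units are only prepared by the preprocessor when sub_dim > 0.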
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset,
                         no_training_set=False,
                         use_subs=use_subs,
                         subdict=subdict)

    print("building minibatch loaders ...",
          datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training,
                                                         BATCH_SIZE,
                                                         sample=1)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation,
                                                       BATCH_SIZE)

    print("building network ...", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    S_init, sub_dim = Helpers.load_sub_embeddings(data.dictionary[1], sub2vec)
    m = model.Model(nlayers, data.vocab_size, data.num_chars, W_init, S_init,
                    nhidden, embed_dim, dropout, train_emb, sub_dim, use_feat,
                    gating_fn)

    print("training ...", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    num_iter = 0
    max_acc = 0.
    deltas = []

    logger = open(save_path + '/log', 'a', 0)

    if os.path.isfile('%s/best_model.p' % save_path):
        print('loading previously saved model',
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        m.load_model('%s/best_model.p' % save_path)
        print("model loaded")
    else:
        print('saving init model',
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        m.save_model('%s/model_init.p' % save_path)
        print('loading init model',
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        m.load_model('%s/model_init.p' % save_path)
    for epoch in range(NUM_EPOCHS):
        print("epochs training ...",
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        estart = time.time()
        new_max = False
        for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_train:
            loss, tr_acc, probs = m.train(dw, dt, qw, qt, c, a, m_dw, m_qw, tt,
                                          tm, m_c, cl)

            message = "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" % (
                epoch, loss, tr_acc, time.time() - estart)
            print(message)
            logger.write(message + '\n')

            num_iter += 1
            if num_iter % VALIDATION_FREQ == 0:
                total_loss, total_acc, n, n_cand = 0., 0., 0, 0.

                for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_val:
                    outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm,
                                      m_c, cl)
                    loss, acc, probs = outs[:3]

                    bsize = dw.shape[0]
                    total_loss += bsize * loss
                    total_acc += bsize * acc
                    n += bsize
                print('validated on', n, 'validation examples')
                val_acc = total_acc / n
                if val_acc > max_acc:
                    max_acc = val_acc
                    m.save_model('%s/best_model.p' % save_path)
                    new_max = True
                message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f" % (
                    epoch, total_loss / n, val_acc, max_acc)
                print(message)
                logger.write(message + '\n')

        m.save_model('%s/model_%d.p' % (save_path, epoch))
        message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (
            epoch, tr_acc, val_acc)
        print(message)
        logger.write(message + '\n')

        # learning schedule
        if epoch >= 2:
            m.anneal()
        # stopping criterion
        if not new_max:
            break

    logger.close()
Example #9
def train(network_backbone, pre_trained_model=None,
          trainset_filename='/content/Data_Camera_SanTennis_Labeled/train.txt',
          valset_filename='/content/Data_Camera_SanTennis_Labeled/valid.txt',
          images_dir='/content/Data_Camera_SanTennis_Labeled/RGBs/',
          labels_dir='/content/Data_Camera_SanTennis_Labeled/Labels/',
          trainset_augmented_filename='data/datasets/SBD/train_noval.txt',
          images_augmented_dir='data/datasets/SBD/benchmark_RELEASE/dataset/img/',
          labels_augmented_dir='data/datasets/SBD/benchmark_RELEASE/dataset/cls/', model_dir=None,
          log_dir='data/logs/deeplab/'):
    if not model_dir:
        model_dir = '/content/drive/MyDrive/Colab Notebooks/RobotNhatBongTennis2021/Models/'
    num_classes = 5
    ignore_label = 255
    num_epochs = 1000
    minibatch_size = 4  # Unable to do minibatch_size = 12 :(
    random_seed = 0
    learning_rate = 1e-3
    weight_decay = 5e-4
    batch_norm_decay = 0.99
    image_shape = [480, 640]

    # validation_scales = [0.5, 1, 1.5]
    validation_scales = [1]

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Prepare datasets
    train_dataset = Dataset(dataset_filename=trainset_filename, images_dir=images_dir, labels_dir=labels_dir,
                            image_extension='.png', label_extension='.png')
    valid_dataset = Dataset(dataset_filename=valset_filename, images_dir=images_dir, labels_dir=labels_dir,
                            image_extension='.png', label_extension='.png')

    # Calculate image channel means
    channel_means = save_load_means(means_filename='channel_means.npz', image_filenames=train_dataset.image_filenames,
                                    recalculate=False)

    voc2012_preprocessor = DataPreprocessor(channel_means=channel_means, output_size=image_shape, min_scale_factor=0.5,
                                            max_scale_factor=2.0)

    # Prepare dataset iterators
    train_iterator = Iterator(dataset=train_dataset, minibatch_size=minibatch_size,
                              process_func=voc2012_preprocessor.preprocess, random_seed=random_seed, scramble=True,
                              num_jobs=1)
    valid_iterator = Iterator(dataset=valid_dataset, minibatch_size=minibatch_size,
                              process_func=voc2012_preprocessor.preprocess, random_seed=None, scramble=False,
                              num_jobs=1)

    # Prepare augmented dataset
    # train_augmented_dataset = Dataset(dataset_filename=trainset_augmented_filename, images_dir=images_augmented_dir, labels_dir=labels_augmented_dir, image_extension='.jpg', label_extension='.mat')
    #
    # channel_augmented_means = save_load_means(means_filename='channel_augmented_means.npz', image_filenames=train_augmented_dataset.image_filenames, recalculate=False)
    #
    # voc2012_augmented_preprocessor = DataPreprocessor(channel_means=channel_augmented_means, output_size=image_shape, min_scale_factor=0.5, max_scale_factor=2.0)
    # train_augmented_iterator = Iterator(dataset=train_augmented_dataset, minibatch_size=minibatch_size, process_func=voc2012_augmented_preprocessor.preprocess, random_seed=random_seed, scramble=True, num_jobs=1)

    model = DeepLab(network_backbone, num_classes=num_classes, ignore_label=ignore_label,
                    batch_norm_momentum=batch_norm_decay, pre_trained_model=pre_trained_model, log_dir=log_dir)

    best_mIoU = 0

    train_loss = ","
    train_mIoU = ","
    valid_loss = ","
    valid_mIoU = ","

    for i in range(num_epochs):

        print('Epoch number: {}'.format(i))

        print('Start validation...')

        valid_loss_total = 0
        num_pixels_union_total = np.zeros(num_classes)
        num_pixels_intersection_total = np.zeros(num_classes)

        rand = np.random.randint(0, valid_iterator.dataset_size - 1)
        count = 0
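        # One randomly chosen validation sample per epoch (count == rand) is
        # dumped as a demo collage below.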

        # Multi-scale inputs prediction
        for _ in trange(valid_iterator.dataset_size):
            image, label = valid_iterator.next_raw_data()

            # image = subtract_channel_means(image=image, channel_means=channel_means)

            output, valid_loss = multiscale_single_validate(image=image, label=label, input_scales=validation_scales,
                                                            validator=model.validate)
            valid_loss_total += valid_loss

            prediction = np.argmax(output, axis=-1)
            num_pixels_union, num_pixels_intersection = count_label_prediction_matches(
                labels=[np.squeeze(label, axis=-1)], predictions=[prediction], num_classes=num_classes,
                ignore_label=ignore_label)

            num_pixels_union_total += num_pixels_union
            num_pixels_intersection_total += num_pixels_intersection

            if count == rand:
                validation_single_demo_collage(image=image, label=np.squeeze(label, axis=-1), prediction=prediction,
                                               demo_dir=os.path.join(
                                                   "/content/CustomDeeplabv3/data/demos/deeplab/resnet_101_voc2012/",
                                                   'validation_demo'), val_no=str(i))

            count += 1

        mean_IOU = mean_intersection_over_union(num_pixels_union=num_pixels_union_total,
                                                num_pixels_intersection=num_pixels_intersection_total)

        valid_loss_ave = valid_loss_total / valid_iterator.dataset_size

        print('Validation loss: {:.4f} | mIoU: {:.4f}'.format(valid_loss_ave, mean_IOU))

        # valid_loss += str(train_loss_total / train_iterator.dataset_size) + ","
        valid_mIoU += str(mean_IOU) + ","

        if mean_IOU > best_mIoU and mean_IOU > 0.25:
            best_mIoU = mean_IOU
            model_savename = '{}_{:.4f}.ckpt'.format(network_backbone, best_mIoU)
            print('New best mIoU achieved, model saved as {}.'.format(model_savename))
            model.save(model_dir, model_savename)

        print('Start training...')

        train_loss_total = 0
        num_pixels_union_total = np.zeros(num_classes)
        num_pixels_intersection_total = np.zeros(num_classes)

        print('Training using the homemade (custom) dataset...')
        for _ in trange(np.ceil(train_iterator.dataset_size / minibatch_size).astype(int)):
            images, labels = train_iterator.next_minibatch()
            balanced_weight_decay = weight_decay * np.sum(labels != ignore_label) / labels.size
            outputs, train_loss = model.train(inputs=images, labels=labels, target_height=image_shape[0],
                                              target_width=image_shape[1], learning_rate=learning_rate,
                                              weight_decay=balanced_weight_decay)
            train_loss_total += train_loss

            predictions = np.argmax(outputs, axis=-1)
            num_pixels_union, num_pixels_intersection = count_label_prediction_matches(
                labels=np.squeeze(labels, axis=-1), predictions=predictions, num_classes=num_classes,
                ignore_label=ignore_label)

            num_pixels_union_total += num_pixels_union
            num_pixels_intersection_total += num_pixels_intersection

            validation_demo_collage(images=images, labels=np.squeeze(labels, axis=-1), predictions=predictions,
                                    demo_dir=os.path.join(
                                        "/content/CustomDeeplabv3/data/demos/deeplab/resnet_101_voc2012/",
                                        'training_demo'), batch_no=i)
        train_iterator.shuffle_dataset()

        # print('Training using SBD...')
        # for _ in trange(np.ceil(train_augmented_iterator.dataset_size / minibatch_size).astype(int)):
        #     images, labels = train_augmented_iterator.next_minibatch()
        #     balanced_weight_decay = weight_decay * sum(labels != ignore_label) / labels.size
        #     outputs, train_loss = model.train(inputs=images, labels=labels, target_height=image_shape[0], target_width=image_shape[1], learning_rate=learning_rate, weight_decay=balanced_weight_decay)
        #     train_loss_total += train_loss
        #
        #     predictions = np.argmax(outputs, axis=-1)
        #     num_pixels_union, num_pixels_intersection = count_label_prediction_matches(labels=np.squeeze(labels, axis=-1), predictions=predictions, num_classes=num_classes, ignore_label=ignore_label)
        #
        #     num_pixels_union_total += num_pixels_union
        #     num_pixels_intersection_total += num_pixels_intersection
        #
        #     # validation_demo(images=images, labels=np.squeeze(labels, axis=-1), predictions=predictions, demo_dir=os.path.join(results_dir, 'training_demo'), batch_no=_)
        # train_augmented_iterator.shuffle_dataset()

        mIoU = mean_intersection_over_union(num_pixels_union=num_pixels_union_total,
                                            num_pixels_intersection=num_pixels_intersection_total)
        # train_loss_ave = train_loss_total / (train_iterator.dataset_size + train_augmented_iterator.dataset_size)
        train_loss_ave = train_loss_total / train_iterator.dataset_size
        print('Training loss: {:.4f} | mIoU: {:.4f}'.format(train_loss_ave, mIoU))

        # train_loss += str(train_loss_total / train_iterator.dataset_size) + ","
        train_mIoU += str(mIoU) + ","

        # loss_log = open("/content/drive/MyDrive/Colab Notebooks/RobotNhatBongTennis2021/loss_log.txt", "w")
        mIoU_log = open("/content/drive/MyDrive/Colab Notebooks/RobotNhatBongTennis2021/mIoU_log.txt", "w")

        # loss_log.write(train_loss + "\n" + valid_loss)
        mIoU_log.write(train_mIoU + "\n" + valid_mIoU)
        mIoU_log.close()

    model.close()
Example #10
import numpy as np
import sys

from utils import Helpers, DataPreprocessor, MiniBatchLoader
from model import GAReader

# NOTE: config.py should be consistent with the training model
from config import *
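# Command-line arguments: model path, output path, split name
# ('validation' or 'test'), and K.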

model_path = sys.argv[1]
output_path = sys.argv[2]
dataset = sys.argv[3]
K = int(sys.argv[4])
top_K = 3

dp = DataPreprocessor.DataPreprocessor()

# NOTE: make sure vocab.txt is already there!
data = dp.preprocess(DATASET, no_training_set=True)
inv_vocab = data.inv_dictionary

print("building minibatch loaders ...")
if 'CANDIDATE_SUBSET' not in locals():
    CANDIDATE_SUBSET = False
if dataset == 'validation':
    batch_loader_test = MiniBatchLoader.MiniBatchLoader(
        data.validation, 128, shuffle=False, candidate_subset=CANDIDATE_SUBSET)
elif dataset == 'test':
    batch_loader_test = MiniBatchLoader.MiniBatchLoader(
        data.test, 128, shuffle=False, candidate_subset=CANDIDATE_SUBSET)
Example #11
def main(save_path, params, mode='train'):

    word2vec = params['word2vec']
    dataset = params['dataset']

    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess_rc(params, dataset)

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoaderMention(
        params, data.training, params['batch_size'])
    batch_loader_val = MiniBatchLoader.MiniBatchLoaderMention(
        params,
        data.validation,
        params['batch_size'],
        shuffle=False,
        ensure_answer=False)
    batch_loader_test = MiniBatchLoader.MiniBatchLoaderMention(
        params,
        data.test,
        params['batch_size'],
        shuffle=False,
        ensure_answer=False)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = GA.Model(params, W_init, embed_dim)

    print("training ...")
    num_iter = 0
    max_acc = 0.0
    min_loss = 1e5

    logger = open(save_path + '/log', 'a', 0)
    train_writer = tf.summary.FileWriter(os.path.join(save_path, 'train'))
    val_writer = tf.summary.FileWriter(os.path.join(save_path, 'val'))

    if params['reload_']:
        print('loading previously saved model')
        saves = pkl.load(open('%s/checkpoints.p' % save_path))
        m.load_model('%s/best_model.p' % save_path, saves[-1])

    # train
    if mode == 'train':
        saves = []
        for epoch in range(params['num_epochs']):
            estart = time.time()
            stop_flag = False

            for example in batch_loader_train:
                loss, tr_acc, probs, summary = m.train(*example[:-2])

                if num_iter % params['logging_frequency'] == 0:
                    message = (
                        "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" %
                        (epoch, loss, tr_acc, time.time() - estart))
                    print(message)
                    logger.write(message + '\n')
                    train_writer.add_summary(summary, num_iter)

                num_iter += 1
                if num_iter % params['validation_frequency'] == 0:
                    total_loss, total_acc, n = 0., 0., 0.

                    for example in batch_loader_val:
                        outs = m.validate(*example[:-2])
                        loss, acc, probs = outs[:3]

                        bsize = example[0].shape[0]
                        total_loss += bsize * loss
                        total_acc += bsize * acc
                        n += bsize

                    val_acc = total_acc / n
                    print("current validation accuracy:", val_acc)
                    if val_acc > max_acc:
                        max_acc = val_acc
                        save_id = num_iter
                        print("new best validation accuracy; saving model")
                        sv = m.save_model('%s/best_model.p' % save_path,
                                          save_id)
                        saves.append(save_id)
                        new_max = True

                    val_loss = total_loss / n
                    message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f" % (
                        epoch, val_loss, val_acc, max_acc)
                    print(message)
                    logger.write(message + '\n')

                    _add_summary(val_writer, val_loss, "loss", num_iter)
                    _add_summary(val_writer, val_acc, "accuracy", num_iter)

                    # stopping
                    if val_loss < min_loss: min_loss = val_loss
                    if params['stopping_criterion'] and (
                            val_loss - min_loss) / min_loss > 0.3:
                        stop_flag = True
                        break

                if num_iter % params["anneal_frequency"] == 0:
                    m.anneal()

            #m.save_model('%s/model_%d.p'%(save_path,epoch))
            message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (
                epoch, tr_acc, max_acc)
            print(message)
            logger.write(message + '\n')

            if stop_flag: break
        # record all saved models
        pkl.dump(saves, open('%s/checkpoints.p' % save_path, 'w'))

    # test
    mode = 'test' if mode in ['train', 'test'] else 'val'
    print("testing ...")
    try:
        saves = pkl.load(open('%s/checkpoints.p' % save_path))
        print('%s/checkpoints.p' % save_path)
    except IOError:

        def _to_num(foo):
            try:
                num = int(foo)
            except ValueError:
                return None
            return num

        saves = []
        for directory in os.listdir(save_path):
            if not os.path.isdir(os.path.join(save_path, directory)): continue
            num = _to_num(directory)
            if num is None: continue
            saves.append(num)

        saves = sorted(saves)
    print("saved checkpoints:", saves)
    if not saves:
        print("No models saved during training!")
        return
    print('loading model')
    m.load_model('%s/best_model.p' % save_path, saves[-1])

    total_loss, total_acc, n = 0., 0., 0
    answer_structure = {}
    idict = data.inv_dictionary
    for example in batch_loader_val:
        outs = m.validate(*example[:-2])
        loss, acc, probs = outs[:3]

        pred_indices = np.argmax(probs, axis=1)
        for i in range(len(example[-1])):
            cname = str(example[-1][i]).strip()
            gt_answer = example[10][i]
            answer_structure[cname] = (pred_indices[i], gt_answer, probs[i, :])

        bsize = example[0].shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        n += bsize
    test_acc = total_acc / n
    test_loss = total_loss / n
    message = "TEST loss=%.4e acc=%.4f" % (test_loss, test_acc)
    print(message)
    logger.write(message + '\n')
    pkl.dump(answer_structure,
             open(os.path.join(save_path, "test_answer_structure.p"), "w"))

    logger.close()

    # clean up
    print("Cleaning up saved models ...")
Example #12
def main(load_path, params, mode='test'):

    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']

    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, no_training_set=True)
    inv_vocab = data.inv_dictionary

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = Reader.Model(nlayers,
                     data.vocab_size,
                     data.num_chars,
                     W_init,
                     nhidden,
                     embed_dim,
                     dropout,
                     train_emb,
                     char_dim,
                     use_feat,
                     gating_fn,
                     save_attn=True)
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    total_loss, total_acc, n = 0., 0., 0
    result = {}
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention

        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        pr[n:n + bsize, :] = probs
        fids += fnames
        n += bsize

        answer = probs.argmax(1)
        for it in range(len(fnames)):
            tid = fnames[it].split('/')[-1].split('.')[0].strip()
            result[int(tid)] = answer[it]
            print(tid, answer[it])
        print('probs----', probs)
        #print('a----', a)
        print('fnames----', fnames)

    print(len(result))
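    # Write one predicted answer index per line, ordered by question id 1..2500.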
    with open('raw.txt', 'w') as ff:
        for i in range(1, 2501):
            ff.write(str(result[i]) + '\n')

    logger = open(load_path + '/log', 'a', 0)
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pkl.dump(attns, open('%s/%s.attns' % (load_path, mode), 'w'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()