Code example #1
# Imports reconstructed for a runnable sketch (the original file's import
# block is not shown). Keras 1.x-era API and project-local helpers assumed.
import argparse
import sys
import time

import numpy as np
from keras.models import model_from_json
from progressbar import Bar, ETA, Percentage, ProgressBar
from sklearn.metrics.pairwise import cosine_similarity

# Project-local helpers assumed importable from the repository:
# LoadIds, LoadQuestions, LoadChoices, LoadAnswers, LoadGloVe, MakeBatches,
# GetQuestionsTensor, GetChoicesTensor, SavePredictions


def main():
    start_time = time.time()
    #signal.signal(signal.SIGINT, InterruptHandler)
    #signal.signal(signal.SIGKILL, InterruptHandler)
    #signal.signal(signal.SIGTERM, InterruptHandler)

    parser = argparse.ArgumentParser(
        prog='testLSTM.py',
        description='Test LSTM model for visual question answering')
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        metavar='<model-path>')
    parser.add_argument('--weights',
                        type=str,
                        required=True,
                        metavar='<weights-path>')
    parser.add_argument('--output',
                        type=str,
                        required=True,
                        metavar='<prediction-path>')
    args = parser.parse_args()

    word_vec_dim = 300
    batch_size = 128

    #######################
    #      Load Model     #
    #######################

    print('Loading model and weights...')
    with open(args.model, 'r') as f:
        model = model_from_json(f.read())
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(args.weights)
    print('Model and weights loaded.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #      Load Data     #
    ######################

    print('Loading data...')

    dev_id_pairs, dev_image_ids = LoadIds('dev')
    #test_id_pairs, test_image_ids = LoadIds('test')

    dev_questions = LoadQuestions('dev')
    #test_questions = LoadQuestions('test')

    dev_choices = LoadChoices('dev')
    #test_choices = LoadChoices('test')

    dev_answers = LoadAnswers('dev')

    print('Finished loading data.')
    print('Time: %f s' % (time.time() - start_time))

    #######################
    #  Load Word Vectors  #
    #######################

    # load GloVe vectors
    print('Loading GloVe vectors...')
    word_embedding, word_map = LoadGloVe()
    print('GloVe vectors loaded')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #    Make Batches    #
    ######################

    print('Making batches...')

    # validation batches
    dev_question_batches = list(
        MakeBatches(dev_questions, batch_size, fillvalue=dev_questions[-1]))
    dev_answer_batches = list(
        MakeBatches(dev_answers['labs'], batch_size,
                    fillvalue=dev_answers['labs'][-1]))
    dev_choice_batches = list(
        MakeBatches(dev_choices, batch_size, fillvalue=dev_choices[-1]))

    print('Finished making batches.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #       Testing      #
    ######################

    # evaluate on dev set
    widgets = [
        'Evaluating ',
        Percentage(), ' ',
        Bar(marker='#', left='[', right=']'), ' ',
        ETA()
    ]
    pbar = ProgressBar(widgets=widgets)

    dev_correct = 0
    predictions = []

    for i in pbar(range(len(dev_question_batches))):
        # feed forward
        X_question_batch = GetQuestionsTensor(dev_question_batches[i],
                                              word_embedding, word_map)
        prob = model.predict_proba(X_question_batch, batch_size, verbose=0)

        # get word vecs of choices
        choice_feats = GetChoicesTensor(dev_choice_batches[i], word_embedding,
                                        word_map)
        similarity = np.zeros((5, batch_size), float)
        # cosine similarity between each prediction and the matching choice
        for j in range(5):
            similarity[j] = np.diag(cosine_similarity(prob, choice_feats[j]))
        # pick the most similar choice (choices are 1-indexed)
        pred = np.argmax(similarity, axis=0) + 1
        predictions.extend(pred.tolist())

        dev_correct += np.count_nonzero(dev_answer_batches[i] == pred)

    dev_acc = float(dev_correct) / len(dev_questions)
    print('Validation Accuracy: %f' % dev_acc)
    print('Validation Accuracy: %f' % dev_acc, file=sys.stderr)
    SavePredictions(args.output, predictions, dev_id_pairs)
    print('Time: %f s' % (time.time() - start_time))
    print('Time: %f s' % (time.time() - start_time), file=sys.stderr)
    print('Testing finished.')
    print('Testing finished.', file=sys.stderr)
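
The batching helper MakeBatches is project code that is not shown above. From its call sites (a batch size plus a fillvalue used to pad the final, short batch) it appears to follow the classic itertools "grouper" recipe; a minimal sketch under that assumption:

# Hypothetical reconstruction of MakeBatches (grouper semantics assumed):
# yields fixed-size tuples, padding the last batch with `fillvalue`.
from itertools import zip_longest

def MakeBatches(iterable, batch_size, fillvalue=None):
    chunks = [iter(iterable)] * batch_size
    return zip_longest(*chunks, fillvalue=fillvalue)

# list(MakeBatches([1, 2, 3, 4, 5], 2, fillvalue=5)) -> [(1, 2), (3, 4), (5, 5)]

Note that this padding is why the accuracy loop above can count the duplicated last example more than once, while the denominator len(dev_questions) covers only the real questions.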
Code example #2
# Imports reconstructed for a runnable sketch (not shown in the original
# snippet). Keras 1.x Graph-model API and project-local helpers assumed.
import argparse
import time

import numpy as np
from keras.utils import generic_utils
from sklearn.metrics.pairwise import cosine_similarity

# Project-local helpers assumed importable from the repository:
# LoadIds, LoadQuestions, LoadChoices, LoadCaptions, LoadGloVe, MakeBatches,
# GetQuestionsTensor, GetCaptionsTensor2, GetChoicesTensor, CreateGraph,
# Loss, SavePredictions


def main():
    start_time = time.time()

    parser = argparse.ArgumentParser(
        prog='trainMemNN.py',
        description='Train MemNN model for visual question answering')
    parser.add_argument('--mlp-hidden-units',
                        type=int,
                        default=1024,
                        metavar='<mlp-hidden-units>')
    parser.add_argument('--mlp-hidden-layers',
                        type=int,
                        default=3,
                        metavar='<mlp-hidden-layers>')
    parser.add_argument('--mlp-activation',
                        type=str,
                        default='tanh',
                        metavar='<activation-function>')
    parser.add_argument('--emb-dimension',
                        type=int,
                        default=50,
                        metavar='<embedding-dimension>')
    parser.add_argument('--batch-size',
                        type=int,
                        default=128,
                        metavar='<batch-size>')
    parser.add_argument('--hops', type=int, default=3, metavar='<memnet-hops>')
    #parser.add_argument('--model-path', type=str, required=True, metavar='<model-path>')
    parser.add_argument('--weight-path',
                        type=str,
                        required=True,
                        metavar='<weight-path>')
    parser.add_argument('--output-path',
                        type=str,
                        required=True,
                        metavar='<output-path>')
    args = parser.parse_args()

    word_vec_dim = 300
    img_dim = 300
    max_len = 30
    img_feature_num = 125
    ######################
    #      Load Data     #
    ######################
    data_dir = '/home/mlds/data/0.05_val/'

    print('Loading data...')

    #dev_q_ids, dev_image_ids = LoadIds('dev', data_dir)
    test_q_ids, test_image_ids = LoadIds('test', data_dir)

    #dev_questions = LoadQuestions('dev', data_dir)
    test_questions = LoadQuestions('test', data_dir)

    #dev_choices = LoadChoices('dev', data_dir)
    test_choices = LoadChoices('test', data_dir)

    caption_map = LoadCaptions('test')

    print('Finished loading data.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    # Model Descriptions #
    ######################
    print('Loading and compiling model...')
    model = CreateGraph(args.emb_dimension, args.hops, args.mlp_activation,
                        args.mlp_hidden_units, args.mlp_hidden_layers,
                        word_vec_dim, img_dim, img_feature_num)
    #model = model_from_json(open(args.model_path,'r').read())

    # loss and optimizer
    model.compile(loss={'output': Loss}, optimizer='rmsprop')
    model.load_weights(args.weight_path)

    print('Model and weights loaded.')
    print('Time: %f s' % (time.time() - start_time))

    ########################################
    #  Load CNN Features and Word Vectors  #
    ########################################

    # load VGG features
    '''
    print('Loading VGG features...')
    VGG_features, img_map = LoadVGGFeatures()
    print('VGG features loaded')
    print('Time: %f s' % (time.time()-start_time))
    '''

    # load GloVe vectors
    print('Loading GloVe vectors...')
    word_embedding, word_map = LoadGloVe()
    print('GloVe vectors loaded')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #    Make Batches    #
    ######################

    print('Making batches...')

    # validation batches
    # dev_question_batches = [ b for b in MakeBatches(dev_questions, args.batch_size, fillvalue=dev_questions[-1]) ]
    # dev_answer_batches = [ b for b in MakeBatches(dev_answers['labs'], args.batch_size, fillvalue=dev_answers['labs'][-1]) ]
    # dev_choice_batches = [ b for b in MakeBatches(dev_choices, args.batch_size, fillvalue=dev_choices[-1]) ]
    # dev_image_batches = [ b for b in MakeBatches(dev_image_ids, args.batch_size, fillvalue=dev_image_ids[-1]) ]

    # testing batches
    test_question_batches = list(
        MakeBatches(test_questions, args.batch_size,
                    fillvalue=test_questions[-1]))
    test_choice_batches = list(
        MakeBatches(test_choices, args.batch_size, fillvalue=test_choices[-1]))
    test_image_batches = list(
        MakeBatches(test_image_ids, args.batch_size,
                    fillvalue=test_image_ids[-1]))

    print('Finished making batches.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #      Testing       #
    ######################

    # start predicting
    pbar = generic_utils.Progbar(len(test_question_batches) * args.batch_size)

    predictions = []
    # feed forward
    for i in range(len(test_question_batches)):
        X_question_batch = GetQuestionsTensor(test_question_batches[i],
                                              word_embedding, word_map)
        X_caption_batch = GetCaptionsTensor2(test_image_batches[i],
                                             word_embedding, word_map,
                                             caption_map)
        prob = model.predict_on_batch({
            'question': X_question_batch,
            'image': X_caption_batch
        })
        prob = prob[0]

        # get word vecs of choices
        choice_feats = GetChoicesTensor(test_choice_batches[i], word_embedding,
                                        word_map)
        similarity = np.zeros((5, args.batch_size), float)
        # cosine similarity between each prediction and the matching choice
        for j in range(5):
            similarity[j] = np.diag(cosine_similarity(prob, choice_feats[j]))
        # pick the most similar choice (choices are 1-indexed)
        pred = np.argmax(similarity, axis=0) + 1
        predictions.extend(pred.tolist())

        pbar.add(args.batch_size)
    SavePredictions(args.output_path, predictions, test_q_ids)

    print('Testing finished.')
    print('Time: %f s' % (time.time() - start_time))
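
Each of these VQA scripts scores the five answer choices by building the full batch_size x batch_size cosine-similarity matrix and keeping only its diagonal, i.e. the similarity between prediction i and choice i. That is correct but quadratic in the batch size for a linear number of results. A sketch of an equivalent row-wise computation (assuming prob and choice_feats[j] are both (batch_size, dim) arrays):

# Row-wise cosine similarity: equivalent to np.diag(cosine_similarity(a, b))
# but O(batch_size) instead of O(batch_size**2).
import numpy as np

def rowwise_cosine(a, b, eps=1e-8):
    num = np.sum(a * b, axis=1)
    den = np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1)
    return num / np.maximum(den, eps)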
Code example #3
# Imports reconstructed for a runnable sketch (not shown in the original
# snippet). Keras 1.x-era API and project-local helpers assumed.
import argparse
import time

import numpy as np
from keras.models import model_from_json
from keras.utils import generic_utils
from sklearn.metrics.pairwise import cosine_similarity

# Project-local helpers assumed importable from the repository:
# LoadIds, LoadQuestions, LoadChoices, LoadVGGFeatures, LoadGloVe,
# MakeBatches, GetQuestionsTensor, GetImagesMatrix, GetChoicesTensor,
# SavePredictions


def main():
    start_time = time.time()

    parser = argparse.ArgumentParser(
        prog='testLSTM_MLP.py',
        description='Test LSTM-MLP model for visual question answering')
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        metavar='<model-path>')
    parser.add_argument('--weights',
                        type=str,
                        required=True,
                        metavar='<weights-path>')
    parser.add_argument('--output',
                        type=str,
                        required=True,
                        metavar='<prediction-path>')
    args = parser.parse_args()

    word_vec_dim = 300
    batch_size = 128

    #######################
    #      Load Model     #
    #######################

    print('Loading model and weights...')
    with open(args.model, 'r') as f:
        model = model_from_json(f.read())
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(args.weights)
    print('Model and weights loaded.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #      Load Data     #
    ######################

    print('Loading data...')

    #dev_id_pairs, dev_image_ids = LoadIds('dev')
    test_id_pairs, test_image_ids = LoadIds('test')

    #dev_questions = LoadQuestions('dev')
    test_questions = LoadQuestions('test')

    #dev_choices = LoadChoices('dev')
    test_choices = LoadChoices('test')

    #dev_answers = LoadAnswers('dev')

    print('Finished loading data.')
    print('Time: %f s' % (time.time() - start_time))

    ########################################
    #  Load CNN Features and Word Vectors  #
    ########################################

    # load VGG features
    print('Loading VGG features...')
    VGG_features, img_map = LoadVGGFeatures()
    print('VGG features loaded')
    print('Time: %f s' % (time.time() - start_time))

    # load GloVe vectors
    print('Loading GloVe vectors...')
    word_embedding, word_map = LoadGloVe()
    print('GloVe vectors loaded')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #    Make Batches    #
    ######################

    print('Making batches...')

    # validation batches
    #dev_question_batches = [ b for b in MakeBatches(dev_questions, batch_size, fillvalue=dev_questions[-1]) ]
    #dev_answer_batches = [ b for b in MakeBatches(dev_answers['labs'], batch_size, fillvalue=dev_answers['labs'][-1]) ]
    #dev_choice_batches = [ b for b in MakeBatches(dev_choices, batch_size, fillvalue=dev_choices[-1]) ]
    #dev_image_batches = [ b for b in MakeBatches(dev_image_ids, batch_size, fillvalue=dev_image_ids[-1]) ]

    # testing batches
    test_question_batches = list(
        MakeBatches(test_questions, batch_size, fillvalue=test_questions[-1]))
    test_choice_batches = list(
        MakeBatches(test_choices, batch_size, fillvalue=test_choices[-1]))
    test_image_batches = list(
        MakeBatches(test_image_ids, batch_size, fillvalue=test_image_ids[-1]))

    print('Finished making batches.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #       Testing      #
    ######################

    # evaluate on test set
    pbar = generic_utils.Progbar(len(test_question_batches) * batch_size)

    #dev_correct = 0
    predictions = []

    for i in range(len(test_question_batches)):
        # feed forward
        X_question_batch = GetQuestionsTensor(test_question_batches[i],
                                              word_embedding, word_map)
        X_image_batch = GetImagesMatrix(test_image_batches[i], img_map,
                                        VGG_features)
        prob = model.predict_proba([X_question_batch, X_image_batch],
                                   batch_size,
                                   verbose=0)

        # get word vecs of choices
        choice_feats = GetChoicesTensor(test_choice_batches[i], word_embedding,
                                        word_map)
        similarity = np.zeros((5, batch_size), float)
        # cosine similarity between each prediction and the matching choice
        for j in range(5):
            similarity[j] = np.diag(cosine_similarity(prob, choice_feats[j]))
        # pick the most similar choice (choices are 1-indexed)
        pred = np.argmax(similarity, axis=0) + 1
        predictions.extend(pred.tolist())
        pbar.add(batch_size)

        #dev_correct += np.count_nonzero(dev_answer_batches[i]==pred)

    SavePredictions(args.output, predictions, test_id_pairs)
    print('Time: %f s' % (time.time() - start_time))
    print('Testing finished.')
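
GetImagesMatrix is another project helper that is only visible through its call sites: it turns a batch of image ids into a matrix of precomputed CNN features, using img_map to locate each image inside VGG_features. A minimal sketch, assuming the features are stored as an (n_images, feature_dim) array and img_map maps image id to row index (both assumptions, since the loader is not shown):

# Hypothetical sketch of GetImagesMatrix under the layout assumed above.
import numpy as np

def GetImagesMatrix(image_ids, img_map, features):
    rows = [img_map[image_id] for image_id in image_ids]
    return features[np.asarray(rows)]  # (batch_size, feature_dim)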
Code example #4
# Imports reconstructed for a runnable sketch (not shown in the original
# snippet). Keras 1.x-era API and project-local helpers assumed.
import argparse
import time

import numpy as np
from keras.models import model_from_json
from keras.utils import generic_utils
from sklearn.metrics.pairwise import cosine_similarity

# Project-local helpers assumed importable from the repository:
# LoadIds, LoadQuestions, LoadChoices, LoadCaptions, LoadVGGFeatures,
# LoadInceptionFeatures, LoadGloVe, MakeBatches, GetQuestionsTensor,
# GetImagesMatrix, GetCaptionsTensor, GetChoicesTensor, SavePredictions


def main():
    start_time = time.time()

    parser = argparse.ArgumentParser(
        prog='valLSTM_MLP.py',
        description='Test LSTM-MLP model for visual question answering')
    parser.add_argument('--model-vgg',
                        type=str,
                        required=True,
                        metavar='<model-path>')
    parser.add_argument('--weights-vgg',
                        type=str,
                        required=True,
                        metavar='<weights-path>')
    parser.add_argument('--model-inc',
                        type=str,
                        required=True,
                        metavar='<model-path>')
    parser.add_argument('--weights-inc',
                        type=str,
                        required=True,
                        metavar='<weights-path>')
    parser.add_argument('--output',
                        type=str,
                        required=True,
                        metavar='<prediction-path>')
    args = parser.parse_args()

    word_vec_dim = 300
    batch_size = 128
    vgg_weight = 0.25
    inc_weight = 1 - vgg_weight

    #######################
    #      Load Models    #
    #######################

    print('Loading models and weights...')
    with open(args.model_vgg, 'r') as f:
        model_vgg = model_from_json(f.read())
    model_vgg.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model_vgg.load_weights(args.weights_vgg)

    with open(args.model_inc, 'r') as f:
        model_inc = model_from_json(f.read())
    model_inc.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model_inc.load_weights(args.weights_inc)
    print('Models and weights loaded.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #      Load Data     #
    ######################
    data_dir = '/home/mlds/data/0.05_val/'

    print('Loading data...')

    #train_id_pairs, train_image_ids = LoadIds('train')
    #dev_id_pairs, dev_image_ids = LoadIds('dev')
    test_q_ids, test_image_ids = LoadIds('test', data_dir)

    #train_questions = LoadQuestions('train')
    #dev_questions = LoadQuestions('dev')
    test_questions = LoadQuestions('test', data_dir)

    #train_choices = LoadChoices('train')
    #dev_choices = LoadChoices('dev')
    test_choices = LoadChoices('test', data_dir)

    #train_answers = LoadAnswers('train')
    #dev_answers = LoadAnswers('dev')
    caption_map = LoadCaptions('test')

    print('Finished loading data.')
    print('Time: %f s' % (time.time() - start_time))

    ########################################
    #  Load CNN Features and Word Vectors  #
    ########################################

    # load VGG features
    print('Loading VGG features...')
    VGG_features, vgg_img_map = LoadVGGFeatures()
    print('VGG features loaded')
    print('Time: %f s' % (time.time() - start_time))

    # load Inception features
    print('Loading Inception features...')
    INC_features, inc_img_map = LoadInceptionFeatures()
    print('Inception features loaded')
    print('Time: %f s' % (time.time() - start_time))

    # load GloVe vectors
    print('Loading GloVe vectors...')
    word_embedding, word_map = LoadGloVe()
    print('GloVe vectors loaded')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #    Make Batches    #
    ######################

    print('Making batches...')

    # train batches
    # train_question_batches = [ b for b in MakeBatches(train_questions, batch_size, fillvalue=train_questions[-1]) ]
    # train_answer_batches = [ b for b in MakeBatches(train_answers['labs'], batch_size, fillvalue=train_answers['labs'][-1]) ]
    # train_choice_batches = [ b for b in MakeBatches(train_choices, batch_size, fillvalue=train_choices[-1]) ]
    # train_image_batches = [ b for b in MakeBatches(train_image_ids, batch_size, fillvalue=train_image_ids[-1]) ]

    # validation batches
    # dev_question_batches = [ b for b in MakeBatches(dev_questions, batch_size, fillvalue=dev_questions[-1]) ]
    # dev_answer_batches = [ b for b in MakeBatches(dev_answers['labs'], batch_size, fillvalue=dev_answers['labs'][-1]) ]
    # dev_choice_batches = [ b for b in MakeBatches(dev_choices, batch_size, fillvalue=dev_choices[-1]) ]
    # dev_image_batches = [ b for b in MakeBatches(dev_image_ids, batch_size, fillvalue=dev_image_ids[-1]) ]

    # testing batches
    test_question_batches = list(
        MakeBatches(test_questions, batch_size, fillvalue=test_questions[-1]))
    test_choice_batches = list(
        MakeBatches(test_choices, batch_size, fillvalue=test_choices[-1]))
    test_image_batches = list(
        MakeBatches(test_image_ids, batch_size, fillvalue=test_image_ids[-1]))

    print('Finished making batches.')
    print('Time: %f s' % (time.time() - start_time))

    ######################
    #       Testing      #
    ######################

    predictions = []
    pbar = generic_utils.Progbar(len(test_question_batches) * batch_size)

    for i in range(len(test_question_batches)):
        # feed forward
        X_question_batch = GetQuestionsTensor(test_question_batches[i],
                                              word_embedding, word_map)
        X_vgg_image_batch = GetImagesMatrix(test_image_batches[i], vgg_img_map,
                                            VGG_features)
        X_inc_image_batch = GetImagesMatrix(test_image_batches[i], inc_img_map,
                                            INC_features)
        X_caption_batch = GetCaptionsTensor(test_image_batches[i],
                                            word_embedding, word_map,
                                            caption_map)
        prob_vgg = model_vgg.predict_proba(
            [X_question_batch, X_caption_batch, X_vgg_image_batch],
            batch_size,
            verbose=0)
        prob_inc = model_inc.predict_proba(
            [X_question_batch, X_caption_batch, X_inc_image_batch],
            batch_size,
            verbose=0)
        prob = (vgg_weight * prob_vgg + inc_weight * prob_inc)

        # get word vecs of choices
        choice_feats = GetChoicesTensor(test_choice_batches[i], word_embedding,
                                        word_map)
        similarity = np.zeros((5, batch_size), float)
        # cosine similarity between each prediction and the matching choice
        for j in range(5):
            similarity[j] = np.diag(cosine_similarity(prob, choice_feats[j]))
        # pick the most similar choice (choices are 1-indexed)
        pred = np.argmax(similarity, axis=0) + 1
        predictions.extend(pred.tolist())

        pbar.add(batch_size)

    SavePredictions(args.output, predictions, test_q_ids)

    print('Time: %f s' % (time.time() - start_time))
    print('Testing finished.')
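
The only new ingredient in this script is the late fusion in the forward pass: the VGG-based and Inception-based models are blended by a convex combination of their output probabilities (vgg_weight + inc_weight = 1) before the usual choice scoring. The same idea generalizes to any number of models; a small sketch (the function name and example weights are illustrative, not from the source):

def fuse_probs(probs, weights):
    """Convex combination of model outputs.

    probs:   list of (batch_size, dim) arrays, one per model
    weights: non-negative floats summing to 1
    """
    assert abs(sum(weights) - 1.0) < 1e-6
    return sum(w * p for w, p in zip(weights, probs))

# equivalent to the blend above: fuse_probs([prob_vgg, prob_inc], [0.25, 0.75])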
Code example #5
File: test_sent.py  Project: pedrotmoreira/AlaBaSoAya
# Imports reconstructed for a runnable sketch (not shown in the original
# snippet). Keras 1.x-era API and project-local helpers assumed; on newer
# scikit-learn, use `import joblib` instead of sklearn.externals.
import argparse
import time

import numpy as np
from keras.optimizers import Adagrad
from keras.utils import generic_utils
from sklearn.externals import joblib

# Project-local helpers assumed importable from the repository:
# LoadAspectMap, LoadSentences, LoadAspects, LoadGloVe, GetAspectEncoder,
# MakeBatches, GetSentenceTensor, GetAspectFeatures, CreateGraph,
# SavePredictions


def main():
    start_time = time.time()

    # argument parser
    parser = argparse.ArgumentParser(prog='test_sent.py',
            description='Test MemNN-wordvec model for ABSA sentiment classification')
    parser.add_argument('--mlp-hidden-units', type=int, default=256, metavar='<mlp-hidden-units>')
    parser.add_argument('--mlp-hidden-layers', type=int, default=2, metavar='<mlp-hidden-layers>')
    parser.add_argument('--dropout', type=float, default=0.3, metavar='<dropout-rate>')
    parser.add_argument('--mlp-activation', type=str, default='relu', metavar='<activation-function>')
    parser.add_argument('--batch-size', type=int, default=32, metavar='<batch-size>')
    parser.add_argument('--learning-rate', type=float, default=0.001, metavar='<learning-rate>')
    parser.add_argument('--aspects', type=int, required=True, metavar='<number of aspects>')
    parser.add_argument('--domain', type=str, required=True, choices=['rest','lapt'], metavar='<domain>')
    parser.add_argument('--cross-val-index', type=int, required=True, choices=range(0,10), metavar='<cross-validation-index>')
    parser.add_argument('--weights', type=str, required=True, metavar='<weights-path>')
    parser.add_argument('--output', type=str, required=True, metavar='<prediction-path>')
    args = parser.parse_args()

    word_vec_dim = 300
    aspect_dim = args.aspects
    polarity_num = 3
    emb_dim = 75
    emb_size = 100
    img_dim = word_vec_dim
    hops = 2

    ######################
    # Model Descriptions #
    ######################
    print('Generating and compiling model...')
    # pass the parsed activation flag (the original hardcoded 'relu' here,
    # silently ignoring --mlp-activation)
    model = CreateGraph(emb_dim, hops, args.mlp_activation, args.mlp_hidden_units, args.mlp_hidden_layers, word_vec_dim, aspect_dim, img_dim, emb_size, polarity_num)

    # loss and optimizer
    adagrad = Adagrad(lr=args.learning_rate)
    model.compile(loss={'output':'categorical_crossentropy'}, optimizer=adagrad)
    model.load_weights(args.weights)
    print('Compilation finished.')
    print('Time: %f s' % (time.time()-start_time))

    ######################
    #      Load Data     #
    ######################
    print('Loading data...')

    # aspect mapping
    asp_map = LoadAspectMap(args.domain)
    # sentences
    te_sents = LoadSentences(args.domain, 'te', args.cross_val_index)
    # aspects
    te_asps = LoadAspects(args.domain, 'te', args.cross_val_index, asp_map)
    print('Finished loading data.')
    print('Time: %f s' % (time.time()-start_time))

    #####################
    #       GloVe       #
    #####################
    print('Loading GloVe vectors...')

    word_embedding, word_map = LoadGloVe()
    print('GloVe vectors loaded')
    print('Time: %f s' % (time.time()-start_time))


    #####################
    #      Encoders     #
    #####################
    asp_encoder = GetAspectEncoder(asp_map)
    lab_encoder = joblib.load('models/'+args.domain+'_labelencoder_'+str(args.cross_val_index)+'.pkl')

    ######################
    #    Make Batches    #
    ######################
    print('Making batches...')

    # test batches
    te_sent_batches = list(MakeBatches(te_sents, args.batch_size, fillvalue=te_sents[-1]))
    te_asp_batches = list(MakeBatches(te_asps, args.batch_size, fillvalue=te_asps[-1]))

    print('Finished making batches.')
    print('Time: %f s' % (time.time()-start_time))

    ######################
    #      Testing       #
    ######################

    # start testing
    print('Testing started...')
    pbar = generic_utils.Progbar(len(te_sent_batches)*args.batch_size)

    predictions = []
    # testing feedforward
    for i in range(len(te_sent_batches)):
        X_sent_batch = GetSentenceTensor(te_sent_batches[i], word_embedding, word_map)
        X_asp_batch = GetAspectFeatures(te_asp_batches[i], asp_encoder)
        pred = model.predict_on_batch({'sentence': X_sent_batch, 'aspect': X_asp_batch})
        pred = pred[0]
        pred = np.argmax(pred, axis=1)
        pol = lab_encoder.inverse_transform(pred).tolist()
        predictions.extend(pol)
        pbar.add(args.batch_size)
    SavePredictions(args.output, predictions, len(te_sents))

    print('Testing finished.')
    print('Time: %f s' % (time.time()-start_time))
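
GetAspectEncoder and GetAspectFeatures are project helpers; from their use here they turn an aspect name into a fixed-length feature vector, batch by batch. A minimal sketch assuming a plain one-hot encoding over asp_map, which is assumed to map aspect name to integer index (neither detail is shown in the source):

# Hypothetical one-hot aspect encoding matching the call sites above.
import numpy as np

def GetAspectEncoder(asp_map):
    dim = len(asp_map)
    def encode(aspect):
        vec = np.zeros(dim, dtype=np.float32)
        vec[asp_map[aspect]] = 1.0
        return vec
    return encode

def GetAspectFeatures(aspect_batch, asp_encoder):
    return np.stack([asp_encoder(a) for a in aspect_batch])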
Code example #6
# Imports reconstructed for a runnable sketch (not shown in the original
# snippet). Keras 1.x-era API and project-local helpers assumed; on newer
# scikit-learn, use `import joblib` instead of sklearn.externals.
import argparse
import time

from keras.models import model_from_json
from keras.utils import generic_utils
from sklearn.externals import joblib

# Project-local helpers assumed importable from the repository:
# LoadAspectMap, LoadSentences, LoadAspects, LoadGloVe, GetAspectEncoder,
# MakeBatches, GetSentenceTensor, GetAspectFeatures, SavePredictions


def main():
    start_time = time.time()

    # argument parser
    parser = argparse.ArgumentParser(prog='test_sent.py',
            description='Test LSTM-NN model for ABSA sentiment classification')
    parser.add_argument('--aspects', type=int, required=True, metavar='<number of aspects>')
    parser.add_argument('--domain', type=str, required=True, choices=['rest','lapt'], metavar='<domain>')
    parser.add_argument('--cross-val-index', type=int, required=True, choices=range(0,10), metavar='<cross-validation-index>')
    parser.add_argument('--model', type=str, required=True, metavar='<model-path>')
    parser.add_argument('--weights', type=str, required=True, metavar='<weights-path>')
    parser.add_argument('--output', type=str, required=True, metavar='<prediction-path>')
    args = parser.parse_args()

    sent_vec_dim = 300
    batch_size = 128
    aspect_dim = args.aspects
    polarity_num = 3

    #######################
    #      Load Model     #
    #######################
    print('Loading model and weights...')
    with open(args.model,'r') as f:
        model = model_from_json(f.read())
    model.compile(loss='categorical_crossentropy', optimizer='adagrad')
    model.load_weights(args.weights)
    print('Model and weights loaded.')
    print('Time: %f s' % (time.time()-start_time))

    ######################
    #      Load Data     #
    ######################
    print('Loading data...')

    # aspect mapping
    asp_map = LoadAspectMap(args.domain)
    # sentences
    te_sents = LoadSentences(args.domain, 'te', args.cross_val_index)
    # aspects
    te_asps = LoadAspects(args.domain, 'te', args.cross_val_index, asp_map)
    print('Finished loading data.')
    print('Time: %f s' % (time.time()-start_time))

    #####################
    #       GloVe       #
    #####################
    print('Loading GloVe vectors...')
    word_embedding, word_map = LoadGloVe()
    print('GloVe vectors loaded')
    print('Time: %f s' % (time.time()-start_time))

    #####################
    #      Encoders     #
    #####################
    asp_encoder = GetAspectEncoder(asp_map)
    lab_encoder = joblib.load('models/'+args.domain+'_labelencoder_'+str(args.cross_val_index)+'.pkl')

    ######################
    #    Make Batches    #
    ######################
    print('Making batches...')

    # te batches
    te_sent_batches = list(MakeBatches(te_sents, batch_size, fillvalue=te_sents[-1]))
    te_asp_batches = list(MakeBatches(te_asps, batch_size, fillvalue=te_asps[-1]))

    print('Finished making batches.')
    print('Time: %f s' % (time.time()-start_time))

    ######################
    #      Testing       #
    ######################

    # start testing
    print('Testing started...')
    pbar = generic_utils.Progbar(len(te_sent_batches)*batch_size)

    predictions = []
    # testing feedforward
    for i in range(len(te_sent_batches)):
        X_sent_batch = GetSentenceTensor(te_sent_batches[i], word_embedding, word_map)
        X_asp_batch = GetAspectFeatures(te_asp_batches[i], asp_encoder)
        pred = model.predict_classes([X_sent_batch, X_asp_batch], batch_size, verbose=0)
        pol = lab_encoder.inverse_transform(pred).tolist()
        predictions.extend(pol)
        pbar.add(batch_size)
    SavePredictions(args.output, predictions, len(te_sents))

    print('Testing finished.')
    print('Time: %f s' % (time.time()-start_time))
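
In both ABSA scripts, SavePredictions receives len(te_sents) rather than a list of ids: the third argument is the number of real (unpadded) sentences, which lets the writer drop the predictions that came from batch padding. A sketch of that behavior, assuming a plain one-prediction-per-line output format (the actual format is not shown):

# Hypothetical sketch of this script's SavePredictions(path, preds, n):
# keep only the first n predictions (the rest are batch padding).
def SavePredictions(path, predictions, n):
    with open(path, 'w') as f:
        for polarity in predictions[:n]:
            f.write('%s\n' % polarity)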