Code example #1
def main():
    # Argument Parser
    parser = argparse.ArgumentParser()
    parser.add_argument("-w",
                        "--weights",
                        type=str,
                        required=True,
                        help="model weights")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        required=True,
                        help="model json")
    parser.add_argument("-p",
                        "--prediction",
                        type=str,
                        required=True,
                        help="prediction file")
    args = parser.parse_args()

    # Load data
    print 'Loading data...'
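    # load_data() returns ((X_train, Y_train), (X_test, Y_test)); [1][0] keeps X_test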
    X_test = load_data()[1][0]
    TEST_ID = '../Data/pkl/img_q_id_test'
    TEST_ID_PKL = pickle.load(open(TEST_ID + '.pkl', 'rb'))

    # Create Model
    print 'Loading model...'
    model = model_from_json(open(args.model).read())
    model.load_weights(args.weights)

    # Predict
    print 'Predicting...'
    probs = model.predict(X_test, batch_size=128)
    ids = map(nameToId,
              [TEST_ID_PKL[idx][1] for idx in range(len(TEST_ID_PKL))])

    # argmax over the first 5 probability columns (one score per candidate answer)
    answers = map(numToC, np.argmax(probs[:, :5], axis=1).tolist())

    prediction = zip(ids, answers)

    # Write to CSV
    print 'Writing to CSV...'
    with open(args.prediction, 'wb') as fout:
        c = csv.writer(fout, delimiter=',')
        c.writerow(['q_id', 'ans'])
        c.writerows(prediction)

    print 'Done'
Code example #2
File: predict.py  Project: iammrhelo/MLDS
def main():
    # Argument Parser
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--weights", type=str, required=True, help="model weights")
    parser.add_argument("-m", "--model", type=str, required=True, help="model json")
    parser.add_argument("-p", "--prediction", type=str, required=True, help="prediction file")
    args = parser.parse_args()

    # Load data
    print "Loading data..."
    X_test = load_data()[1][0]
    TEST_ID = "../Data/pkl/img_q_id_test"
    TEST_ID_PKL = pickle.load(open(TEST_ID + ".pkl", "rb"))

    # Create Model
    print "Loading model..."
    model = model_from_json(open(args.model).read())
    model.load_weights(args.weights)

    # Predict
    print "Predicting..."
    probs = model.predict(X_test, batch_size=128)
    ids = map(nameToId, [TEST_ID_PKL[idx][1] for idx in range(len(TEST_ID_PKL))])

    answers = map(numToC, np.argmax(probs[:, :5], axis=1).tolist())

    prediction = zip(ids, answers)

    # Write to CSV
    print "Writing to CSV..."
    with open(args.prediction, "wb") as fout:
        c = csv.writer(fout, delimiter=",")
        c.writerow(["q_id", "ans"])
        c.writerows(prediction)

    print "Done"
Code example #3
# Parameters
batch_size = 200
nb_epoch = 20
verbose = 1
validation_split = 0.1
shuffle = True
show_accuracy = True

MODEL_ROOT = '../models/elephas/'
PREDICTION_ROOT = '../predictions/'

MODEL = 'sixlayeradagrad_noLSTM_batch_{}'.format(batch_size)

print 'Loading data...'
(X_train, Y_train), (X_test, Y_test) = load_data()
TEST_ID = '../Data/pkl/img_q_id_test'
TEST_ID_PKL = pickle.load(open(TEST_ID + '.pkl', 'rb'))
ids = map(nameToId, [TEST_ID_PKL[idx][1] for idx in range(len(TEST_ID_PKL))])

print 'Building model...'
model = Akar.keras_model(1)
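# Akar is presumably the model-definition module (cf. "import Keras_model_deep_noLSTM as Akar" in code example #4)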

#print 'Defining callbacks...'
#checkpoint = ModelCheckpoint('../models/elephas/checkpoint_'+MODEL+'.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto')
#earlystopping = EarlyStopping(monitor='val_loss', patience=2, verbose=0)

print 'Start training...'
# Each fit() call trains nb_epoch (20) more epochs; the loop variable only
# labels the cumulative epoch count used in the checkpoint filename.
for epoch in [20, 40, 60, 80, 100, 120, 140, 160, 180, 200]:
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=verbose, callbacks=[], validation_split=validation_split,
              shuffle=shuffle, show_accuracy=show_accuracy)
    model.save_weights(MODEL_ROOT + MODEL + "_{}.h5".format(epoch))
Code example #4
import argparse
import cPickle as pickle
import csv
import numpy as np
import pdb
import sys

import Keras_model_deep_noLSTM as Akar
import vqa

(X_train, Y_train), (X_test, Y_test) = vqa.load_data()

PREDICTION_FILE_NAME = '../predictions/test_elephat_test'
MODEL_NAME = '../models/elephas/overfit_noLSTM_100.h5'

TEST_ID = '../Data/pkl/img_q_id_test'

TEST_ID_PKL = pickle.load(open(TEST_ID + '.pkl', 'rb'))

print "start making model..."
model = Akar.keras_model(1)
model.load_weights(MODEL_NAME)

print "Start testing..."
prediction = model._predict([X_test])

def nameToId(ans_string):
    # Zero-pad the id string on the left to a fixed width of 7 characters
    return '{0:{fill}{align}7}'.format(ans_string, fill='0', align='>')

def numToC(ans_int):
    if ans_int == 0:
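
Code example #4 is cut off inside numToC. Judging from code example #1, where numToC turns np.argmax over five probability columns into the 'ans' field, it plausibly maps a class index 0-4 to a multiple-choice letter. A minimal sketch of the missing body, assuming an A-E mapping that the source does not show:

def numToC(ans_int):
    # Assumed mapping (not in the source): class index 0-4 -> letter 'A'-'E'
    if ans_int == 0:
        return 'A'
    elif ans_int == 1:
        return 'B'
    elif ans_int == 2:
        return 'C'
    elif ans_int == 3:
        return 'D'
    else:
        return 'E'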
Code example #5
# Parameters
batch_size = 200
nb_epoch = 20
verbose = 1
validation_split = 0.1
shuffle = True
show_accuracy = True

MODEL_ROOT = '../models/elephas/'
PREDICTION_ROOT = '../predictions/'

MODEL = 'attention_noLSTM_batch_{}'.format(batch_size)

print 'Loading data...'
(X_train, Y_train), (X_test, Y_test) = load_data()
TEST_ID = '../Data/pkl/img_q_id_test'
TEST_ID_PKL = pickle.load(open(TEST_ID + '.pkl', 'rb'))
ids = map(nameToId, [TEST_ID_PKL[idx][1] for idx in range(len(TEST_ID_PKL))])

print 'Building model...'
model = Akar.keras_model(1)

#print 'Defining callbacks...'
#checkpoint = ModelCheckpoint('../models/elephas/checkpoint_'+MODEL+'.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto')
#earlystopping = EarlyStopping(monitor='val_loss', patience=2, verbose=0)

print 'Start training...'
for epoch in [20, 40, 60, 80, 100]:
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=verbose, callbacks=[], validation_split=validation_split,
              shuffle=shuffle, show_accuracy=show_accuracy)
    model.save_weights(MODEL_ROOT + MODEL + "_{}.h5".format(epoch))
Code example #6
File: ensemble.py  Project: iammrhelo/MLDS
def main():
    model_root = '../models/'
    model_json = '../models/json/'
    # Models to ensemble
    # ('model_name', 'model_type')
    model_types = [
        ('elephas/overfit_noLSTM_100.h5', 'deep_noLSTM_1.json'),
        #('no_lstm_5layer_250.h5','noLSTM_1.json'),
        ('elephas/overfit_noLSTM_batch_100_100.h5', 'deep_noLSTM_1.json'),
        ('elephas/overfit_noLSTM_batch_500_80.h5', 'deep_noLSTM_1.json'),
        ('elephas/overfit_noLSTM_batch_1000_60.h5', 'deep_noLSTM_1.json')
    ]

    # Load data
    print 'Loading data...'
    X_test = load_data()[1][0]
    TEST_ID = '../Data/pkl/img_q_id_test'
    TEST_ID_PKL = pickle.load(open(TEST_ID + '.pkl', 'rb'))
    numoftest = X_test.shape[0]

    # Load Models
    print 'Loading models...'
    models = []
    for weights, json in model_types:
        m = model_from_json(open(model_json + json).read())
        m.load_weights(model_root + weights)
        models.append(m)

    # Predict
    print 'Predicting...'
    grouped_answers = []
    for model in models:
        probs = model.predict(X_test, batch_size=128)
        answers = map(numToC, np.argmax(probs, axis=1).tolist())
        grouped_answers.append(answers)

    # Ensemble: per question, keep the most common answer; break ties randomly
    print 'Ensembling...'
    answers = []
    for idx in xrange(numoftest):
        curlist = []
        for m in range(len(models)):
            curlist.append(grouped_answers[m][idx])
        max_cnt = Counter(curlist)
        m = max(v for _, v in max_cnt.iteritems())
        r = [k for k, v in max_cnt.iteritems() if v == m]
        shuffle(r)
        answers.append(r[0])

    # Write to CSV
    ids = map(nameToId,
              [TEST_ID_PKL[idx][1] for idx in range(len(TEST_ID_PKL))])
    prediction = zip(ids, answers)

    print 'Writing to CSV...'
    with open('test_ensemble.csv', 'wb') as fout:
        c = csv.writer(fout, delimiter=',')
        c.writerow(['q_id', 'ans'])
        c.writerows(prediction)

    print 'Done'
Code example #7
File: ensemble.py  Project: ChunHungLiu/MLDS
def main():
    model_root = '../models/'
    model_json = '../models/json/'
    # Models to ensemble
    # ( 'model_name','model_type')
    model_types = [
        ('elephas/overfit_noLSTM_100.h5', 'deep_noLSTM_1.json'),
        #('no_lstm_5layer_250.h5','noLSTM_1.json'),
        ('elephas/overfit_noLSTM_batch_100_100.h5', 'deep_noLSTM_1.json'),
        ('elephas/overfit_noLSTM_batch_500_80.h5', 'deep_noLSTM_1.json'),
        ('elephas/overfit_noLSTM_batch_1000_60.h5', 'deep_noLSTM_1.json')
    ]

    # Load data
    print 'Loading data...'
    X_test = load_data()[1][0]
    TEST_ID = '../Data/pkl/img_q_id_test'
    TEST_ID_PKL = pickle.load(open(TEST_ID + '.pkl', 'rb'))
    numoftest = X_test.shape[0]
    # Load Models
    print 'Loading models...'

    models = []
    for weights, json in model_types:
        m = model_from_json(open(model_json + json).read())
        m.load_weights(model_root + weights)
        models.append(m)

    # Predict
    print 'Predicting...'
    grouped_answers = []
    for model in models:
        probs = model.predict(X_test, batch_size=128)
        answers = map(numToC, np.argmax(probs, axis=1).tolist())
        grouped_answers.append(answers)

    # Ensemble
    print 'Ensembling...'
    answers = []
    for idx in xrange(numoftest):
        curlist = []
        for m in range(len(models)):
            curlist.append(grouped_answers[m][idx])
        max_cnt = Counter(curlist)
        m = max(v for _, v in max_cnt.iteritems())
        r = [k for k, v in max_cnt.iteritems() if v == m]
        shuffle(r)
        answers.append(r[0])

    # Write to CSV
    ids = map(nameToId,
              [TEST_ID_PKL[idx][1] for idx in range(len(TEST_ID_PKL))])
    prediction = zip(ids, answers)

    print 'Writing to CSV...'
    with open('test_ensemble.csv', 'wb') as fout:
        c = csv.writer(fout, delimiter=',')
        c.writerow(['q_id', 'ans'])
        c.writerows(prediction)

    print 'Done'
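
The voting block in code examples #6 and #7 (count each model's answer, keep the answers tied for the top count, shuffle, take the first) can be factored into a small helper. A sketch using only the collections.Counter and random.shuffle imports the source already relies on; the majority_vote name is hypothetical:

from collections import Counter
from random import shuffle

def majority_vote(votes):
    # votes: one predicted answer per model for a single question
    counts = Counter(votes)
    top = max(counts.values())
    # Keep every answer tied for the highest count...
    tied = [ans for ans, cnt in counts.items() if cnt == top]
    # ...and break the tie randomly, mirroring "shuffle(r); r[0]" above
    shuffle(tied)
    return tied[0]

# Usage: majority_vote(['A', 'B', 'A']) returns 'A'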