Example #1
def load_VGG_model():
    print("Loading VGG model")
    global Generating_model_1, Generating_model_2

    global VGG_model, Generating_model
    pretrain_model = model_from_json(open('models/CNN_pretrained_model.json').read())

    pretrain_model.load_weights('models/vgg16_weights.h5')

    VGG_model = model_from_json(open('models/CNN_model.json').read())

    # transfer the pretrained VGG weights into the new model, first 35 layers only
    for k in range(len(pretrain_model.layers)):
        weights_loaded = pretrain_model.layers[k].get_weights()
        if k < 35:
            VGG_model.layers[k].set_weights(weights_loaded)

    VGG_model.compile(loss='categorical_crossentropy', optimizer='sgd')

    with open('models/vocab_list_'+dataname+'.pkl', 'rb') as f:
        vocabs = pickle.load(f)
        for v in vocabs:
            if v not in vocab:
                update_vocab(v)

    Generating_model_1 = load_model(rnn_model_name, 'models/'+dataname+'/best_' + rnn_model_name + '_model_1_output_rnn_'
                                    +str(output_rnn_dim)+'_weights_iteration_20.h5', 1)

    Generating_model_2 = load_model(rnn_model_name, 'models/' + dataname + '/best_' + rnn_model_name + '_model_4_output_rnn_'
                                    + str(output_rnn_dim) +'_weights_iteration_24.h5', 4)
Example #2
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #3
    def load_model(self, model_name):
        """ 
        Read the model from disk, including all trained weights and the complete
        model design (hyperparameters, planes, ...).
        """
    
        locpath="./"
        arch_name = locpath + model_name + '_architecture.json'
        weight_name = locpath + model_name + '_weights.h5'
    
        if not os.path.isfile(arch_name) or not os.path.isfile(weight_name):
            print("Model files %s and/or %s for the given model_name do not exist. Aborting." % (arch_name, weight_name))
            sys.exit()

        print("Loaded model: ",model_name)

        try:
            model = model_from_json(open(arch_name).read(),{'Convolution1D_Transpose_Arbitrary':Convolution1D_Transpose_Arbitrary})
        except NameError:
            try:
                model = model_from_json(open(arch_name).read())
            except:
                rmsprop = RMSprop (lr=0.00001, rho=0.9, epsilon=1e-06)  # for sequence length 500
                loss="categorical_crossentropy"
                model = ufcnn_model_concat_shift(regression = False, output_dim=3, features=32, loss=loss, sequence_length=499, optimizer=rmsprop)

        model.load_weights(weight_name)
        return model
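For reference, the <model_name>_architecture.json / <model_name>_weights.h5 pair this loader expects is normally written out with model.to_json() and model.save_weights(); a minimal sketch (the model and the name used here are hypothetical):

from keras.layers import Dense
from keras.models import Sequential

# toy model purely for illustration
model = Sequential()
model.add(Dense(3, input_dim=10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

model_name = 'ufcnn_demo'  # hypothetical model name
with open('./' + model_name + '_architecture.json', 'w') as f:
    f.write(model.to_json())                            # architecture only
model.save_weights('./' + model_name + '_weights.h5')   # trained weights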
Example #4
 def init2(self):
     try:
         print "AA00"
         #SLPOLICY
         sgd1 = SGD(lr=lrate, decay=0.0, momentum=0.0, nesterov=False)
         print "AA01:" + SLPOLICY_JSON
         t = model_from_json(open(self.curpath + SLPOLICY_JSON).read())
         print "AA01-1"
         self.m_SLPOLICY = t
         print "AA02"
         self.m_SLPOLICY.load_weights(self.curpath + SLPOLICY_H5)
         print "AA03"
         self.m_SLPOLICY.compile(loss='categorical_crossentropy', optimizer=sgd1)
         print "AA04"
         
         #ROLLOUT
         sgd2 = SGD(lr=lrate, decay=0.0, momentum=0.0, nesterov=False)
         print "AA10:" + ROLLOUT_JSON
         s = model_from_json(open(self.curpath + ROLLOUT_JSON).read())
         print "AA11-1"
         self.m_ROLLOUT = s
         print "AA11"
         self.m_ROLLOUT.load_weights(self.curpath + ROLLOUT_H5)
         print "AA12"
         self.m_ROLLOUT.compile(loss='categorical_crossentropy', optimizer=sgd2)
         print "AA13"
         
         #VALUE
         sgd3 = SGD(lr=0.003, decay=0.0, momentum=0.0, nesterov=False)
         print "AA20:" + VALUE_JSON
         s = model_from_json(open(self.curpath + VALUE_JSON).read())
         print "AA21-1"
         self.m_VALUE = s
         print "AA21"
         self.m_VALUE.load_weights(self.curpath + VALUE_H5)
         print "AA22"
         self.m_VALUE.compile(loss='MSE', optimizer=sgd3)
         print "AA23"
         
         #RLPOLICY
         sgd4 = SGD(lr=lrate, decay=0.0, momentum=0.0, nesterov=False)
         print "AA31:" + RLPOLICY_JSON
         t = model_from_json(open(self.curpath + RLPOLICY_JSON).read())
         print "AA31-1"
         self.m_RLPOLICY = t
         print "AA32"
         self.m_RLPOLICY.load_weights(self.curpath + RLPOLICY_H5)
         print "AA33"
         self.m_RLPOLICY.compile(loss='categorical_crossentropy', optimizer=sgd4)
         print "AA34"
         
         self.init = True
         
         print "load pass"
     except:
         print "load failed:", sys.exc_info()[0]
         pass
Example #5
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #6
def get_ensemble_score(name):
    if os.path.exists(util.features_prefix + name + "_XXXYYY.pkl") is False:
        print 'file does not exist'
        exit()
    [X_train, X_validate, X_test, y_train, y_validate, y_test] = pd.read_pickle(
        util.features_prefix + name + '_XXXYYY.pkl')
    import xgboost as xgb

    rf_clf_2 = pd.read_pickle(util.models_prefix + name + '_rf.pkl')
    list_all = []
    rf_2_list = rf_clf_2.predict_proba(X_test)
    from sklearn.feature_selection import SelectFromModel
    list_all.append(rf_2_list)
    xgb_2 = xgb.Booster({'nthread': 4})  # init model
    xgb_2.load_model(util.models_prefix + name + '_xgb_prob.pkl')  # load the trained booster
    dtest = xgb.DMatrix(X_test)
    xgb_2_test = xgb_2.predict(dtest)
    list_all.append(xgb_2_test)
    # list_all.append(xgb_1_test)
    import copy
    [train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
    X_semantic = np.array(copy.deepcopy(X_test[:, range(95, 475)]))
    X_manual = np.array(copy.deepcopy(X_test[:, range(0, 95)]))
    X_cluster = np.array(copy.deepcopy(X_test[:, range(475, 545)]))
    X_document = np.array(copy.deepcopy(X_test[:, range(545, 547)]))
    X_document[:, [0]] = X_document[:, [0]] + train_X[:, [-1]].max()
    X_semantic = X_semantic.reshape(X_semantic.shape[0], 10, -1)
    X_semantic_1 = np.zeros((X_semantic.shape[0], X_semantic.shape[2], X_semantic.shape[1]))
    for i in range(int(X_semantic.shape[0])):
        X_semantic_1[i] = np.transpose(X_semantic[i])
    json_string = pd.read_pickle(util.models_prefix + name + '_json_string_cnn.pkl')
    model_cnn = model_from_json(json_string)
    model_cnn.load_weights(util.models_prefix + name + '_nn_weight_cnn.h5')
    cnn_list = model_cnn.predict_proba([X_document, X_cluster, X_manual, X_semantic_1])
    # cnn_list_prob = model_cnn.predict_proba([X_document, X_cluster, X_manual, X_semantic_1])
    kk = list(cnn_list)
    list_all.append(kk)
    json_string = pd.read_pickle(util.models_prefix + name + '_json_string_lstm.pkl')
    model_lstm = model_from_json(json_string)
    model_lstm.load_weights(util.models_prefix + name + '_nn_weight_lstm.h5')
    lstm_list = model_lstm.predict_proba([X_document, X_cluster, X_manual, X_semantic_1])
    # cnn_list_prob = model_cnn.predict_proba([X_document, X_cluster, X_manual, X_semantic_1])
    kk = list(lstm_list)
    list_all.append(kk)
    temp_list = []
    for i in range(len(y_test)):
        temp = np.zeros(len(list_all[0][0]))
        for z in list_all:
            temp += np.array(z[i])
        temp_list.append(temp)
    evaluate_k_recall(1, y_test, temp_list)

    print '**************************'
Example #7
def test_constant_initializer_with_numpy():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,),
                    kernel_initializer=Constant(np.ones((3, 2)))))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    json_str = model.to_json()
    model_from_json(json_str).summary()

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str).summary()
Example #8
    def load(self, best=False):
        print 'Model.load()'

        name   = '%s_%s'%(self.project.id, self.project.type)
        prefix = 'best' if best else 'latest'

        if self.offline:
            name = '%s_offline'%(name)
        elif best:
            revision = DB.getRevision( self.id )
            prefix   = '%s_%d'%(prefix, revision)

        # construct the path to the network and weights
        path = '%s/%s_%s'%(Paths.Models, prefix, name)
        j_path = '%s.json'%(path)
        w_path = '%s_weights.h5'%(path)

        j_path = j_path.lower()
        w_path = w_path.lower()
       
        if not os.path.exists( j_path ) or not os.path.exists( w_path ):
            return False

        print 'loading model...'
        self.model = model_from_json(open( j_path ).read())
        self.model.load_weights( w_path )
        return True
Example #9
 def load(self):
     f = open(self.lm, "r")
     self.model = model_from_json(f.read())
     self.model.load_weights(self.sm+".weights")
     self.model.compile(loss='categorical_crossentropy',
                     optimizer='adam')
     print "Network compile completed..."
Example #10
 def load(file_name):
     json_file_name, h5_file_name = SequenceModel.get_full_file_names(file_name)
     model = model_from_json(open(json_file_name, 'r').read())
     model.compile(optimizer='rmsprop', loss='mse')
     model.load_weights(h5_file_name)
     print('Loaded file ', file_name)
     return SequenceModel(model=model)
Example #11
def model_from_json(json_file, weights_file=None, compile=True):
    # Thin wrapper around keras.models.model_from_json: read the architecture
    # from a JSON file and optionally load weights. Note that Keras'
    # model_from_json() does not compile the model, so `compile` is ignored here.
    import keras.models as kmodels
    with open(json_file, 'r') as f:
        model = kmodels.model_from_json(f.read())
    if weights_file is not None:
        model.load_weights(weights_file)
    return model
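A minimal usage sketch of this wrapper (the file paths are hypothetical):

model = model_from_json('cache/architecture.json', weights_file='cache/weights.h5')
preds = model.predict(x_test)  # x_test is assumed to be prepared by the caller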
Example #12
def simple_cnn_vgg_like(lr=1e-3, weights_path=None):
    img_rows, img_cols = 210, 70
    # standard VGG16 network architecture
    
    structure_path = "%s/cache/simple_cnn_vgg_like.json" % config.project.project_path
    if weights_path is not None and os.path.exists(weights_path) \
        and os.path.exists(structure_path):

        logger.debug("loading fine-tuned weights from %s" % weights_path)
        model = model_from_json(open(structure_path).read())
        model.load_weights(weights_path)
    else:
        model = Sequential()
        model.add(ZeroPadding2D((1, 1), input_shape=(1, img_rows, img_cols)))
        model.add(Convolution2D(64, 7, 7, activation='relu'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 7, 7, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))

        # replace more fc layer
        model.add(Dense(124, activation='softmax'))

        # load the weights
        logger.debug('Model loaded.')

    sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',  metrics=['accuracy'])

    return model
Example #13
def load_model():
    # model_file = '/data/liubo/face/annotate_face_model/light_cnn_61962.model'
    # weight_file = '/data/liubo/face/annotate_face_model/light_cnn_61962.weight'
    # mean_acc: 0.960719550317  ---  theano
    # mean_acc: 0.725701339299  ---  tensorflow
    # models trained with different Keras backends cannot be reused interchangeably

    # model_file = '/data/liubo/face/annotate_face_model/thin_casia_dlib_light_cnn_10575.model'
    # weight_file = '/data/liubo/face/annotate_face_model/thin_casia_dlib_light_cnn_10575.weight'
    # mean_acc: 0.944297513551

    # model_file = '/data/liubo/face/annotate_face_model/thin_casia_dlib_light_cnn_local_10575.model'
    # weight_file = '/data/liubo/face/annotate_face_model/thin_casia_dlib_light_cnn_local_10575.weight'
    # mean_acc: 0.965546027452

    # model_file = '/data/liubo/face/annotate_face_model/thin_casia_dlib_light_cnn_local_10575_augment.model'
    # weight_file = '/data/liubo/face/annotate_face_model/thin_casia_dlib_light_cnn_local_10575_augment.weight'
    # mean_acc: 0.962660355098

    # model_file = '/data/liubo/face/annotate_face_model/thin_ms_dlib_light_cnn_tf_79078.model'
    # weight_file = '/data/liubo/face/annotate_face_model/thin_ms_dlib_light_cnn_tf_79078.weight'
    # mean_acc: 0.966566141982

    model_file = '/data/liubo/face/annotate_face_model/thin_ms_dlib_light_cnn_tf_augment_79078.model'
    weight_file = '/data/liubo/face/annotate_face_model/thin_ms_dlib_light_cnn_tf_augment_79078.weight'
    # mean_acc: 0.972336911168

    if os.path.exists(model_file) and os.path.exists(weight_file):
        print 'load model'
        model = model_from_json(open(model_file, 'r').read())
        opt = Adam()
        model.compile(optimizer=opt, loss=['categorical_crossentropy'])
        print 'load weights'
        model.load_weights(weight_file)
        return model
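Since the comments above note that models trained under one backend cannot be reused under another, a small guard before loading the weights can make that failure explicit; a sketch (the expected backend name is an assumption):

from keras import backend as K

expected_backend = 'tensorflow'  # assumption: the backend the weight file was trained with
if K.backend() != expected_backend:
    print 'warning: weights were trained with %s but the current backend is %s' % (
        expected_backend, K.backend())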
Example #14
def read_model(version, time_start):
    with open("../Temp/DRL_model_v" + version + "_" + time_start + ".json", "r") as jfile:
        target_model = model_from_json(json.load(jfile))
    target_model.load_weights("../Temp/DRL_model_v" + version + "_" + time_start + ".h5")
    target_model.compile("sgd", "mse")
    return target_model
Example #15
    def Load(self,InDir=False,MetaDataOnly=False,Overwrite=False):

        if InDir:
            self.InDir = InDir

        if not MetaDataOnly:
            self.Model = model_from_json( open(self.InDir+"/Model.json", "r").read() )
            self.Model.load_weights(self.InDir+"/Weights.h5")

        MetaData=pickle.load( open(self.InDir+"/MetaData.pickle","rb"))
        self.MetaData.update(MetaData)
        self.MetaData["InputMetaData"]=[MetaData]
        self.MetaData["InputDir"]=self.InDir

        NoneType=type(None)

        if "Optimizer" in self.MetaData.keys():
            self.Optimizer=self.MetaData["Optimizer"]

        if type(self.Optimizer)==NoneType:
            self.Optimizer="sgd"

        if "Loss" in self.MetaData.keys():
            self.Loss=self.MetaData["Loss"]

        if type(self.Loss)==NoneType:
            self.Loss="mse"

        self.Initialize(Overwrite=Overwrite)
Example #16
def checkModelForFolder(modelName, folderName, testData, weightsFile):
    context_data, question_data, answer_data, y_test, text = testData
    json_file = open(folderName + 'structures/' + modelName, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(folderName + 'structures/' + weightsFile)

    opt = keras.optimizers.Nadam()
    loaded_model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['acc'])
    result = loaded_model.predict({'context': context_data, 'question': question_data, 'answer': answer_data}, batch_size=300)
    real_c = [np.argmax(x) for x in y_test]

    correct = 0
    total = 0
    for i in range(int(len(real_c) / 4)):
        local_res = result[i*4:i*4+4]
        real_result = np.argmax(real_c[i*4:i*4+4])
        aux = np.argmax([x[1] for x in local_res])
        if aux == real_result:
            correct+=1
        total+=1
    print(correct)
    print(total)
    print(correct / total)
Example #17
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #18
 def load_network(self):
     print 'Loading network...'
     model = model_from_json(open('model.json').read())  # load model from file
     model.load_weights('network_weights.h5')  # load network weights
     model.compile(loss='mse', optimizer='rmsprop')
     print 'Network loaded!'
     self.net = model
Example #19
 def load(arch_fname, weights_fname=None):
     from keras.models import model_from_json
     model_json_string = open(arch_fname).read()
     sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
     if weights_fname is not None:
         sequence_dnn.model.load_weights(weights_fname)
     return sequence_dnn
Example #20
def test_1o_2i():
    # test a non-sequential graph with 2 inputs and 1 output
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_input(name='input2', input_shape=(32,))

    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input2')
    graph.add_node(Dense(4), name='dense3', input='dense1')

    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')
    graph.compile('rmsprop', {'output1': 'mse'})

    graph.fit({'input1': X_train_graph, 'input2': X2_train_graph, 'output1': y_train_graph},
              nb_epoch=2)
    out = graph.predict({'input1': X_test_graph, 'input2': X2_test_graph})
    assert isinstance(out, dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.train_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.evaluate({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})

    # test serialization
    config = graph.get_config()
    new_graph = Graph.from_config(config)

    graph.summary()
    json_str = graph.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = graph.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #21
def test_siamese_1():
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_input(name='input2', input_shape=(32,))

    graph.add_shared_node(Dense(4), name='shared', inputs=['input1', 'input2'], merge_mode='sum')
    graph.add_node(Dense(4), name='dense1', input='shared')
    # graph.add_node(Dense(4), name='output1', input='shared', create_output=True)

    # graph.add_output(name='output1', inputs=['dense1', 'shared'], merge_mode='sum')
    graph.add_output(name='output1', input='dense1')
    graph.compile('rmsprop', {'output1': 'mse'})

    graph.fit({'input1': X_train_graph, 'input2': X2_train_graph, 'output1': y_train_graph},
              nb_epoch=10)
    out = graph.predict({'input1': X_test_graph, 'input2': X2_test_graph})
    assert isinstance(out, dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.train_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.evaluate({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    assert(loss < 5.0)

    # test serialization
    config = graph.get_config()
    new_graph = Graph.from_config(config)

    graph.summary()
    json_str = graph.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = graph.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #22
def load_model():
    global model
    model = model_from_json(open('lan_model.json').read())
    model.load_weights('lan_model.h5')
    model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
Example #23
def main():
    batch_size = 64
    nb_epoch = 20
    img_size = 256
    gen = read_data.read_data_photo_labels(
            2000, img_size, batch_size = batch_size)
    X_test, Y_test = next(gen)

  # model = get_image_model(img_size)

    with open(JSON_NAME, 'r') as jfile:
        model = models.model_from_json(jfile.read())
    model.load_weights(WEIGHTS_NAME)

  # model.fit_generator(gen, 
  #         samples_per_epoch=10000, nb_epoch=nb_epoch, verbose=1)

    test_pred = np.sign(model.predict(X_test))
    test_loss = model.evaluate(X_test, Y_test)
    np.savetxt('pred.csv', test_pred, delimiter=',')

  # with open(JSON_NAME, 'w') as jfile:
  #     jfile.write(model.to_json())
  # model.save_weights(WEIGHTS_NAME, overwrite=True)

    print('Test loss: ', test_loss)
    print('Test accuracy: ', score.accuracy(test_pred, Y_test))
    print('F1 score: ', score.f1score(test_pred, Y_test))
    print('F1 score by class:')
    score_byclass = score.f1_by_class(test_pred, Y_test)
    for c, s in enumerate(score_byclass):
        print(c, ':', s)
Example #24
    def load(self):
        print('Re-loading preprocessor...')
        self.preprocessor = pickle.load(open(os.sep.join((self.model_dir, \
                                    'preprocessor.p')), 'rb'))
        print('Re-loading pretrainer...')
        self.pretrainer = pickle.load(open(os.sep.join((self.model_dir, \
                                    'pretrainer.p')), 'rb'))
        print('Re-building model...')
        self.model = model_from_json(open(os.sep.join((self.model_dir, 'model_architecture.json'))).read())
        self.model.load_weights(os.sep.join((self.model_dir, 'model_weights.hdf5')))

        loss_dict = {}
        idx_cnt = 0
        if self.include_lemma:
            loss_dict['lemma_out'] = 'categorical_crossentropy'
            self.lemma_out_idx = idx_cnt
            idx_cnt += 1
            print('Loading known lemmas...')
            self.known_lemmas = pickle.load(open(os.sep.join((self.model_dir, \
                                    'known_lemmas.p')), 'rb'))

        if self.include_pos:
            loss_dict['pos_out'] = 'categorical_crossentropy'
            self.pos_out_idx = idx_cnt
            idx_cnt += 1
        if self.include_morph:
            self.morph_out_idx = idx_cnt
            idx_cnt += 1
            if self.include_morph == 'label':
              loss_dict['morph_out'] = 'categorical_crossentropy'
            elif self.include_morph == 'multilabel':
              loss_dict['morph_out'] = 'binary_crossentropy'

        self.model.compile(optimizer='adadelta', loss=loss_dict)
Example #25
def load_model(iteration=0, path=test_series_name):
	if load_from_previous_trial:
		load_path = "output/{}.{}.{}.json".format(path, trial_to_load, iteration)
	else:
		load_path = "output/{}".format(model_file_name)
	model = open(load_path, 'r').read()
	return model_from_json(model)
Example #26
 def load_saved_model(self,filename,filename2):
     print("Loading model ")
     model = model_from_json(open(filename).read())
     print("Loading weights")
     model.load_weights(filename2)
     print("Model loaded")
     return model
Example #27
    def __init__(self,
                 model_name, data_set, model_inference=[], test_batch_size=64, n_iter=50, model_arch_file='',
                 model_weight_file='', model_save_path='', prediction_save_file=''):
        self._model_name = model_name
        self._data_set = data_set
        # self._data_set._batch_size = batch_size
        self._n_iter = n_iter
        self._model_arch_file = model_arch_file
        self._model_weight_file = model_weight_file
        self._model_save_path = model_save_path
#        if model_name == 'vgg_keras':
#            img_size = data_set.get_img_size()
#            self._model = vgg_std16_model(img_rows=img_size[1], img_cols=img_size[2], color_type=3,
#                                          model_weights_file=model_weights_file, continueFile='')
        if model_inference:
            self._model = model_inference
        elif model_arch_file:
            self._model = model_from_json(open(self._model_arch_file).read())
            self._model.load_weights(self._model_weight_file)
            print('compiling model %s.....'%(self._model_name))
            self._model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
        else:
            print('Warning: Model Architecture is not defined!')
        
        self._test_batch_size = test_batch_size
        self._prediction_save_file = prediction_save_file
        self._prediction = {}
Example #28
def test():
	parameters = process_args(sys.argv[1:], __doc__)

	datadir = os.path.split(os.path.realpath(__file__))[0]
	#print 'data dir',datadir
	batch_size = 32
	nb_classes = 7
	nb_epoch = 30
	data_augmentation = True

	# input image dimensions
	img_size = 250
	img_rows, img_cols = img_size, img_size
	# the CIFAR10 images are RGB
	img_channels = 3

	#build model
	model = model_from_json(open(datadir+'/model.json').read())
	model.load_weights(datadir+'/weights.29-0.96.hdf5')

	img = imread(parameters.img_path)  # 1000*750; by default the longer edge comes first for the correct orientation
	if img.shape[0]<img.shape[1]:
		img=img.transpose(1,0,2)
	img=imresize(img, (img_size,img_size)).transpose(2,0,1)
	              
	img=img[None,:]

	result=model.predict(img)
	result=np.argmax(result.flatten())
	print 'the image belongs to class:', result
	return result
Example #29
def perform_train_task(task):
    try:
        weight_model = task.weight
        weight_file = weight_model.weight_file.file
        config = TaskBroker().parse_config(task.config)

        f = h5py.File(task.dataset.dataset_file.name,'r')
        model = model_from_json(weight_model.model.definition)
        # compile the model
        apply_dict(model.compile,config['compile'])

        # unpack and eval necessary variables for training 
        dataset_size = DatasetBroker().get_dataset_size(task.dataset.name)
        batch_size = config['fit']['batch_size']
        epoch = config['fit']['epoch']
        steps = dataset_size / batch_size
        callbacks = config['fit']['callbacks'] if 'callbacks' in config['fit'] else None

        # train and return the history object
        result = model.fit_generator(get_data_generator(f,dataset_size,batch_size),steps,epoch,callbacks = callbacks)

        f.close()
        return result

    except:
        raise
        return None
Example #30
def loadModel(location):
    from keras.models import model_from_json
    with open(location+".json",'rb') as f:
        json_string     = f.read()
    model = model_from_json(json_string)
    model.load_weights(location+"weights.h5")
    return model
Example #31
    fold2label = dict()
    for i in xrange(len(sequence_file)):
        if sequence_file[i].find('Label') > 0:
            print "Skip line ", sequence_file[i]
            continue
        fold = sequence_file[i].rstrip().split('\t')[0]
        label = int(sequence_file[i].rstrip().split('\t')[1])
        if label not in fold2label:
            fold2label[label] = fold

    json_file_model = open(model_file, 'r')
    loaded_model_json = json_file_model.read()
    json_file_model.close()
    DLS2F_ResCNN = model_from_json(loaded_model_json,
                                   custom_objects={
                                       'Dynamick_max_pooling1d':
                                       Dynamick_max_pooling1d,
                                       'K_max_pooling1d': K_max_pooling1d
                                   })

    print "######## Loading existing weights ", model_weight
    DLS2F_ResCNN.load_weights(model_weight)
    DLS2F_ResCNN.compile(loss="categorical_crossentropy",
                         metrics=['accuracy'],
                         optimizer="nadam")
    get_flatten_layer_output = K.function(
        [DLS2F_ResCNN.layers[0].input,
         K.learning_phase()],
        [DLS2F_ResCNN.layers[-3].output])  # input to flatten layer

    Testlist_data_keys = dict()
    Testlist_targets_keys = dict()
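A usage sketch for the backend function defined above (the test batch is hypothetical; the trailing 0 selects the test phase for K.learning_phase()):

flatten_input = get_flatten_layer_output([test_batch_x, 0])[0]  # output of layers[-3], i.e. the input to the flatten layer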
Example #32
def read_model(network_path):
    exit_ifnex(network_path)
    model = model_from_json(
        open(os.path.join(network_path, 'architecture.json')).read())
    model.load_weights(os.path.join(network_path, 'weights.h5'))
    return model
Example #33
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              epochs=1,
              batch_size=10)

    # test config
    model.get_config()

    # test when specifying a batch_input_shape
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(layers.Dense(2), batch_input_shape=(1, 3, 4)))
    reference.add(layers.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Embedding
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6),
                                 batch_input_shape=(10, 3, 4),
                                 dtype='int32'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
              np.random.random((10, 3, 4, 6)),
              epochs=1,
              batch_size=10)

    # compare to not using batch_input_shape
    test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6),
                                 input_shape=(3, 4),
                                 dtype='int32'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Conv2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Conv2D(5, (2, 2), padding='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(layers.Dense(3)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              epochs=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(layers.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with BatchNormalization
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.BatchNormalization(center=True,
                                                           scale=True),
                                 name='bn',
                                 input_shape=(10, 2)))
    model.compile(optimizer='rmsprop', loss='mse')
    # Assert that mean and variance are 0 and 1.
    td = model.layers[0]
    assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Train
    model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                         np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
    # Assert that mean and variance changed.
    assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Verify input_map has one mapping from inputs to reshaped inputs.
    uid = object_list_uid(model.inputs)
    assert len(td._input_map.keys()) == 1
    assert uid in td._input_map
    assert K.int_shape(td._input_map[uid]) == (None, 2)
Example #34
tsteps = 24
batch_size = 1
epochs = 40
attsize = 2

inputs = pickle.load(open('xAttn.p'))
expected_outputs = pickle.load(open('yAttn.p'))
predicted_outputs = 0

train_inps = inputs[:1200]
train_outs = expected_outputs[:1200]

test_inps = inputs[1200:]
test_outs = expected_outputs[1200:]

model = model_from_json(open('lstm_inet_n_30_drpout.json').read())
model.load_weights('lstm_inet_weights_n_30_drpout.h5')


def ohe_predicted_value(
    previous_input,
    value,
):
    """
    :param previous_input: previous input, used to determine the index
    :param value: predicted value from the model
    :return: numpy array of dimension 1*24*2
    """
    dim23_array = np.zeros((24, 2))
Example #35
def DeepFold_predict(SeqArr, model_1, path_prefix, current_file):

    print("Predict 1D prob:")
    seqlen = len(SeqArr)
    Test_1 = np.zeros((seqlen, 6, Winsize), dtype="float32")
    for j in range(seqlen):
        Arr = fill_window(j, SeqArr, Winsize)
        Test_1[j, :, :] = seq_to_mat(Arr)

    Data = Test_1  # Test_1 need to be reshaped, so just copy it for further usage.
    Test_1 = Test_1.reshape(Test_1.shape[0], Test_1.shape[1], Test_1.shape[2],
                            1)
    proba_1 = model_1.predict(Test_1, verbose=0)

    Thr1 = 0.25
    Pair = []  # Nucleotides that are paired
    for j in range(seqlen):
        if (proba_1[j, 1] > Thr1):
            Pair.append(j)

    Pattern = []
    pairnum = len(Pair)
    total = int((1 + pairnum - 1) * (pairnum - 1) / 2)
    Test = np.zeros((total, 9, Winsize), dtype="float32")

    k = 0
    for i in range(pairnum - 1):
        for j in range(i + 1, pairnum):
            if ((SeqArr[Pair[i]] == "A" and
                 (SeqArr[Pair[j]] == "U" or SeqArr[Pair[j]] == "T"))
                    or (SeqArr[Pair[i]] == "C" and SeqArr[Pair[j]] == "G")
                    or (SeqArr[Pair[i]] == "G" and SeqArr[Pair[j]] == "C")
                    or (SeqArr[Pair[i]] == "G" and SeqArr[Pair[j]] == "U")
                    or (SeqArr[Pair[i]] == "U" and SeqArr[Pair[j]] == "G")
                    or ((SeqArr[Pair[i]] == "U" or SeqArr[Pair[i]] == "T")
                        and SeqArr[Pair[j]] == "A")):
                sequence1 = fill_window(Pair[i], SeqArr, Winsize)
                sequence2 = fill_window(Pair[j], SeqArr, Winsize)
                Test[k] = seq_to_mat_2D(sequence1, sequence2)
                Pattern.append([Pair[i], Pair[j]])
                k += 1

    print("Predict 2D prob:")
    Test_2 = np.zeros((k, 9, Winsize), dtype="float32")
    Test_2 = Test[0:k]

    Test_2 = Test_2.reshape(Test_2.shape[0], Test_2.shape[1], Test_2.shape[2],
                            1)

    model_list = get_file_list(path2, ['h5'])
    model_num = len(model_list)
    model_c = model_from_json(
        open(path2 + "0_DeepFold_2D_architecture.json").read())
    for n in range(model_num):
        model_c.load_weights(model_list[n])
        proba_c = model_c.predict(Test_2, verbose=0)
        if (n == 0):
            proba_2 = proba_c
        if (n > 0):
            proba_2 = proba_2 + proba_c
    proba_2 = proba_2 / model_num

    print("Create final:")
    Thr2 = []
    for i in range(50):
        Thr2.append(0.99 - i * 0.01)
    Final = {}

    for K in range(len(Thr2)):
        for i in range(k):
            if (proba_2[i, 1] > Thr2[K]
                    and ((Pattern[i][0] in Final) == False)):
                Final[Pattern[i][0]] = Pattern[i][1]
                Final[Pattern[i][1]] = Pattern[i][0]
                if (Pattern[i][1] in Final):
                    if (Final[Pattern[i][1]] == Pattern[i][0] + 1):
                        Final[Pattern[i][0]] = Pattern[i][1] + 1
                        Final[Pattern[i][1] + 1] = Pattern[i][0]
                    elif (Final[Pattern[i][1]] == Pattern[i][0] - 1):
                        Final[Pattern[i][0]] = Pattern[i][1] - 1
                        Final[Pattern[i][1] - 1] = Pattern[i][0]
                else:
                    Final[Pattern[i][0]] = Pattern[i][1]
                    Final[Pattern[i][1]] = Pattern[i][0]

    for j in range(seqlen):
        if ((j in Final) and (j - 1 in Final) and (j + 1 in Final)):
            if (abs(Final[j - 1] - Final[j + 1]) == 2 and Final[j] != int(
                (Final[j - 1] + Final[j + 1]) / 2)):
                Final[j] = int((Final[j - 1] + Final[j + 1]) / 2)

    print("Create ct file: ")
    fh = open(path_prefix + current_file, "w")
    fh.write(str(seqlen) + "\t" + current_file + "\n")
    for j in range(seqlen):
        if j in Final:
            fh.write(
                str(j + 1) + " " + SeqArr[j] + "\t" + str(j) + "\t" +
                str(j + 2) + "\t" + str(Final[j] + 1) + "\t" + str(j + 1) +
                "\n")
        else:
            fh.write(
                str(j + 1) + " " + SeqArr[j] + "\t" + str(j) + "\t" +
                str(j + 2) + "\t" + "0" + "\t" + str(j + 1) + "\n")
    fh.close()
Example #36
def read_model():
    model = model_from_json(
        open(os.path.join('cache', 'architecture.json')).read())
    model.load_weights(os.path.join('cache', 'model_weights.h5'))
    return model
Example #37
    def sufficient_feature_set(self,model, sample):
        sample = sample.reshape(1,len(sample))
        start = 0
        n_chanels = len([layer for layer in model.layers if isinstance(layer, layers.InputLayer)])
        contributions = self.compute_contributions(model, sample)[0]
        ngrams = dict()
        conv_layers = [layer for layer in model.layers if isinstance(layer, layers.Conv1D)]
        for conv_layer in conv_layers :
            intermediate_layer_model = Model(inputs=model.input,
                                             outputs=conv_layer.output)
            intermediate_output = intermediate_layer_model.predict([sample]*n_chanels)
            #print(intermediate_output.shape)
            filter_size = conv_layer.kernel_size[0]
            n_filters = intermediate_output[0].shape[1]
            out = intermediate_output[0]
            ngrams_indices = numpy.argmax(out,axis = 0) #indices of ngrams selected by global maxpooling.
            seq = [sample[0,t:t + filter_size] for t in ngrams_indices]
            filtered_ngrams = self.tokenizer.sequences_to_texts(seq)
            # compute the adjacency matrix: two filters are adjacent if they select the same ngram
            for i in range(n_filters) :
                contrib = contributions[start+i]
                filters = [start+i]
                if filtered_ngrams[i] in ngrams :
                    filters += ngrams.get(filtered_ngrams[i]).get("filters")
                    contrib += ngrams.get(filtered_ngrams[i]).get("contrib")
                ngrams.update({filtered_ngrams[i]:{'filters':filters,'contrib':contrib}})

            start += n_filters  # jump to the next list of filters (of a different size)

        output_prob = model.predict([sample]*n_chanels)
        pred_class = numpy.argmax(output_prob)
        positive_ngrams = [(x[0],x[1],{'relevance':x[1]['contrib'][pred_class]-numpy.mean(numpy.delete(x[1]['contrib'], pred_class))})
                           for x in ngrams.items() if x[1]['contrib'][pred_class]-numpy.mean(numpy.delete(x[1]['contrib'], pred_class))>0]
        positive_ngrams.sort(
            key=lambda tup: tup[2]['relevance'])
        # load weights into new model
        #new_model.load_weights(self.model_file_path + '.h5')
        new_model = model_from_json(model.to_json())
        new_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        new_model.set_weights(model.get_weights())
        i = 0
        drop_list = []
        #print(positive_ngrams)
        dense_layers = [x for x in model.layers[::-1] if isinstance(x, layers.Dense)]
        first_dense_layer = dense_layers[-1]
        for ngram in positive_ngrams:  # progressively deactivate positive features and see which are sufficient
            filters = ngram[1]['filters']
            weights = new_model.get_layer(first_dense_layer.name).get_weights()
            for k in filters:
                    weights[0][k] = 0;
            new_model.get_layer(first_dense_layer.name).set_weights(weights)
            y = new_model.predict([sample]*n_chanels)
            y = numpy.argmax(y)
            if pred_class != y :
                break
            drop_list.append(ngram)
            i += 1

        sufficient_features = dict()
        for ngram in positive_ngrams :
            if ngram not in drop_list :
                token = ngram[0]
                key = str(len(token.split()))+'-ngrams'
                if key in sufficient_features :
                    sufficient_features.get(key).append({ngram[0]:ngram[2]['relevance'].item()})
                else :
                    sufficient_features.update({key:[{ngram[0]:ngram[2]['relevance'].item()}]})

        return sufficient_features
Example #38
data = pd.read_csv(test_file)

#pre-processing
tk = text.Tokenizer(nb_words=200000)

max_len = 40
tk.fit_on_texts(
    list(data.question1.values.astype(str)) +
    list(data.question2.values.astype(str)))
x1 = tk.texts_to_sequences(data.question1.values.astype(str))
x1 = sequence.pad_sequences(x1, maxlen=max_len)

x2 = tk.texts_to_sequences(data.question2.values.astype(str))
x2 = sequence.pad_sequences(x2, maxlen=max_len)

# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)

# load weights into new model
loaded_model.load_weights("our_weights.h5")
print("Loaded model from disk")

y_pred = loaded_model.predict([x1, x2], batch_size=300, verbose=0)

print np.shape(y_pred)
df = pd.DataFrame(y_pred, columns=["column"])
df.to_csv('testset_label.csv', index=True)
Example #39
from keras import backend as K
import pandas as pd
import numpy as np
import emoji as emoji

emoji_dictionary = {
    "0":
    "\u2764\uFE0F",  # :heart: prints a black instead of red heart depending on the font
    "1": ":baseball:",
    "2": ":beaming_face_with_smiling_eyes:",
    "3": ":downcast_face_with_sweat:",
    "4": ":fork_and_knife:",
}

with open("services/emojifier/model.json", "r") as file:
    model = model_from_json(file.read())
model.load_weights("services/emojifier/model.h5")

# build the predict function up front so the model can be used safely from serving threads
model._make_predict_function()

embeddings = {}
with open('services/emojifier/glove.6B.50d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coeffs = np.asarray(values[1:], dtype='float32')

        #print(word)
        #print(coeffs)
        embeddings[word] = coeffs
Example #40
def predict(model_abbr, mm, dd, yyyy):
    date = str(mm) + "/" + str(dd) + "/" + str(yyyy)

    info_dict = {}

    if model_abbr == "LSTM":
        step = 7
    else:
        step = 2

    MG = Model_Generation()
    X_in, Y_to_pred, Y_prev = MG.find_input_output_data_by_date(
        model_abbr, date, step, scaler=MinMaxScaler())

    info_dict["date"] = date
    info_dict["Y_to_pred"] = [Y_to_pred]
    info_dict["Y_prev"] = [Y_prev]

    col_list = MG.generate_col_names()
    col_list.append("DFF")
    X_in_dict = {}
    for i in range(step):
        daily_data_dict = {}  # fresh dict per day so entries are not shared between days
        for j in range(len(col_list)):
            if model_abbr == "LSTM":
                daily_data_dict[col_list[j]] = X_in[0][i][j]
            else:
                daily_data_dict[col_list[j]] = X_in[i * len(col_list) + j]
        X_in_dict["prev_day_" + str(i)] = daily_data_dict

    info_dict["X_in"] = X_in_dict

    if model_abbr == "naive_NN" or model_abbr == "LSTM":
        # Load model
        K.clear_session()
        json_file = open(f'models/classifiers/{model_abbr}/model/model.json',
                         'r')
        loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)

        # Load weights into loaded model
        loaded_model.load_weights(
            f'models/classifiers/{model_abbr}/weights/weights.h5')

        # Make predictions
        loaded_model.compile(loss='categorical_crossentropy',
                             optimizer='adam',
                             metrics=['accuracy'])

        if model_abbr == "LSTM":
            Y_prob = loaded_model.predict(X_in)
        else:
            X_in_reshaped = X_in.reshape((1, len(X_in)))
            Y_prob = loaded_model.predict(X_in_reshaped)

    else:
        with open(f"models/classifiers/{model_abbr}/model/model.pickle",
                  "rb") as f:
            model = pickle.load(f)

        X_in_reshaped = X_in.reshape((1, len(X_in)))
        Y_prob = model.predict_proba(X_in_reshaped)

    info_dict["Y_prob"] = [float(item) for item in list(Y_prob[0])]

    return jsonify(info_dict)
Example #41
import pandas as pd
import json
import random
from data_model import load_data

nb_classes = 10

# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
img_size = 32

X_train, X_test, Y_train, Y_test = load_data(img_size)

Y_train = np.array(Y_train)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

for i in range(1,5):
    model = model_from_json(json.loads(open('../results/run_prelu_arch.txt').read()))
    model.load_weights("../results/prelu_customcb_weights"+"_cv_"+str(i)+".hdf5")
    
    print "Test Accuracy: "
    Y_pred = model.predict_classes(X_test)
    print accuracy_score(Y_test, Y_pred), precision_score(Y_test, Y_pred, average='micro'), recall_score(Y_test, Y_pred, average='micro'), f1_score(Y_test, Y_pred, average='micro')
    print "Train Accuracy: "
    Y_pred = model.predict_classes(X_train)
    print accuracy_score(Y_train, Y_pred), precision_score(Y_train, Y_pred, average='micro'), recall_score(Y_train, Y_pred, average='micro'), f1_score(Y_train, Y_pred, average='micro')
Example #42
X_data = data[['center', 'left', 'right']].values
y_data = data['steering'].values

X_train, X_valid, y_train, y_valid = train_test_split(X_data,
                                                      y_data,
                                                      test_size=0.2,
                                                      random_state=10)

# Compile and train the model using the generator function
train_generator = batch_generator(X_train, y_train, batch_size=batch_size)
validation_generator = batch_generator(X_valid, y_valid, batch_size=batch_size)

# Load previous session model and retrain if model.json and model.h5 exists
if Path(fileModelJSON).is_file():
    with open(fileModelJSON) as jfile:
        model = model_from_json(json.load(jfile))

    model.compile(loss='mse', optimizer=Adam(lr=learning_rate))
    model.load_weights(fileWeights)
    print("Load model from disk:")
    model.summary()

# re-create model and restart training
else:

    model = Sequential()
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=input_shape))
    model.add(Convolution2D(24, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Convolution2D(36, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Convolution2D(48, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Convolution2D(64, (3, 3), strides=(1, 1), activation="relu"))
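The snippet above is truncated before the save step; for the resume branch to find model.json and model.h5 on the next run, training would typically end with something like the following sketch (reusing the fileModelJSON / fileWeights names assumed above):

import json

# persist the architecture and weights so the resume branch can reload them
with open(fileModelJSON, 'w') as jfile:
    json.dump(model.to_json(), jfile)   # matches the json.load() call in the resume branch
model.save_weights(fileWeights, overwrite=True)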
Example #43
def read_model(index, cross=''):
    json_name = 'architecture' + str(index) + cross + '.json'
    weight_name = 'model_weights' + str(index) + cross + '.h5'
    model = model_from_json(open(os.path.join('cache', json_name)).read())
    model.load_weights(os.path.join('cache', weight_name))
    return model
Example #44
    # loading mapper
    print('loading mapper ... ')
    with open(os.path.join(PATH_TO_MODEL, PATH_TO_MAPPER), 'r') as mapper_file:
        mapper = dict(json.load(mapper_file))
    # create a reverse mapper dictionary to translate embeddings back into tokens
    reverse_mapper = {index: token for token, index in mapper.items()}
    # loading and formatting test data
    print('loading test data ...')
    attributes_test = building_data_test(PATH_TO_DATA,
                                         mapper=mapper,
                                         limit=None)

    print('loading models')
    with open(os.path.join(PATH_TO_MODEL, 'enc_architecture.json'),
              'r') as file1:
        enc = model_from_json(str(json.load(file1)))
    with open(os.path.join(PATH_TO_MODEL, 'dec_architecture.json'),
              'r') as file2:
        dec = model_from_json(str(json.load(file2)))

    enc.load_weights(os.path.join(PATH_TO_MODEL, 'enc_weights.h5'))
    dec.load_weights(os.path.join(PATH_TO_MODEL, 'dec_weights.h5'))
    print("####### ENCODER #######")
    enc.summary()
    print("####### DECODER #######")
    dec.summary()

    print('predicting the reviews ...')
    predictions = [
        make_prediction(att,
                        encoder=enc,
Example #45
def test_multi_input_layer():
    ####################################################
    # test multi-input layer
    a = Input(shape=(32, ), name='input_a')
    b = Input(shape=(32, ), name='input_b')

    dense = Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)

    merged = layers.concatenate([a_2, b_2], name='merge')
    assert merged._keras_shape == (None, 16 * 2)
    merge_layer, merge_node_index, merge_tensor_index = merged._keras_history

    assert merge_node_index == 0
    assert merge_tensor_index == 0

    assert len(merge_layer.inbound_nodes) == 1
    assert len(merge_layer.outbound_nodes) == 0

    assert len(merge_layer.inbound_nodes[0].input_tensors) == 2
    assert len(merge_layer.inbound_nodes[0].inbound_layers) == 2

    c = Dense(64, name='dense_2')(merged)
    d = Dense(5, name='dense_3')(c)

    model = Model(inputs=[a, b], outputs=[c, d], name='model')
    assert len(model.layers) == 6
    print('model.input_layers:', model.input_layers)
    print('model.input_layers_node_indices:', model.input_layers_node_indices)
    print('model.input_layers_tensor_indices:',
          model.input_layers_tensor_indices)
    print('model.output_layers', model.output_layers)

    print('output_shape:', model.compute_output_shape([(None, 32),
                                                       (None, 32)]))
    assert model.compute_output_shape([(None, 32), (None, 32)]) == [(None, 64),
                                                                    (None, 5)]

    print('mask:', model.compute_mask([a, b], [None, None]))
    assert model.compute_mask([a, b], [None, None]) == [None, None]

    print('output_shape:', model.compute_output_shape([(None, 32),
                                                       (None, 32)]))
    assert model.compute_output_shape([(None, 32), (None, 32)]) == [(None, 64),
                                                                    (None, 5)]

    # we don't check names of first 2 layers (inputs) because
    # ordering of same-level layers is not fixed
    print('layers:', [layer.name for layer in model.layers])
    assert [l.name for l in model.layers
            ][2:] == ['dense_1', 'merge', 'dense_2', 'dense_3']
    print('input_layers:', [l.name for l in model.input_layers])
    assert [l.name for l in model.input_layers] == ['input_a', 'input_b']
    print('output_layers:', [l.name for l in model.output_layers])
    assert [l.name for l in model.output_layers] == ['dense_2', 'dense_3']

    # actually run model
    fn = K.function(model.inputs, model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 64), (10, 5)]

    # test get_source_inputs
    print(get_source_inputs(c))
    assert get_source_inputs(c) == [a, b]

    # serialization / deserialization
    json_config = model.to_json()
    recreated_model = model_from_json(json_config)
    recreated_model.compile('rmsprop', 'mse')

    print('recreated:')
    print([layer.name for layer in recreated_model.layers])
    print([layer.name for layer in recreated_model.input_layers])
    print([layer.name for layer in recreated_model.output_layers])
    assert [l.name for l in recreated_model.layers
            ][2:] == ['dense_1', 'merge', 'dense_2', 'dense_3']
    assert [l.name
            for l in recreated_model.input_layers] == ['input_a', 'input_b']
    assert [l.name
            for l in recreated_model.output_layers] == ['dense_2', 'dense_3']

    fn = K.function(recreated_model.inputs, recreated_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 64), (10, 5)]
Exemple #46
0
def load_pretrain_weights(vade, X, Y, dataset, autoencoder=None, ae_weights=None):
    if autoencoder is None:
        ae = model_from_json(open(ae_weights).read())
        ae.load_weights('pretrain_weights/ae_'+dataset+'_weights.h5')
        vade.get_layer('encoder_0').set_weights(ae.layers[0].get_weights())
        vade.get_layer('encoder_1').set_weights(ae.layers[1].get_weights())
        vade.get_layer('encoder_2').set_weights(ae.layers[2].get_weights())
        vade.get_layer('z_mean').set_weights(ae.layers[3].get_weights())
        vade.get_layer('decoder_0').set_weights(ae.layers[-4].get_weights())
        vade.get_layer('decoder_1').set_weights(ae.layers[-3].get_weights())
        vade.get_layer('decoder_2').set_weights(ae.layers[-2].get_weights())
        vade.get_layer('output').set_weights(ae.layers[-1].get_weights())
        sample = sample_output.predict(X, batch_size=batch_size)
    else:
        autoencoder.load_weights(ae_weights)
        vade.get_layer('encoder_0').set_weights(autoencoder.layers[1].get_weights())
        vade.get_layer('encoder_1').set_weights(autoencoder.layers[2].get_weights())
        vade.get_layer('encoder_2').set_weights(autoencoder.layers[3].get_weights())
        vade.get_layer('z_mean').set_weights(autoencoder.layers[4].get_weights())
        vade.get_layer('decoder_0').set_weights(autoencoder.layers[-4].get_weights())
        vade.get_layer('decoder_1').set_weights(autoencoder.layers[-3].get_weights())
        vade.get_layer('decoder_2').set_weights(autoencoder.layers[-2].get_weights())
        vade.get_layer('output').set_weights(autoencoder.layers[-1].get_weights())
        sample = sample_output.predict(X, batch_size=batch_size)

    if dataset == 'mnist':

        gmm = GaussianMixture(n_components=n_centroid, covariance_type='diag')
        gmm.fit(sample)
        acc_0 = cluster_acc(Y, gmm.predict(sample))
        means_0 = gmm.means_
        covs_0 = gmm.covariances_
        for i in range(3):
            gmm.fit(sample)
            acc_0_new = cluster_acc(Y, gmm.predict(sample))
            if acc_0_new > acc_0:
                acc_0 = acc_0_new
                means_0 = gmm.means_
                covs_0 = gmm.covariances_

        K.set_value(u_p, means_0.T)
        K.set_value(lambda_p, covs_0.T)

    if dataset == 'reuters10k':
        k = KMeans(n_clusters=n_centroid)
        k.fit(sample)
        K.set_value(u_p, floatX(k.cluster_centers_.T))

    if dataset == 'har':
        g = mixture.GaussianMixture(n_components=n_centroid,
                                    covariance_type='diag', random_state=3)
        g.fit(sample)
        K.set_value(u_p, floatX(g.means_.T))
        K.set_value(lambda_p, floatX(g.covariances_.T))

    if dataset == 'custom' or dataset is None:
        gmm = GaussianMixture(n_components=n_centroid, covariance_type='diag')
        gmm.fit(sample)
        acc_0 = cluster_acc(Y, gmm.predict(sample))
        means_0 = gmm.means_
        covs_0 = gmm.covariances_
        print(acc_0)
        print('means:', means_0.shape)
        for i in range(3):
            gmm.fit(sample)
            acc_0_new = cluster_acc(Y, gmm.predict(sample))
            if acc_0_new > acc_0:
                acc_0 = acc_0_new
                means_0 = gmm.means_
                covs_0 = gmm.covariances_

        K.set_value(u_p, means_0.T)
        K.set_value(lambda_p, covs_0.T)

    # Set trainable weights in 'latent' layer to initialized values
    K.set_value(vade.get_layer('latent').u_p, K.eval(u_p))
    K.set_value(vade.get_layer('latent').theta_p, K.eval(theta_p))
    K.set_value(vade.get_layer('latent').lambda_p, K.eval(lambda_p))

    print('pretrain weights loaded!')
    return vade
Exemple #47
0
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing import image

#load model
model = model_from_json(open("fer.json", "r").read())
#load weights
model.load_weights('fer.h5')

face_haar_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)

while True:
    ret, test_img = cap.read(
    )  # captures frame and returns boolean value and captured image
    if not ret:
        continue
    gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
    faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)

    for (x, y, w, h) in faces_detected:
        cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0),
                      thickness=7)
        roi_gray = gray_img[
            y:y + h,
            x:x + w]  # crop the region of interest (the face area) from the image
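The snippet above ends mid-loop. A minimal, hypothetical continuation for classifying each cropped face might look like the following sketch; the 48x48 grayscale input size, the [0, 1] scaling, and the seven FER2013 emotion labels are assumptions, not taken from the original script.

        # resize the face crop to the model's expected input size and normalize it
        roi_gray = cv2.resize(roi_gray, (48, 48))
        img_pixels = roi_gray.astype('float32') / 255.0
        img_pixels = img_pixels.reshape(1, 48, 48, 1)

        # predict the emotion and keep the most probable class
        predictions = model.predict(img_pixels)
        emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
        predicted_emotion = emotions[int(np.argmax(predictions[0]))]
        cv2.putText(test_img, predicted_emotion, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)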
Exemple #48
0
from keras import Input, models, layers, optimizers, callbacks,regularizers, initializers

fold_num=1
modelpth = '/dl/sry/projects/from_hp/mCNN/src/Network/deepddg/opt_all_simpleNet_v4/model/140_4_32_32_32_64_0.5-2020.06.05.06.18.54/fold_%s_model.json'%fold_num
weightspth = '/dl/sry/projects/from_hp/mCNN/src/Network/deepddg/opt_all_simpleNet_v4/model/140_4_32_32_32_64_0.5-2020.06.05.06.18.54/fold_%s_weights-best.h5'%fold_num

#
# Load model
#
with open(modelpth, 'r') as json_file:
    loaded_model_json = json_file.read()
pre_model = models.model_from_json(loaded_model_json)  # keras.models.model_from_yaml(yaml_string)
pre_model.load_weights(filepath=weightspth)
## freeze layers before the reduce step (currently disabled, see commented-out lines below)
idx = 0
pre_model.summary()
for layer in pre_model.layers:
    print(idx, layer.name, layer.trainable)
    idx += 1
    # if idx <= 5:
    #     layer.trainable = False
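The commented-out lines hint at freezing the earliest layers before further training. A minimal sketch of that idea follows; the five-layer cutoff and the compile settings are illustrative assumptions, not part of the original script.

# freeze the first five layers of the pretrained model before fine-tuning
for idx, layer in enumerate(pre_model.layers):
    layer.trainable = idx > 4

# recompile so the new trainable flags take effect (placeholder loss/optimizer)
pre_model.compile(optimizer='adam', loss='mse')
pre_model.summary()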
Exemple #49
0
input_dim = 25  # input dimension
hidden_dim = 100  # hidden-layer dimension
output_dim = 25  # output dimension

max_t = 40
mcep = np.loadtxt(mcep_path)
power = mcep[:, 0]
mc = mcep[:, 1:]
mean = np.loadtxt("lstm_mean.txt")
std = np.loadtxt("lstm_std.txt")

mc = (mc - mean) / std  # normalize
mc = np.r_[mc, np.zeros((max_t - 1, input_dim))]

# load the LSTM used for conversion
lstm = model_from_json(open("lstm_model.json").read())
lstm.load_weights("lstm.h5")

# reshape for LSTM input
data = []

for i in range(mc.shape[0] - max_t + 1):
    data.append(mc[i:i + max_t, :])

data = np.array(data).astype("float32")

converted_mcep = lstm.predict(data)  # convert with the LSTM

converted_mcep = converted_mcep * std + mean  # undo the normalization
converted_mcep = np.c_[power, converted_mcep]
Exemple #50
0
def OpenFile():
    name = askopenfilename(initialdir="/",
                           title="Select file",
                           filetypes=(("jpeg files", "*.jpg *.png"),
                                      ("all files", "*.*")))

    global fileName
    fileName = name
    global image_address
    image_address = fileName
    try:
        img = ImageTk.PhotoImage(Image.open(fileName))
        p2 = tk.Label(root, image=img).pack()
    except Exception:
        pass

    # detection code starts from here

    # constant variables for text detection
    min_confidence = 0.5
    width = 320
    height = 320

    # load the input image
    image = cv2.imread(image_address)
    orig = image.copy()
    (H, W) = image.shape[:2]

    # set new width and height
    (newW, newH) = (width, height)
    rW = W / float(newW)
    rH = H / float(newH)

    # resize the image
    image = cv2.resize(image, (newW, newH))
    (H, W) = image.shape[:2]

    #the first layer is the output probabilities
    #the second layer is used to derive the bounding box coordinates of text
    layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]

    # load the pre-trained EAST text detector
    print("[INFO] loading EAST text detector...")
    net = cv2.dnn.readNet('frozen_east_text_detection.pb')

    # construct a blob from the image
    # create the model with the two output layer sets
    blob = cv2.dnn.blobFromImage(image,
                                 1.0, (W, H), (123.68, 116.78, 103.94),
                                 swapRB=True,
                                 crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)

    # get rows and columns from the scores
    # initialize lists for bounding-box rectangles and confidence scores
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []

    # loop over rows
    for y in range(0, numRows):
        # get the score data and dimensions of the rectangle
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]

        # loop over columns
        for x in range(0, numCols):
            # score not confident
            if scoresData[x] < min_confidence:
                continue

            # compute the offset factor
            (offsetX, offsetY) = (x * 4.0, y * 4.0)

            # get rotation angle and make sin and cosine
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)

            # width and height of the bounding box
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]

            # compute x and y coordinates of the bounding box
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)

            # add the rectangles and score made
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])

    # apply non_max_suppression
    boxes = non_max_suppression(np.array(rects), probs=confidences)

    # sort the boxes top-to-bottom by their y coordinate
    def func(val):
        return val[1]

    boxes = sorted(boxes, key=func)

    #function to find if r1 rectangle contains r2 rectangle
    def contains(r1, r2):
        return (r1[0] < r2[0] < r2[0] + r2[2] < r1[0] + r1[2]) and (
            r1[1] < r2[1] < r2[1] + r2[3] < r1[1] + r1[3])

    # list to contain all text in character form
    # e.g. crop_img = [[H,E,L,L,O], [W,O,R,L,D]]

    crop_img = []

    # loop over the bounding boxes
    for (startX, startY, endX, endY) in boxes:
        # scale the bounding box coordinates based on the respective
        # ratios
        startX = int(startX * rW)
        startY = int(startY * rH)
        endX = int(endX * rW)
        endY = int(endY * rH)

        # crop the text region from the original image
        unit = orig[startY:endY, startX:endX]

        #changing cropped image to grayscale image
        gray_img = cv2.cvtColor(unit, cv2.COLOR_BGR2GRAY)

        #finding threshold value from the grayscale average value
        threshold = np.mean(gray_img)
        #getting the binary image with the help of thresholding
        _, thresh = cv2.threshold(gray_img, threshold, 255, cv2.THRESH_BINARY)
        # dilation and erosion (currently disabled)
        #            thresh = cv2.dilate(thresh, kernel, iterations=1)
        #            thresh = cv2.erode(thresh, kernel, iterations=1)

        #Finding contours in the binary image
        contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

        #list of contour polygon and bounding rectangle
        contours_poly = [None] * len(contours)
        boundRect = [None] * len(contours)

        #looping in the contour list
        for i, c in enumerate(contours):
            #making polygons of the contours
            contours_poly[i] = cv2.approxPolyDP(c, 3, True)
            #making rectangles from that polygon
            # returns x,y,h,w of rectangle in a list
            boundRect[i] = cv2.boundingRect(contours_poly[i])

        #sorting the rectangle boxes into ascending order of x
        boundRect = sorted(boundRect, key=lambda x: x[0])
        # list for the character images cropped from this text region
        char_crop = []

        #cropping the rectangles that are not in another rectangle
        for i in range(len(boundRect)):
            count = 1
            for j in range(len(boundRect)):
                if not i == j:
                    if not contains(boundRect[j], boundRect[i]):
                        count += 1
                        if count == len(boundRect):
                            char_crop.append(
                                unit[boundRect[i][1]:boundRect[i][1] +
                                     boundRect[i][3],
                                     boundRect[i][0]:boundRect[i][0] +
                                     boundRect[i][2]])

        #adding the cropped characters to the final list
        crop_img.append(char_crop)

    #importing keras files to load trained model
    from keras.models import model_from_json

    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")

    # Compile model
    loaded_model.compile(optimizer='adam',
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])

    # function to return the index of the prediction that equals 1
    def result(array):
        rt = 26
        for i in range(0, len(array[0])):
            if array[0][i] == 1:
                rt = i
        return rt

    # class labels in the order the model was trained on
    predict_list = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ''
    ]
    global content
    content = ''

    #predicting every character in the text in the crop_img
    for text_img in crop_img:
        for image in text_img:
            image = cv2.resize(image, (32, 32))
            image = np.expand_dims(image, axis=0)
            rslt = result(loaded_model.predict(image))
            content += predict_list[rslt]
        content += ' '

    print(content)
    out = tk.Label(root, justify=tk.CENTER, padx=10, text=content).pack()
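Note that result() only returns a class when a softmax probability is exactly 1, so uncertain predictions silently fall back to the blank label. A more robust alternative, sketched here under the assumption that the model outputs a softmax over the same 27 classes, simply takes the argmax (result_argmax is a hypothetical helper, not part of the original):

def result_argmax(prediction):
    # index of the most probable class, even when no probability is exactly 1.0
    return int(np.argmax(prediction[0]))

# usage inside the prediction loop:
# rslt = result_argmax(loaded_model.predict(image))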
Exemple #51
0
def test_network(architecture, weights, data):
    model = model_from_json(open(architecture).read())
    model.load_weights(weights)
    pred = model.predict_classes(data, batch_size=1)
    # score = model.predict_proba(data, batch_size=1)
    return pred
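On recent TensorFlow/Keras releases predict_classes has been removed, so the helper above fails there. A minimal sketch of an equivalent, assuming the network ends in a softmax layer:

import numpy as np
from keras.models import model_from_json

def test_network_argmax(architecture, weights, data):
    model = model_from_json(open(architecture).read())
    model.load_weights(weights)
    # argmax over the softmax output replaces the removed predict_classes()
    return np.argmax(model.predict(data, batch_size=1), axis=-1)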
Exemple #52
0
labeldict_filename = 'label_dict_special.pkl'

# DLIB's model path for face pose predictor and deep neural network model
predictor_path = 'shape_predictor_68_face_landmarks.dat'
face_rec_model_path = 'dlib_face_recognition_resnet_model_v1.dat'

# .pkl file containing the dictionary that maps neural-network outputs to person labels
label_dict = joblib.load(nn_model_dir + labeldict_filename)
#print(label_dict)
# ====================================================================================

json_model_file = open(nn_model_dir + json_filename, 'r')
json_model = json_model_file.read()
json_model_file.close()

cnn_model = model_from_json(json_model)
cnn_model.load_weights(nn_model_dir + hdf5_filename)

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
fa = FaceAligner(sp)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

cap = cv2.VideoCapture(0)
check = []  # list used to check whether the detected face really is the person
check_before_execute = False  # on the first detected face, confirm whether it is oneself

roll_call_file = "test.csv"  # CSV file used for roll call
first_frame = True  # first frame
Exemple #53
0

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-model', type=str, required=True)
    parser.add_argument('-weights', type=str, required=True)
    parser.add_argument('-results', type=str, required=True)
    args = parser.parse_args()

    model = model_from_json(open(args.model).read())
    model.load_weights(args.weights)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    questions_val = open('../data/preprocessed/questions_val2014.txt',
                         'r').read().decode('utf8').splitlines()
    answers_val = open('../data/preprocessed/answers_val2014_all.txt',
                       'r').read().decode('utf8').splitlines()
    images_val = open('../data/preprocessed/images_val2014_all.txt',
                      'r').read().decode('utf8').splitlines()
    vgg_model_path = '../features/coco/vgg_feats.mat'

    print('Model compiled, weights loaded...')
    labelencoder = joblib.load('../models/labelencoder.pkl')

    features_struct = scipy.io.loadmat(vgg_model_path)
    VGGfeatures = features_struct['feats']
    print('loaded vgg features')
    image_ids = open('../features/coco_vgg_IDMap.txt').read().splitlines()
    img_map = {}
    for ids in image_ids:
        id_split = ids.split()
        img_map[id_split[0]] = int(id_split[1])

    nlp = English()
    print('loaded word2vec features')

    nb_classes = 1000
    y_predict_text = []
    batchSize = 128
    widgets = [
        'Evaluating ',
        Percentage(), ' ',
        Bar(marker='#', left='[', right=']'), ' ',
        ETA()
    ]
    pbar = ProgressBar(widgets=widgets)

    for qu_batch, an_batch, im_batch in pbar(
            zip(grouper(questions_val, batchSize, fillvalue=questions_val[0]),
                grouper(answers_val, batchSize, fillvalue=answers_val[0]),
                grouper(images_val, batchSize, fillvalue=images_val[0]))):
        X_q_batch = get_questions_matrix_sum(qu_batch, nlp)
        if 'language_only' in args.model:
            X_batch = X_q_batch
        else:
            X_i_batch = get_images_matrix(im_batch, img_map, VGGfeatures)
            X_batch = np.hstack((X_q_batch, X_i_batch))
        y_predict = model.predict_classes(X_batch, verbose=0)
        y_predict_text.extend(labelencoder.inverse_transform(y_predict))

    correct_val = 0.0
    total = 0
    f1 = open(args.results, 'w')

    for prediction, truth, question, image in zip(y_predict_text, answers_val,
                                                  questions_val, images_val):
        temp_count = 0
        for _truth in truth.split(';'):
            if prediction == _truth:
                temp_count += 1

        if temp_count > 2:
            correct_val += 1
        else:
            correct_val += float(temp_count) / 3

        total += 1
        f1.write(question.encode('utf-8'))
        f1.write('\n')
        f1.write(image.encode('utf-8'))
        f1.write('\n')
        f1.write(prediction)
        f1.write('\n')
        f1.write(truth.encode('utf-8'))
        f1.write('\n')
        f1.write('\n')

    f1.write('Final Accuracy is ' + str(correct_val / total))
    f1.close()
    f1 = open('../results/overall_results.txt', 'a')
    f1.write(args.weights + '\n')
    f1.write(str(correct_val / total) + '\n')
    f1.close()
    print('Final Accuracy on the validation set is', correct_val / total)
Exemple #54
0
    def __init__(self):
        self.directory = "model/"
        self.hs = Hunspell('en_US')
        self.vs = cv2.VideoCapture(0)
        self.current_image = None
        self.current_image2 = None

        self.json_file = open(self.directory + "model.json", "r")
        self.model_json = self.json_file.read()
        self.json_file.close()
        self.loaded_model = model_from_json(self.model_json)
        self.loaded_model.load_weights(self.directory + "model.h5")

        self.json_file_dru = open(self.directory + "model_dru.json", "r")
        self.model_json_dru = self.json_file_dru.read()
        self.json_file_dru.close()
        self.loaded_model_dru = model_from_json(self.model_json_dru)
        self.loaded_model_dru.load_weights(self.directory + "model_dru.h5")

        self.json_file_tkdi = open(self.directory + "model_tkdi.json", "r")
        self.model_json_tkdi = self.json_file_tkdi.read()
        self.json_file_tkdi.close()
        self.loaded_model_tkdi = model_from_json(self.model_json_tkdi)
        self.loaded_model_tkdi.load_weights(self.directory + "model_tkdi.h5")

        self.json_file_smn = open(self.directory + "model_smn.json", "r")
        self.model_json_smn = self.json_file_smn.read()
        self.json_file_smn.close()
        self.loaded_model_smn = model_from_json(self.model_json_smn)
        self.loaded_model_smn.load_weights(self.directory + "model_smn.h5")

        self.ct = {}
        self.ct['blank'] = 0
        self.blank_flag = 0
        for i in ascii_uppercase:
            self.ct[i] = 0
        print("Loaded model from disk")

        self.root = tk.Tk()
        self.root.title("Sign language to Text Converter")
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.root.geometry("1100x1100")

        self.canvas = tk.Canvas(width=1100, height=1100)
        self.canvas.pack(fill="both", expand=True)

        self.panel = tk.Label(self.root)
        self.panel.place(x=135, y=90, width=640, height=480)

        self.panel2 = tk.Label(self.root)  # initialize image panel
        self.panel2.place(x=460, y=95, width=310, height=310)

        self.canvas.create_text(450,
                                50,
                                text="Sign Language to Text",
                                fill="black",
                                font=("courier", 30, "bold"))

        self.panel3 = tk.Label(self.root)  # Current Symbol
        self.panel3.place(x=500, y=600)
        self.canvas.create_text(155,
                                653,
                                text="Character:",
                                fill="black",
                                font=("courier", 30, "bold"))

        self.panel4 = tk.Label(self.root)  # Word
        self.panel4.place(x=220, y=680)
        self.canvas.create_text(110,
                                713,
                                text="Word:",
                                fill="black",
                                font=("courier", 30, "bold"))

        self.panel5 = tk.Label(self.root)  # Sentence
        self.panel5.place(x=350, y=740)
        self.canvas.create_text(140,
                                773,
                                text="Sentence:",
                                fill="black",
                                font=("courier", 30, "bold"))

        self.T4 = tk.Label(self.root)
        self.T4.place(x=270, y=800)
        self.T4.config(text="Suggestions",
                       fg="red",
                       font=("Courier", 20, "bold"))

        self.btcall = tk.Button(self.root,
                                command=self.action_call,
                                height=0,
                                width=0)
        self.btcall.config(text="About",
                           bg="black",
                           fg="white",
                           font=("Courier", 14))
        self.btcall.place(x=950, y=20)

        self.bt1 = tk.Button(self.root,
                             bg="#DAF7A6",
                             activebackground='white',
                             command=self.action1,
                             height=0,
                             width=0)
        self.bt1.place(x=25, y=890)

        self.bt2 = tk.Button(self.root,
                             bg="#DAF7A6",
                             activebackground='white',
                             command=self.action2,
                             height=0,
                             width=0)
        self.bt2.place(x=325, y=890)

        self.bt3 = tk.Button(self.root,
                             bg="#DAF7A6",
                             activebackground='white',
                             command=self.action3,
                             height=0,
                             width=0)
        self.bt3.place(x=625, y=890)

        self.bt4 = tk.Button(self.root,
                             bg="#DAF7A6",
                             activebackground='white',
                             command=self.action4,
                             height=0,
                             width=0)
        self.bt4.place(x=25, y=950)

        self.bt5 = tk.Button(self.root,
                             bg="#DAF7A6",
                             activebackground='white',
                             command=self.action5,
                             height=0,
                             width=0)
        self.bt5.place(x=325, y=950)

        self.bt6 = tk.Button(self.root,
                             text="Audio",
                             bg="#DAF7A6",
                             activebackground='white',
                             font=("Courier", 20))
        self.bt6.place(x=930, y=80)

        self.bt7 = tk.Button(self.root,
                             text="Backspace",
                             bg="#DAF7A6",
                             activebackground='white',
                             font=("Courier", 20))
        self.bt7.place(x=880, y=140)

        self.bt8 = tk.Button(self.root,
                             text="Reset",
                             bg="#DAF7A6",
                             activebackground='white',
                             font=("Courier", 20))
        self.bt8.place(x=930, y=200)

        self.str = ""
        self.word = ""
        self.current_symbol = "Empty"
        self.photo = "Empty"
        self.video_loop()
Exemple #55
0
                    ["singletop_t", "stopt_Nominal", "topQuarks"],
                    ["ZZ_improved", "ZZ_imporved_Nominal", "SMdiboson"],
                    ["WW_improved", "WW_imporved_Nominal", "SMdiboson"],
                    ["WZ_improved", "WZ_imporved_Nominal", "SMdiboson"]])

# Data
data_list = np.array([["data15", "data_Nominal", "data"],
                      ["data16", "data_Nominal", "data"]])

h_mc = TH1D('mc', 'mc', 100, 0, 3000)
h_data = TH1D('data', 'data', 100, 0, 3000)

if __name__ == "__main__":

    # CNN Model loading
    model = model_from_json(open('model1_architecture.json').read())
    model.load_weights('model1_weights.h5')

    # mc
    for i in range(mc_list[:, 0].size):
        filePath = prePath + prefilename_mc + mc_list[i][0] + ".root"
        print(filePath)
        f = TFile(filePath, 'read')
        tr = f.Get(mc_list[i][1])
        for entry in tr:
            h_mc.Fill(entry.Mlljj)

    # data
    for i in range(data_list[:, 0].size):
        filePath = prePath + prefilename_data + data_list[i][0] + ".root"
        print(filePath)
Exemple #56
0
    def __init__(self):

        # Initiate some of the properties of text to voice function
        self.engine = pyttsx3.init()
        self.engine.setProperty("rate", 200)  # Set the speed of the speaker
        self.voices = self.engine.getProperty(
            "voices")  # Get all the available voices
        self.engine.setProperty("voice", self.voices[1].id)

        # To get the access to the camera and get frames
        self.vs = cv2.VideoCapture(0)
        self.current_image = None
        self.current_image2 = None

        # Here we create a variable to store the address of the models
        self.directory = 'model'

        # Import all the required Neural Networks
        self.json_file = open(self.directory + "/model-bw.json", "r")
        self.model_json = self.json_file.read()
        self.json_file.close()
        self.loaded_model = model_from_json(self.model_json)
        self.loaded_model.load_weights(self.directory + "/model-bw.h5")

        self.json_file_dru = open(self.directory + "/model-bw_dru.json", "r")
        self.model_json_dru = self.json_file_dru.read()
        self.json_file_dru.close()
        self.loaded_model_dru = model_from_json(self.model_json_dru)
        self.loaded_model_dru.load_weights(self.directory + "/model-bw_dru.h5")

        self.json_file_tkdi = open(self.directory + "/model-bw_tkdi.json", "r")
        self.model_json_tkdi = self.json_file_tkdi.read()
        self.json_file_tkdi.close()
        self.loaded_model_tkdi = model_from_json(self.model_json_tkdi)
        self.loaded_model_tkdi.load_weights(self.directory +
                                            "/model-bw_tkdi.h5")

        self.json_file_smn = open(self.directory + "/model-bw_smn.json", "r")
        self.model_json_smn = self.json_file_smn.read()
        self.json_file_smn.close()
        self.loaded_model_smn = model_from_json(self.model_json_smn)
        self.loaded_model_smn.load_weights(self.directory + "/model-bw_smn.h5")

        self.ct = {}
        self.ct['Blank'] = 0
        self.blank_flag = 0

        # Here "i" represent each character from A-Z. and set 0 to each unit of the ct array
        for i in ascii_uppercase:
            self.ct[i] = 0

        self.root = tk.Tk()
        self.root.title("American-Sign-to-Speech")
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.root.geometry("800x800")  # Set the initial size of the window

        self.panel = tk.Label(
            self.root)  # give the location of the colored screen window
        self.panel.place(x=135, y=10, width=640, height=640)

        self.panel2 = tk.Label(
            self.root
        )  # Give the location of the white and black screen window
        self.panel2.place(x=460, y=95, width=310, height=310)

        self.T = tk.Label(
            self.root)  # location of the main title of the application
        self.T.place(x=31, y=17)
        self.T.config(text="American-Sign-to-Speech",
                      font=("courier", 40, "bold"))

        self.panel3 = tk.Label(
            self.root
        )  # set the location of the predicted symbol / character value
        self.panel3.place(x=450, y=610)

        self.T1 = tk.Label(
            self.root)  # set the location of the character TEXT in the window
        self.T1.place(x=30, y=610)
        self.T1.config(text="Character :", font=("Courier", 40, "bold"))

        # Create the following variables and initialize them with placeholder values
        self.current_symbol = "Empty"
        self.photo = "Empty"

        # Call the loop function
        self.video_loop()
Exemple #57
0
#make predictions per model and plot on axes
workinghome = '/Users/aklimase/Documents/GMM_ML/Talapas_run/nostressdrop/top10' + site
modellist = glob.glob(workinghome + '/hidden*')
ANN_top10 = []
plt.set_cmap('viridis')

for m in modellist:
    #read in model files
    ANN_list = []

    foldername = m.split('/')[-1]
    for i in range(fold):
        json_file = open(m + '/' + 'model_' + str(i) + '.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        ANN = model_from_json(loaded_model_json, custom_objects={'GlorotUniform': glorot_uniform()})
        ANN.load_weights(workinghome + '/' +  foldername   + '/' + 'model_' + str(i) + '.h5')
        ANN_test_predicted = ANN.predict(x_test_scale)
        ANN_list.append(ANN)    
#        setup_curves_compare(site, scaler, workinghome, foldername, siteparams, ANN_list, vref, pre_scatter = np.asarray([0]), obs_scatter = np.asarray([0]), dist_scatter = np.asarray([0]), mag_scatter = np.asarray([0]))
    ANN_top10.append(ANN_list)#the 5 fold models

    mlist = np.linspace(2.8,5.0,200)
    Rlist = np.linspace(10.,250.,200)
    X, Y = np.meshgrid(Rlist, mlist)
    Z = np.zeros((len(X), len(Y)))

    for i in range(len(X)):
        if site == '5coeff':
            d = {'mw': Y[i][0], 'R': X[i]}    
        else:
Exemple #58
0
    std = 64.15
    return (x-mean)/(std+1e-7)

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

x_train, x_test = normalize(x_train, x_test)

json_file = open('cifar10vgg.json', 'r')
cifar10_model_json = json_file.read()
json_file.close()
cifar10_model = model_from_json(cifar10_model_json)
cifar10_model.load_weights("cifar10vgg.h5")
cifar10_model.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])

def sparse_SVD_ar(U, S, V, inp_act, out_act, keep, sr, rr):
    
    tU, tS, tV = U[:, 0:keep], S[0:keep], V[0:keep, :]

    # Input node selection
    iwm = np.sum(abs(inp_act),axis=0)
    imid = sorted(iwm)[int(U.shape[0]*sr)]
    ipl = np.where(iwm<imid)[0]

    # Output node selection
    owm = np.sum(abs(out_act),axis=0)
    omid = sorted(owm)[int(V.shape[1]*sr)]
Exemple #59
0
    return file_list


#################
#     Main      #
#################

#path_prefix = "/Share/home/huboqin/project/deepfold_all_data/final/result/"
path_prefix = args.output + "/"
if not os.path.exists(path_prefix):
    os.mkdir(path_prefix, 0o755)

# Load 1D and 2D DeepFold model
if (args.subset):
    path2 = "./model/2D_S/"
    model_1 = model_from_json(
        open("./model/1D_S/DeepFold_1D_architecture.json").read())
    model_1.load_weights("./model/1D_S/DeepFold_1D_weight.h5")
if (args.fullset):
    path2 = "./model/2D_F/"
    model_1 = model_from_json(
        open("./model/1D_F/DeepFold_1D_architecture.json").read())
    model_1.load_weights("./model/1D_F/DeepFold_1D_weight.h5")

#dir_path="./test/"
dir_path = args.input + "/"
extension_list = ['ct']
seqlist = get_file_list(dir_path, extension_list)

for file in seqlist:
    fh = open(file, 'r')
    headline = fh.readline()
def test_recursion():
    ####################################################
    # test recursion

    a = Input(shape=(32, ), name='input_a')
    b = Input(shape=(32, ), name='input_b')

    dense = Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = layers.concatenate([a_2, b_2], name='merge')
    c = Dense(64, name='dense_2')(merged)
    d = Dense(5, name='dense_3')(c)

    model = Model(inputs=[a, b], outputs=[c, d], name='model')

    e = Input(shape=(32, ), name='input_e')
    f = Input(shape=(32, ), name='input_f')
    g, h = model([e, f])

    # g2, h2 = model([e, f])

    assert g._keras_shape == c._keras_shape
    assert h._keras_shape == d._keras_shape

    # test separate manipulation of different layer outputs
    i = Dense(7, name='dense_4')(h)

    final_model = Model(inputs=[e, f], outputs=[i, g], name='final')
    assert len(final_model.inputs) == 2
    assert len(final_model.outputs) == 2
    assert len(final_model.layers) == 4

    # we don't check names of first 2 layers (inputs) because
    # ordering of same-level layers is not fixed
    print('final_model layers:', [layer.name for layer in final_model.layers])
    assert [layer.name
            for layer in final_model.layers][2:] == ['model', 'dense_4']

    print(model.compute_mask([e, f], [None, None]))
    assert model.compute_mask([e, f], [None, None]) == [None, None]

    print(final_model.compute_output_shape([(10, 32), (10, 32)]))
    assert final_model.compute_output_shape([(10, 32), (10, 32)]) == [(10, 7),
                                                                      (10, 64)]

    # run recursive model
    fn = K.function(final_model.inputs, final_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]

    # test serialization
    model_config = final_model.get_config()
    print(json.dumps(model_config, indent=4))
    recreated_model = Model.from_config(model_config)

    fn = K.function(recreated_model.inputs, recreated_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]

    ####################################################
    # test multi-input multi-output

    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])

    o = Input(shape=(32, ), name='input_o')
    p = Input(shape=(32, ), name='input_p')
    q, r = model([o, p])

    assert n._keras_shape == (None, 5)
    assert q._keras_shape == (None, 64)
    s = layers.concatenate([n, q], name='merge_nq')
    assert s._keras_shape == (None, 64 + 5)

    # test with single output as 1-elem list
    multi_io_model = Model([j, k, o, p], [s])

    fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32))
    ])
    assert [x.shape for x in fn_outputs] == [(10, 69)]

    # test with single output as tensor
    multi_io_model = Model([j, k, o, p], s)

    fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32))
    ])
    # note that the output of the K.function will still be a 1-elem list
    assert [x.shape for x in fn_outputs] == [(10, 69)]

    # test serialization
    print('multi_io_model.layers:', multi_io_model.layers)
    print('len(model.inbound_nodes):', len(model.inbound_nodes))
    print('len(model.outbound_nodes):', len(model.outbound_nodes))
    model_config = multi_io_model.get_config()
    print(model_config)
    print(json.dumps(model_config, indent=4))
    recreated_model = Model.from_config(model_config)

    fn = K.function(recreated_model.inputs, recreated_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32))
    ])
    # note that the output of the K.function will still be a 1-elem list
    assert [x.shape for x in fn_outputs] == [(10, 69)]

    config = model.get_config()
    Model.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)

    ####################################################
    # test invalid graphs

    # input is not an Input tensor
    j = Input(shape=(32, ), name='input_j')
    j = Dense(32)(j)
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])

    with pytest.raises(TypeError):
        Model([j, k], [m, n])

    # disconnected graph
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    with pytest.raises(RuntimeError):
        Model([j], [m, n])

    # redundant outputs
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    # this should work with a warning
    Model([j, k], [m, n, n])

    # redundant inputs
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    with pytest.raises(ValueError):
        Model([j, k, j], [m, n])

    # I have no idea what I'm doing: garbage as inputs/outputs
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    with pytest.raises(TypeError):
        Model([j, k], [m, n, 0])

    ####################################################
    # test calling layers/models on TF tensors

    if K._BACKEND == 'tensorflow':
        import tensorflow as tf
        j = Input(shape=(32, ), name='input_j')
        k = Input(shape=(32, ), name='input_k')
        m, n = model([j, k])
        tf_model = Model([j, k], [m, n])

        j_tf = tf.placeholder(dtype=K.floatx())
        k_tf = tf.placeholder(dtype=K.floatx())
        m_tf, n_tf = tf_model([j_tf, k_tf])
        assert m_tf.get_shape().as_list() == [None, 64]
        assert n_tf.get_shape().as_list() == [None, 5]

        # test merge
        layers.concatenate([j_tf, k_tf], axis=1)
        layers.add([j_tf, k_tf])

        # test tensor input
        x = tf.placeholder(shape=(None, 2), dtype=K.floatx())
        InputLayer(input_tensor=x)

        x = Input(tensor=x)
        Dense(2)(x)