Ejemplo n.º 1
0
def model_load(downloaded_path):
    """
    Build the three DeepXplore MNIST models sharing one input tensor.

    Each model is trained from scratch only when its pre-trained weight
    file (``ModelN.h5``) is missing from ``_deepxplore_mnist_dir``.

    :param downloaded_path: unused here; kept for interface compatibility
        # NOTE(review): confirm no caller relies on this argument
    :return: (model1, model2, model3, input_tensor) where input_tensor is
        the shared keras.layers.Input()
    """
    input_shape = (28, 28, 1)  # MNIST: 28x28 grayscale images

    # Single placeholder tensor shared by all three models.
    input_tensor = Input(shape=input_shape)

    # Instantiate each model; train only if its weight file is absent.
    loaded_models = []
    for idx, model_cls in enumerate((Model1, Model2, Model3), start=1):
        weights_path = os.path.join(_deepxplore_mnist_dir,
                                    'Model{}.h5'.format(idx))
        loaded_models.append(model_cls(input_tensor=input_tensor,
                                       train=not os.path.isfile(weights_path)))

    model1, model2, model3 = loaded_models
    return model1, model2, model3, input_tensor
Ejemplo n.º 2
0
# Wire the combined GAN: the discriminator consumes tensor `x`.
# NOTE(review): `x` is presumably the generator's output tensor and
# `ganInput`/`adam` are defined earlier in the file -- confirm upstream.
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)
#print(gan.summary())
#
# gen_img = generator.predict()
#
# # orig_img = gen_img.copy()

# Separate optimizer whose learning rate comes from the CLI (--step).
# NOTE(review): `lr` is deprecated in newer Keras in favor of
# `learning_rate` -- confirm the installed version still accepts it.
adam2 = Adam(lr=args.step, beta_1=0.5)
# actually we don't care about all outputs of model1/2/3, only the
# category that we focus on
#model = Model( x)
input_shape = (1, img_rows, img_cols)  # channels-first layout here
# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)
model1 = Model1(input_tensor=input_tensor)

model2 = Model2(input_tensor=input_tensor)
model3 = Model3(input_tensor=input_tensor)

# Freeze the three classifiers: only the generator should be updated.
model1.trainable = False
model2.trainable = False
model3.trainable = False

# Target/original class label used by the attack below.
orig_label = 1

# layer_name1, index1 = neuron_to_cover(model_layer_dict1)
# layer_name2, index2 = neuron_to_cover(model_layer_dict2)
# layer_name3, index3 = neuron_to_cover(model_layer_dict3)
#
Ejemplo n.º 3
0
import torch.optim
import torch.backends.cudnn as cudnn
cudnn.benchmark = True  # let cuDNN auto-tune the fastest algorithms
from Model1 import Model1

# After instantiating the model class, load the saved weights.

# Load the pre-trained word2vec embeddings (binary format).
model = gensim.models.KeyedVectors.load_word2vec_format(
    '/media/daniele/AF56-12AA/GoogleNews-vectors-negative300.bin', binary=True)
#model = gensim.models.KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True)

# Load my trained model from a checkpoint.
# NOTE(review): torch.load unpickles arbitrary objects -- only load
# checkpoints from a trusted source.
checkpoint = torch.load('checkpoint-2.pth')
model_options = checkpoint["model_options"]
# Rebuild the network with the options saved at training time.
model2 = Model1(**model_options)
#hidden=checkpoint["h"]
model2.load_state_dict(checkpoint["model_state"])

#modello_h=modello.h
#modello_dict=modello.x
# Prepare fixed tensors for interactive testing.
zero = torch.FloatTensor(1, 1)
zero.fill_(0)
# "fineparola" = Italian for "end of word": a (1, 2) zero marker tensor.
fineparola = torch.cat([zero, zero], 1)
# Initial hidden state -- presumably for an RNN with 1024 units; confirm.
h = torch.zeros(1, 1, 1024)
target_as_input = torch.zeros(1, 302)
#print(fineparola)
try:
    # "domanda" = the user's question; tokenize into word characters.
    domanda = input("you: ")
    domanda = re.findall(r'\w+', domanda)
Ejemplo n.º 4
0
        # "VTL" instances use a tighter capacity factor (zeta2) with the
        # tour-length factor (zeta1) left at 1.
        # NOTE(review): zeta1/zeta2 for other ins_type values must be set
        # before this branch -- confirm upstream.
        if ins_type=="VTL":
            zeta2=0.2
            zeta1=1

        #Data.Maxtour= zeta1*math.ceil(float(NN)/M) * np.percentile(Data.distances.values(),50) 
        # Max tour length: zeta1 * ceil(NN / M[NN]) * median pairwise distance.
        Data.Maxtour= zeta1*math.ceil(float(NN)/M[NN]) * np.percentile(list(Data.distances.values()),50)
        #Data.Q= zeta2 * Data.G.node[0]['supply']/M # very tight capacity for instanc 6-10
        # Capacity derived from the depot (node 0) supply.
        Data.Q= zeta2 * Data.G.nodes[0]['supply']/M[NN]
        #Data.Total_dis_epsilon= 0.85* M*Data.Maxtour#0.85 * M*Data.Maxtour
        Data.Total_dis_epsilon= 0.85* M[NN]*Data.Maxtour

        # Pre-computed data keyed by instance file -- presumably a
        # route/column pool passed to the model; confirm.
        R=R_dic[File_name ]

        # NOTE(review): this overwrites the zeta2-based Q above -- Q becomes
        # at least the per-vehicle share of total demand and at least the
        # largest single node demand (so every node remains servable).
        Data.Q= max ( math.ceil(Data.total_demand / float(Data.M) ) , max(dict(Data.Gc.nodes(data='demand')).values())  )
        start= time()
        # Solve with Model1; alternative formulations kept for experiments.
        best_obj ,LB ,Runtime ,GAP = Model1(Data,R)
        #best_obj ,LB ,Runtime ,GAP = Model2(Data,R)
        #best_obj ,LB ,Runtime ,GAP = Model1_V2(Data,R)
        #oldresult=read_object('G:\My Drive\\1-PhD thesis\equitable relief routing\Code\%s\%s_BnPresult' %(Case_name,File_name)  )

        # Record objective, lower bound, runtime and optimality gap.
        results[File_name]=[best_obj ,LB ,Runtime ,GAP]
        #oldresult[File_name][0]=  best_obj
        #results=oldresult
        Model_runtime=time()-start


        #save_object(results,'G:\My Drive\\1-PhD thesis\\2 - equitable relief routing\Code\%s\%s_NewModel' %(Case_name,File_name) )

        #save_object(results,'G:\My Drive\\1-PhD thesis\\2 - equitable relief routing\Code\%s\%s_Modelresult' %(Case_name,File_name) )        

Ejemplo n.º 5
0
# --- Data preparation -------------------------------------------------
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Reshape to channels-last (N, H, W, 1) and scale pixels into [0, 1].
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_train /= 255
x_test = x_test.astype('float32')
x_test /= 255

# Shared input placeholder for whichever model is selected below.
input_tensor = Input(shape=input_shape)

# Select the model named on the command line.
# NOTE(review): only Model1 takes no retrain_num -- confirm intentional.
if args.model == 'Model1':
    model = Model1(input_tensor=input_tensor)
elif args.model == 'Model2':
    model = Model2(input_tensor=input_tensor, retrain_num=retrain_num)
elif args.model == 'Model3':
    model = Model3(input_tensor=input_tensor, retrain_num=retrain_num)
elif args.model == 'Model4':
    model = Model4(input_tensor=input_tensor, retrain_num=retrain_num)
elif args.model == 'Model5':
    model = Model5(input_tensor=input_tensor, retrain_num=retrain_num)
else:
    # Fail fast with a clear message; previously `model` stayed unbound
    # and an unrecognized --model value crashed later with a NameError.
    raise ValueError('unknown model name: {!r}'.format(args.model))

# if args.model == 'Similar_Model1':
#     model = Similar_Model1(input_tensor=input_tensor)
# elif args.model == 'Similar_Model2':
#     model = Similar_Model2(input_tensor=input_tensor)
# elif args.model == 'Similar_Model3':
#     model = Similar_Model3(input_tensor=input_tensor)
Ejemplo n.º 6
0
                try:
                    # Pull the next batch of lengths/frames/labels from the
                    # input pipeline.
                    videoLen_batch, labelLen_batch, video_batch, label_batch = sess.run(
                        [videoLens, labelLens, videos, labels])
                    # Labels arrive as raw symbols; map them to integer ids.
                    label_batch = converLabelsToInt(utilDict, label_batch)
                    feedDict = m.get_feed_dict(videoLen_batch,
                                               labelLen_batch,
                                               video_batch,
                                               label_batch,
                                               isTrain=True)
                    # One optimization step; also fetch the loss and the cer
                    # metric (presumably character error rate) for logging.
                    loss, _, cer = sess.run([m.cost, m.train_op, m.cer],
                                            feed_dict=feedDict)
                    print('Train: epoch:{}, step:{}, loss:{}, cer:{}'.format(
                        epoch, step, loss, cer))
                    step += 1
                except tf.errors.OutOfRangeError:
                    # Dataset exhausted for this epoch: checkpoint every 5th
                    # epoch (skipping epoch 0), then leave the inner loop.
                    if epoch and epoch % 5 == 0:
                        saver.save(sess,
                                   ckptPath,
                                   write_meta_graph=True,
                                   global_step=epoch)
                    break


# --- Training entry point ---------------------------------------------
MAX_EPOCH = 40000    # upper bound on training epochs
REPEAT = 1           # dataset repeat count
BATCH_SIZE = 50
TRAIN_TFRECORD = 'put your tfrecord file here'  # TODO: set the real path
# Utility dictionary loaded from disk -- presumably label<->id mappings.
utilDict = rloader.loadUtilDict('utilDict.pkl')
m = Model1(utilDict)
# NOTE(review): the final '' argument's meaning is not visible here --
# confirm against train()'s signature.
train(TRAIN_TFRECORD, MAX_EPOCH, BATCH_SIZE, REPEAT, utilDict, m, '')