Example No. 1
def CNN_model(frameHeight, frameWidth):
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='same', init='he_normal', activation='relu',
                            input_shape=(1, int(frameHeight), int(frameWidth))))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    model.add(Convolution2D(64, 3, 3, border_mode='same', init='he_normal', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Convolution2D(128, 3, 3, border_mode='same', init='he_normal', activation='relu'))
    model.add(MaxPooling2D(pool_size=(8, 8)))
    model.add(Dropout(0.2))

    model.add(Flatten())

    model.add(Dense(32, W_regularizer=l2(1.26e-7)))
    model.add(Activation('relu'))

    model.add(Dense(2, W_regularizer=l2(1e-0)))
    model.add(Activation('softmax'))

    model.compile(Adam(lr=1e-3), loss='categorical_crossentropy')

    plot(model, to_file='model.png')

    return model
Example No. 2
def cnn_optimise(W):
    # Number of feature maps (outputs of convolutional layer)
    N_fm = 300
    # kernel size of convolutional layer
    kernel_size = 8
    conv_input_width = W.shape[1]
    conv_input_height = 200  # maxlen of sentence

    model = Sequential()
    # Embedding layer (lookup table of trainable word vectors)
    model.add(
        Embedding(input_dim=W.shape[0], output_dim=W.shape[1], weights=[W], W_constraint=unitnorm(), init="uniform")
    )
    # Reshape word vectors from Embedding to tensor format suitable for Convolutional layer
    model.add(Reshape(dims=(1, conv_input_height, conv_input_width)))

    # first convolutional layer
    model.add(Convolution2D(N_fm, kernel_size, conv_input_width, border_mode="valid", W_regularizer=l2(0.0001)))
    # ReLU activation
    model.add(Dropout(0.5))
    model.add(Activation("relu"))

    # aggregate data in every feature map to scalar using MAX operation
    model.add(MaxPooling2D(pool_size=(conv_input_height - kernel_size + 1, 1), border_mode="valid"))
    model.add(Dropout(0.5))
    model.add(Flatten())

    # Inner Product layer (as in regular neural network, but without non-linear activation function)
    model.add(Dense(input_dim=N_fm, output_dim=1))
    # Sigmoid activation; Dense + sigmoid on a single output unit works as binary logistic regression
    model.add(Activation("sigmoid"))
    plot(model, to_file="./images/model.png")
    return model
Example No. 3
def make_keras_model(n_hidden_layers, dimof_middle, dimof_input):
        from keras.models import Sequential
        from keras.layers import Dense, Dropout, Activation, Flatten
        from keras.utils import np_utils, generic_utils
        from keras.wrappers.scikit_learn import KerasClassifier

        dimof_output = 1

        print("dimof_input : ",dimof_input, "dimof_output : ", dimof_output)

        batch_size = 100
        dropout = 0.5
        countof_epoch = 5

        model = Sequential()
        model.add(Dense(input_dim=dimof_input, output_dim=dimof_middle, init="glorot_uniform",activation='relu'))
        model.add(Dropout(dropout))

        for n in range(n_hidden_layers):
                model.add(Dense(input_dim=dimof_middle, output_dim=dimof_middle, init="glorot_uniform",activation='relu'))
                model.add(Dropout(dropout))

        model.add(Dense(input_dim=dimof_middle, output_dim=dimof_output, init="glorot_uniform",activation='sigmoid'))

        #Compiling (might take longer)
        model.compile(class_mode='binary', loss='binary_crossentropy', optimizer='adam', metrics=["accuracy"])

        visualize_util.plot(model, to_file='model.png')

        return model
def visualize_model(model):

    model.summary()
    visualize_util.plot(model,
                        to_file='../../figures/%s.png' % model.name,
                        show_shapes=True,
                        show_layer_names=True)
 def __init__(self, options):
     """Initialise network structure."""
     # Pre-define batch for use in objective function feature_matching
     self.timesteps = options['timesteps']
     self.features = options['features']
     self.gamma = options['gamma']
     self.drop = options['dropout']
     self.plot = options['plot']
     self.epoch = options['epoch']
     self.batch_size = options['batch_size']
     self.init = options['layer_init']
     self.pre_train_D = options['pre_train_D']
     self.num_GRU = options["GRU_layers"]
     self.is_mir_eval = options["IS_MIR_EVAL"]
     self.G_lr = options["G_lr"]
     self.D_lr = options["D_lr"]
     self.GAN_lr = options["GAN_lr"]
     self.mir_eval = {'SAR': [],
                      'SIR': [],
                      'SDR': []}
     self.batch = np.zeros((self.batch_size, self.timesteps, self.features))
     self.D__init__()
     self.G__init__()
     self.GAN__init__()
     if self.plot:
         plot(self.GAN, to_file='model.png')
def createmodel(img_channels,img_rows,img_cols,classes=1):
	# This is a Sequential model. Graph models can be used to create more complex networks.
	# Teaching Points:
	# 1. Here we utilize the adam optimization algorithm. To use the SGD algorithm instead, replace the {adam=keras.optimizers.Adadelta(lr=0)} line with {sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)} and make sure you import the correct optimizer from keras.
	# 2. This is a binary classification problem, so make sure the correct activation/loss combination is used. For such a problem the sigmoid activation function with the binary cross-entropy loss is a good option.
	# 3. Since this is a binary problem, use model.add(Dense(1)), NOT 2.
	# 4. For a multi-class model this code can easily be modified by selecting softmax as the activation function and categorical cross-entropy as the loss (see the sketch after this function).

	model = Sequential()

	# first set of CONV => RELU => POOL
	model.add(Convolution2D(20, 5, 5, border_mode="same",
		input_shape=(img_channels, img_rows, img_cols)))
	model.add(Activation("relu"))
	model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

	# second set of CONV => RELU => POOL
	model.add(Convolution2D(50, 5, 5, border_mode="same"))
	model.add(Activation("relu"))
	model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
	# set of FC => RELU layers
	model.add(Flatten())
	model.add(Dense(500))
	model.add(Activation("relu"))
	# sigmoid classifier (binary output; see Teaching Point 2)
	model.add(Dense(classes))
	# model.add(Activation('relu'))
	model.add(Activation('sigmoid'))
	# learning schedule callback
	adam=keras.optimizers.Adadelta(lr=0)
	lrate = LearningRateScheduler(step_decay)
	model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
	plot(model, to_file='modellen.png')
	return model
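A minimal multi-class sketch of the same network, following Teaching Point 4 above. This is a hedged example, not part of the original code: it assumes the same Keras 1.x API used throughout this page, and createmodel_multiclass is a hypothetical name.

from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Activation, Flatten, Dense

def createmodel_multiclass(img_channels, img_rows, img_cols, classes):
    model = Sequential()
    # same CONV => RELU => POOL stacks as the binary version
    model.add(Convolution2D(20, 5, 5, border_mode="same",
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(50, 5, 5, border_mode="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation("relu"))
    # multi-class head: one unit per class, softmax activation,
    # categorical cross-entropy loss instead of sigmoid/binary cross-entropy
    model.add(Dense(classes))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model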
Example No. 7
def evaluate_single_model(model, folder_name, model_name, features_test, labels_test, save_model=True):
    cat_labels_test = np_utils.to_categorical(labels_test)
    loss, acc = model.evaluate(features_test, cat_labels_test, verbose=2)
    write = "**********Evaluating " + str(model_name) + "************\n"
    write += 'Testing data size: ' + str(len(labels_test)) + '\n'
    write += str(Counter(labels_test)) + '\n'
    write += 'loss: ' + str(loss) + ' acc: ' + str("%.2f" % round(acc, 4)) + '\n'

    result = model.predict(features_test)
    result_label = np.argmax(result, 1)
    gt_label = labels_test

    con_matrix = confusion_matrix(gt_label, result_label)
    write += str(con_matrix) + '\n'
    write += "Classification report:\n"
    write += str(classification_report(gt_label, result_label)) + '\n'

    #  create folder if not exists
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)

    if save_model:
        model_json = model.to_json()
        with open(folder_name + model_name + "_model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights(folder_name + model_name + "_model.h5")
        print("Saved model to disk")
        plot(model, to_file=folder_name + model_name + "_model.png", show_shapes=True)

    return write, acc
Example No. 8
def imdb_test():
    # set parameters:
    max_features = 5000  # number of vocabulary
    maxlen = 200  # padding
    batch_size = 16
    nb_epoch = 10

    print('Loading data...')
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features,
                                                          test_split=0.2)

    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    nb_classes = 2
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)

    model = imdb_cnn()
    plot(model, to_file='./images/imdb_model.png')

    # try using different optimizers and different optimizer configs
    # model.compile(loss='binary_crossentropy', optimizer='adagrad', class_mode="binary")
    model.compile(loss='categorical_crossentropy', optimizer='adagrad')

    print("Train...")
    early_stopping = EarlyStopping(monitor='val_loss', patience=5)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, validation_data=(X_test, y_test),
              show_accuracy=True, callbacks=[early_stopping])
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)
Example No. 9
def build_model(config):
    sent_input = Input(shape=(config.max_len,),dtype='int32',name = "sent_input") # define input
    sent_embedding = Embedding(config.vocab_size,config.word_dims,
                                # W_regularizer=l2(0.01),
                                dropout=0.2,
                                name="sent_w2v",)(sent_input)
    convs = []
    if config.pool_method=="mean":
        pool_method =  mean_1d
    elif  config.pool_method=="max":
        pool_method = max_1d

    for nb_filter,filter_length in zip(config.nb_filters,config.filter_lengths):
        conv = Convolution1D(nb_filter=nb_filter,
                        filter_length=filter_length,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1,
                        name="conv_len_{}_nb_{}".format(filter_length,nb_filter))(sent_embedding)
        pooling = Lambda(pool_method, output_shape=(nb_filter,),name="max_pooling_len_{}_nb_{}".format(filter_length,nb_filter))(conv)
        convs.append(pooling)
    concat =  merge(convs, mode='concat',name = "sent_merge")
    hidden =    Dropout(p = config.hidden_dropout)(Dense(config.hidden_dims,activation="relu",name = "sent_hidden")(concat))
    out = Activation('softmax')(Dense(config.label_size)(hidden))
    model = Model(input=sent_input, output=out,name=config.model_name)
    print("build completed")
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())
    from keras.utils.visualize_util import plot
    plot(model, to_file='flatten_10.png',show_shapes=True)
    return model
Example No. 10
 def compile_model(self, lr=0.0001, loss_weights=0.1):
     optimizer = Adam(lr=lr)
     loss = 'mse'
     # loss = custom_objective
     self.model.compile(optimizer=optimizer, loss=loss)
                        #metrics=['mse'])
     plot(self.model, to_file='model.png')
def get_new_model(in_width, lstm_hidden, out_width, batch_size, ltype):
    model = Sequential()
#    if args.time_dist_dense:
#        model.add(LSTM(in_width, lstm_hidden, return_sequences=True))
#        model.add(TimeDistributedDense(lstm_hidden, out_width))
#    else:
#        if args.rnn:
#            model.add(SimpleRNN(in_width, lstm_hidden, return_sequences=False))
#        else:
    print "Number of hidden layers:", args.depth
#    model.add(LSTM(lstm_hidden , input_dim=in_width, return_sequences=False if args.depth == 0 else True))
    
    for i in range(args.depth):
        model.add(generate_layer(ltype, in_width, lstm_hidden, "last" if i+1 == args.depth else "first" if i==0 else "middle"))
#        model.add(LSTM(lstm_hidden , input_dim=in_width, return_sequences=False if i+1 == args.depth else True))
    model.add(Dropout(args.dropout))
    model.add(Dense(out_width))
    model.add(Activation('softmax'))

    optimizer = Adagrad(clipnorm=args.clipnorm)

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=optimizer)

    from keras.utils.visualize_util import plot
    plot(model, show_shapes=True, to_file='model.png')
    return model
Example No. 12
def draw_model(model):
    from IPython.display import SVG
    from keras.utils.visualize_util import to_graph
    from keras.utils.visualize_util import plot

    SVG(to_graph(model).create(prog='dot', format='svg'))
    plot(model, to_file='UFCNN/UFCNN_1.png')
 def build_model(self):
     lstm_branch = []
     input_branch = []
     for i in range(self.layers_number):
         main_input = Input(shape=(self.lstm_timesteps, self.input_dim), name="main_input_" + str(i))
         input_branch.append(main_input)
         lstm_out = LSTM(self.lstm_hidden, return_sequences=True)(main_input)
         # auxiliary_input = Input(batch_shape=(self.batch_size,1,self.lstm_timesteps), name='auxiliary_input'+str(i))
         auxiliary_input = Input(shape=(1, self.lstm_timesteps), name="auxiliary_input" + str(i))
         input_branch.append(auxiliary_input)
         """
         x1 = Merge([lstm_out, auxiliary_input], mode=lambda x, y: (x*y).sum(axis=0),
                 name='merge_lstm_auxi'+str(i))
         """
         x1 = merge([auxiliary_input, lstm_out], mode="dot", dot_axes=[2, 1], name="merge_lstm_auxi" + str(i))
         assert x1
         flatten = Reshape((self.lstm_hidden,))(x1)
         c_input = Input(shape=(6,), name="c_input" + str(i))
         input_branch.append(c_input)
         x2 = merge([flatten, c_input], mode="concat")
         x2 = Dense(
             self.lstm_hidden, activation="relu", W_regularizer=l2(0.001), activity_regularizer=activity_l2(0.001)
         )(x2)
         assert x2
         lstm_branch.append(x2)
     lstm_all_out = merge(lstm_branch, mode="sum", name="lstm_all_out")
     """
     dense_relu = Dense(self.lstm_hidden, activation='relu', W_regularizer=l2(0.001),
             activity_regularizer=activity_l2(0.001))(lstm_all_out)
     """
     final_loss = Dense(self.output_dim, name="main_output")(lstm_all_out)
     self.model = Model(input_branch, output=final_loss)
     self.model.compile(loss="mean_squared_error", optimizer="adagrad")
     plot(self.model, to_file="multiple_model.png", show_shapes=True)
Example No. 14
def save_model(model, name="model"):
  model_json = model.to_json()

  model_out = open(name + '.json', 'w')
  model_out.write(model_json)
  model_out.close()
  plot(model, to_file='model_' + name + '.png')
  model.save_weights('weights_' + name, overwrite=True)
def main():
    #load data 
    #X_train,Y_train,X_valid,Y_valid,X_test=load_data(training_dir,valid_dir,test_dir,labels,sample)
    #preprocess data by mean subtraction and normalization 
    #X_train,X_valid,X_test=preprocess(X_train,X_valid,X_test)
    #del X_train
    #del X_test

    #or load pre-processed data from a previously saved hdf5 file:

    data=h5py.File('imagenet.hdf5','r')
    X_train=np.asarray(data['X_train']) 
    Y_train=np.asarray(data['Y_train']) 
    X_valid=np.asarray(data['X_valid']) 
    Y_valid=np.asarray(data['Y_valid']) 
    X_test=np.asarray(data['X_test']) 
    print(Y_valid.shape) 
    print(X_valid.shape) 
    #print "loaded data from pickle" 
    #OPTIONAL: save loaded/pre-processed data to a pickle to save time in the future
    '''
    print "saving preprocessed data to hdf5 file" 
    f=h5py.File('imagenet.hdf5','w')
    dset_xtrain=f.create_dataset("X_train",data=X_train)
    dset_ytrain=f.create_dataset("Y_train",data=Y_train) 
    dset_xvalid=f.create_dataset("X_valid",data=X_valid) 
    dset_yvalid=f.create_dataset("Y_valid",data=Y_valid) 
    dset_xtest=f.create_dataset("X_test",data=X_test) 
    f.flush() 
    f.close() 
    print "done saving pre-processed data to hdf5 file!" 
    '''
    #train a VGG-like convnet

    vgg_model,history=vgg_train(X_train,Y_train)    
    train_scores=vgg_evaluate(vgg_model,X_train,Y_train)
    print "VGG-like net training scores:"+str(train_scores) 
    valid_scores=vgg_evaluate(vgg_model,X_valid,Y_valid)
    print "VGG-like net validation scores:"+str(valid_scores)
    #Visualize the pretty model
    plot(vgg_model,to_file="vgg_like_convnet.png") 
    predictions=vgg_model.predict(X_test,verbose=1) 
    class_predictions=vgg_model.predict_classes(X_test) 
    #save all the outputs! 
    sys.setrecursionlimit(50000) 
    output=open('vgg_like_results.pkl','w') 
    pickle.dump(history,output) 
    pickle.dump(train_scores,output) 
    pickle.dump(valid_scores,output) 
    pickle.dump(predictions,output) 
    pickle.dump(class_predictions,output) 
    output.close() 

    #train a Keras version of the ConvNet implemented in Assignment#2 in class
    #TODO

    #train AlexNet
    '''
Example No. 16
def train_and_predict():
    imgs_train, imgs_output_train = load_train_data()

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_output_train = imgs_output_train.astype('float32')
    imgs_output_train /= 255.  # scale outputs to [0, 1]

    imgs_test, imgs_output_test = load_test_data()

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    
    imgs_output_test = imgs_output_test.astype('float32')
    imgs_output_test /= 255.  # scale outputs to [0, 1]

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='val_loss', save_best_only=True )

    print('-'*30)
    print('Fitting model...')
    print('-'*30)

    earlyStopping=EarlyStopping(monitor='val_loss', patience=20, verbose=0, mode='auto')
    model.fit(imgs_train, imgs_output_train, batch_size=16, nb_epoch=100, verbose=1, callbacks=[model_checkpoint, earlyStopping], validation_split=0.1, validation_data=None, shuffle=True)
        
    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('unet.hdf5')

    print('-'*30)
    print('Predicting outputs on test data...')
    print('-'*30)
    imgs_output_test2 = model.predict(imgs_test, verbose=1)
    np.save('imgs_output_test2.npy', imgs_output_test2)

    print('-'*30)
    print('Evaluating test loss...')
    print('-'*30)
    score = model.evaluate(imgs_test, imgs_output_test, batch_size=16)
    print(score)

    print('-'*30)
    print('Plotting model...')
    print('-'*30)
    plot(model, to_file='model.png')
    def plot_model(self, filename='brnn_model.png'):
        """
        Plot model.

        Arguments:
            filename: {string}, the name/path to the file
                to which the model graph is going to be plotted.
        """
        print "Plot Model %s ..." %filename
        plot(self.model, to_file=filename)
Example No. 18
 def write_model_graph(self):
     model = self.create_model()
     # write dot text to file
     with open(config["model_dot"], "wb") as out:
         m = model_to_dot(model)
         dot_text = m.create_dot()
         out.write(dot_text)
         print("Wrote .dot to {}".format(config["model_dot"]))
     # write graph to file
     plot(model, to_file=config["model_graph"], show_shapes=True)
Example No. 19
    def plot_model(self, filename):
        """
        Plot model.

        Arguments:
            filename: {string}, the name/path to the file
                to which the model graphic is plotted.
        """
        print "Plot Model %s ..." %filename
        plot(self.model, to_file=filename)
Example No. 20
def draw_model(model):
    from IPython.display import SVG
    from keras.utils.visualize_util import to_graph
    from keras.utils.visualize_util import plot

    #graph = to_graph(model, show_shape=True)
    #graph.write_png("UFCNN_1.png")

    SVG(to_graph(model).create(prog='dot', format='svg'))
    plot(model, to_file='UFCNN_1.png')
Example No. 21
    def build(self):
        enc_size = self.size_of_env_observation()
        argument_size = IntegerArguments.size_of_arguments
        input_enc = InputLayer(batch_input_shape=(self.batch_size, enc_size), name='input_enc')
        input_arg = InputLayer(batch_input_shape=(self.batch_size, argument_size), name='input_arg')
        input_prg = Embedding(input_dim=PROGRAM_VEC_SIZE, output_dim=PROGRAM_KEY_VEC_SIZE, input_length=1,
                              batch_input_shape=(self.batch_size, 1))

        f_enc = Sequential(name='f_enc')
        f_enc.add(Merge([input_enc, input_arg], mode='concat'))
        f_enc.add(MaxoutDense(128, nb_feature=4))
        self.f_enc = f_enc

        program_embedding = Sequential(name='program_embedding')
        program_embedding.add(input_prg)

        f_enc_convert = Sequential(name='f_enc_convert')
        f_enc_convert.add(f_enc)
        f_enc_convert.add(RepeatVector(1))

        f_lstm = Sequential(name='f_lstm')
        f_lstm.add(Merge([f_enc_convert, program_embedding], mode='concat'))
        f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
        f_lstm.add(Activation('relu', name='relu_lstm_1'))
        f_lstm.add(RepeatVector(1))
        f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
        f_lstm.add(Activation('relu', name='relu_lstm_2'))
        # plot(f_lstm, to_file='f_lstm.png', show_shapes=True)

        f_end = Sequential(name='f_end')
        f_end.add(f_lstm)
        f_end.add(Dense(1, W_regularizer=l2(0.001)))
        f_end.add(Activation('sigmoid', name='sigmoid_end'))

        f_prog = Sequential(name='f_prog')
        f_prog.add(f_lstm)
        f_prog.add(Dense(PROGRAM_KEY_VEC_SIZE, activation="relu"))
        f_prog.add(Dense(PROGRAM_VEC_SIZE, W_regularizer=l2(0.0001)))
        f_prog.add(Activation('softmax', name='softmax_prog'))
        # plot(f_prog, to_file='f_prog.png', show_shapes=True)

        f_args = []
        for ai in range(1, IntegerArguments.max_arg_num+1):
            f_arg = Sequential(name='f_arg%s' % ai)
            f_arg.add(f_lstm)
            f_arg.add(Dense(IntegerArguments.depth, W_regularizer=l2(0.0001)))
            f_arg.add(Activation('softmax', name='softmax_arg%s' % ai))
            f_args.append(f_arg)
        # plot(f_arg, to_file='f_arg.png', show_shapes=True)

        self.model = Model([input_enc.input, input_arg.input, input_prg.input],
                           [f_end.output, f_prog.output] + [fa.output for fa in f_args],
                           name="npi")
        self.compile_model()
        plot(self.model, to_file='model.png', show_shapes=True)
Example No. 22
 def presave(self, model_directory):
     """
     Save the model architecture to the given directory.
     :param model_directory: Directory to save model and weights.
     :return:
     """
     try:
         open(os.path.join(model_directory, self.__architecture_name), 'w').write(self.model.to_json(indent=2))
     except Exception as e:
         self.logger.warn("Model structure is not saved due to: %s" % repr(e))
     plot(self.model, to_file=os.path.join(model_directory, self.__model_graph_name))
Example No. 23
def visualize_model(args):
    model = MoleculeVAE()

    data, charset = load_dataset(args.data, split = False)

    if os.path.isfile(args.model):
        model.load(charset, args.model)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)

    plot(model.autoencoder, to_file = args.outfile)
Example No. 24
def main():
    # init-----------------------------------------------------------------
    train_samples = dtp.sample_paths_from_list(DATA_LOC, LIST_LOC+TRAIN_LIST)
    eval_samples  = dtp.sample_paths_from_list(DATA_LOC, LIST_LOC+EVAL_LIST)
    classes = open(DICT, 'r+').read().splitlines()
    nb_classes = len(classes)
    batch_size = 180

    # load pretrained models----------------------------------------------------
    pretrained = load_model(PRETRAINED_LOC, PRETRAINED_NAME)

    # generate RGB stream model------------------------------------------------------
    if not model_exist(RGB_MODEL_NAME):
        print 'Training RGB stream model...\n'
        rgb_stream = eitel.create_single_stream(nb_classes, pretrained, tag='_rgb')
        plot(rgb_stream, 'stream_model.png')
        rgb_stream = train_model(rgb_stream, 0, batch_size, train_samples, eval_samples, classes)
        
        if SAVE_MODEL:
            save_model(rgb_stream, RGB_MODEL_NAME)
            del rgb_stream
    else:
        print 'RGB stream model already exists...'

    # generate depth stream model------------------------------------------------------
    if not model_exist(DEP_MODEL_NAME):
        print 'Training depth stream model...\n'
        dep_stream = eitel.create_single_stream(nb_classes, pretrained, tag='_dep')
        dep_stream = train_model(dep_stream, 1, batch_size, train_samples, eval_samples, classes)
        
        if SAVE_MODEL:
            save_model(dep_stream, DEP_MODEL_NAME)
            del dep_stream
    else:
        print 'Depth stream model already exists...'

    # reload the model weights----------------------------------------------------
    print 'Loading weights...'
    rgb_stream = load_model(MODEL_LOC, RGB_MODEL_NAME)
    dep_stream = load_model(MODEL_LOC, DEP_MODEL_NAME)

    # fusion model-----------------------------------------------------------
    print 'Fusing stream models...'
    fusion_model = eitel.create_model_merge(rgb_stream, dep_stream, nb_classes)
    del rgb_stream
    del dep_stream
    plot(fusion_model, 'fusion_model.png')

    batch_size=200
    fusion_model = train_model(fusion_model, 2, batch_size, train_samples, eval_samples, classes)
    if SAVE_MODEL:
        save_model(fusion_model, FUS_MODEL_NAME)
def train_network(X, Y, epochs=1, train=True, filename=None, load_model=False, finetune=False, task_id=None, independent=False):
    global model, history
    if load_model:
        print('Loading model')
        input_model = open(filename[0], 'r')
        model = model_from_json(input_model.read())

        input_model.close()
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
        model.load_weights(filename[1])
        print('Loaded(probably)')
        if finetune:
            lastLayer = None
            print(model.layers[0].__dict__)
            for layer in model.layers:
                if (layer.name == 'dense_1'):
                    nn.conv = layer
                lastLayer = layer
            import h5py

    if not load_model or finetune:
        X, Y, _ = transform_data(X, Y, task_id)
        print('Labels shape: ' + str(Y.shape))
        print('Labels look like this : ')
        print(Y[0])
        if not finetune:
            nn.construct_model(X[0].shape)

        nn.add_new_task(len(Y[0]), independent)
        model = nn.get_model().model

    if train:

        print('Training started')
        input_model = open(filename[0], 'w')
        input_model.write(model.to_json())
        input_model.close()
        # model.summary()
        from keras.utils.visualize_util import plot
        plot(model, to_file='model.png')

        print('Fitting')
        history = nn.get_model().fit(X, Y, epoch=epochs)
        task_dims.append(Y.shape[1])

        print('Training end')
        if filename is not None:
            input_model = open(filename[0], 'w')
            input_model.write(model.to_json())
            input_model.close()

            model.save_weights(filename[1], overwrite=True)
def save_network(model, problem_name, operation_name, network_name):
    """ saves the given network as a PNG image.

    Args:
        model: The Keras Sequential model.
        problem_name: The name of target problem.
        operation_name: The name of arithmetic operation.
        network_name: The name of the network.

    """

    model_name = problem_name + "_" + operation_name + network_name
    plot(model, to_file=model_name + ".png", show_shapes=True)
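For reference, a hypothetical call to save_network; the name strings below are placeholders, and the snippet assumes this module's imports plus an existing compiled Keras model named model.

# Hypothetical usage: the argument values are illustrative only.
save_network(model, problem_name="mnist", operation_name="addition", network_name="_cnn")
# -> writes "mnist_addition_cnn.png" with layer shapes shown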
Example No. 27
def SA_sst():
    ((x_train_idx_data, y_train_valence, y_train_labels,
      x_test_idx_data, y_test_valence, y_test_labels,
      x_valid_idx_data, y_valid_valence, y_valid_labels,
      x_train_polarity_idx_data, y_train_polarity,
      x_test_polarity_idx_data, y_test_polarity,
      x_valid_polarity_idx_data, y_valid_polarity), W) = nn_input()                    #  build_keras_input_amended or build_keras_input

    maxlen = 200  # cut texts after this number of words (among top max_features most common words)
    batch_size = 16
    (X_train, y_train), (X_test, y_test), (X_valid, y_valide) = (x_train_polarity_idx_data, y_train_polarity), (
    x_test_polarity_idx_data, y_test_polarity), (x_valid_polarity_idx_data, y_valid_polarity)
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    # m= 0
    # for i in X_train:
    #     if len(i) >0:
    #         for j in i:
    #             if j > m:
    #                 m=j
    # print(m)
    max_features = W.shape[0]  # shape of W: (13631, 300) , changed to 14027 through min_df = 3

    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    X_valid = sequence.pad_sequences(X_valid, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    nb_classes = 2
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    y_valide = np_utils.to_categorical(y_valide, nb_classes)

    model = Deep_CNN(W)
    plot(model, to_file='./images/model.png')

    # try using different optimizers and different optimizer configs
    # model.compile(loss='binary_crossentropy', optimizer='adagrad', class_mode="binary")
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer='adagrad')   # adagrad

    print("Train...")
    early_stopping = EarlyStopping(monitor='val_loss', patience=5)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=20, validation_data=(X_test, y_test), show_accuracy=True,
              callbacks=[early_stopping])
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)
Example No. 28
def check_print():
    # Create the Model
    x, img_input, CONCAT_AXIS, INP_SHAPE, DIM_ORDERING = create_model()

    # Create a Keras Model - Functional API
    model = Model(input=img_input,
                  output=[x])
    model.summary()

    # Save a PNG of the Model Build
    plot(model, to_file='./Model/AlexNet.png')

    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy')
    print('Model Compiled')
Example No. 29
def createModel(settings, createPlot):
    print("Building Model...")
    model = Sequential()

    # if (settings.depth > 1):
    model.add(LSTM(settings.hiddenNodes, return_sequences=True, input_shape=(settings.sequence_size, settings.N_values)))
    
    model.add(Dropout(settings.dropoutAmount))
    # else:
    #     model.add(LSTM(settings.hiddenNodes, return_sequences=False, input_shape=(settings.sequence_size, settings.N_values)))
         # inner_init='orthogonal'
    #     model.add(Dropout(settings.dropoutAmount))

    for i in range(1, settings.depth-1):
        model.add(LSTM(settings.hiddenNodes, return_sequences=True))
        model.add(Dropout(settings.dropoutAmount))

    model.add(LSTM(settings.hiddenNodes, return_sequences=False))
    model.add(Dropout(settings.dropoutAmount))
    # model.add(LSTM(settings.hiddenNodes, return_sequences=False, input_shape=(settings.sequence_size, settings.N_values)))
    # model.add(LSTM(settings.hiddenNodes, return_sequences=True))
    # model.add(LSTM(settings.hiddenNodes, return_sequences=True))
    # model.add(LSTM(settings.hiddenNodes, return_sequences=True))
    # model.add(LSTM(settings.hiddenNodes, return_sequences=False))

    
    if (settings.l1Amount > 0):
        model.add(Dense(settings.N_values, W_regularizer=l1(settings.l1Amount)))
    elif settings.l2Amount > 0:
        model.add(Dense(settings.N_values, W_regularizer=l2(settings.l2Amount)))
    else:
        model.add(Dense(settings.N_values))
    
    model.add(Activation(settings.activation))
    model.compile(loss=settings.lossType, optimizer=settings.optimizer)

    settings.filename = settings.activation + "_" + settings.trainingset + "_nodes" + str(settings.hiddenNodes) +"_depth" + str(settings.depth) + "_seq" + str(settings.sequence_size) + "_drop" + str(settings.dropoutAmount) + "_L1r" + str(settings.l1Amount) + "_L2r" + str(settings.l2Amount) 
    save_model_scratch(model, settings.filename, 0, False)
    
    with open('/var/scratch/jvranken/models/' + settings.filename + '/model_settings.txt', 'w') as settingsFile:
        for (setting, value) in vars(settings).items():
            settingsFile.write(setting + ': ' + str(value) + '\n')

    if createPlot:
        plot(model, to_file='/var/scratch/jvranken/models/' + settings.filename + '/model_layout.png', show_shapes=True)

    return model
Example No. 30
def train(x_train, y_train, x_test, y_test, vocab_size):
	model = Sequential()
	model.add(Embedding(vocab_size, 128, input_length=maxlen, dropout=0.5, mask_zero = True))
	model.add(LSTM(128, dropout_W=0.5, dropout_U=0.1))  # try using a GRU instead, for fun
	model.add(Dropout(0.5))
	model.add(Dense(vocab_size))
	model.add(Activation('softmax'))
	model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
	print('Train...')
	batch_size = 32
	plot(model, to_file='model.png', show_shapes=True)
	model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=15,
	          validation_data=(x_test, y_test), show_accuracy=True)
	score, acc = model.evaluate(x_test, y_test,
	                            batch_size=batch_size,
	                            show_accuracy=True)
	print('Test score:', score)
	print('Test accuracy:', acc)
Example No. 31
    dp2 = Dropout(0.5)(fc2)

    output = Dense(1)(dp2)

    model = Model(input=[uinput, iinput], output=output)
    sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(optimizer=sgd,
    #           loss='mse')
    model.compile(optimizer='adam', loss='mse')

    model_name = 'mlp.hdf5'
    model_checkpoint = ModelCheckpoint(path + model_name,
                                       monitor='val_loss',
                                       save_best_only=True)
    plot(model,
         to_file=path + '%s.png' % model_name.replace('.hdf5', ''),
         show_shapes=True)

    nb_epoch = 5
    batch_size = 1024 * 6
    load_model = False

    if load_model:
        model.load_weights(path + model_name)

    model.fit([u_train, i_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=True,
Example No. 32
        x[1, :, :] -= 116.779
        x[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        x = x[:, :, ::-1]
        # Zero-center by mean pixel
        x[:, :, 0] -= 103.939
        x[:, :, 1] -= 116.779
        x[:, :, 2] -= 123.68
    return x


# create the base pre-trained model
base_model = VGG16(weights='imagenet')
plot(base_model,
     to_file='./results/modelVGG16a.png',
     show_shapes=True,
     show_layer_names=True)

x = base_model.get_layer('fc2').output
x = base_model.layers[-2].output
# x = base_model.get_layer('block5_pool').output
# x = Flatten(name='flat')(x)
# x = Dense(4096, activation='relu', name='fc')(x)
x = Dense(8, activation='softmax', name='predictions')(x)

model = Model(input=base_model.input, output=x)
plot(model,
     to_file='./results/modelVGG16b.png',
     show_shapes=True,
     show_layer_names=True)
for layer in base_model.layers:
Example No. 33
 def train(self,
           max_sent_len,
           train_inputs,
           train_labels,
           num_epochs=20,
           mlp_size=1024,
           mlp_activation='relu',
           dropout=None,
           embedding_file=None,
           tune_embedding=True,
           num_mlp_layers=2,
           batch=None,
           patience=5):
     '''
     train_inputs (list(numpy_array)): The two sentence inputs
     train_labels (numpy_array): One-hot matrix indicating labels
     num_epochs (int): Maximum number of epochs to run
     mlp_size (int): Dimensionality of each layer in the MLP
     dropout (dict(str->float)): Probabilities in Dropout layers after "embedding" and "encoder" (lstm)
     embedding (numpy): Optional pretrained embedding
     tune_embedding (bool): If pretrained embedding is given, tune it.
     patience (int): Early stopping patience
     '''
     if dropout is None:
         dropout = {}
     num_label_types = train_labels.shape[
         1]  # train_labels is of shape (num_samples, num_label_types)
     sent1_input_layer = Input(name='sent1',
                               shape=train_inputs[0].shape[1:],
                               dtype='int32')
     sent2_input_layer = Input(name='sent2',
                               shape=train_inputs[1].shape[1:],
                               dtype='int32')
     encoded_sent1, encoded_sent2 = self._get_encoded_sentence_variables(
         max_sent_len,
         sent1_input_layer,
         sent2_input_layer,
         dropout,
         embedding_file,
         tune_embedding,
         batch=32 if batch == None else batch)
     concat_sent_rep = merge([encoded_sent1, encoded_sent2], mode='concat')
     mul_sent_rep = merge([encoded_sent1, encoded_sent2], mode='mul')
     diff_sent_rep = merge([encoded_sent1, encoded_sent2],
                           mode=lambda l: l[0] - l[1],
                           output_shape=lambda l: l[0])
     # Use heuristic from Mou et al. (2015) to get final merged representation
     merged_sent_rep = merge([concat_sent_rep, mul_sent_rep, diff_sent_rep],
                             mode='concat')
     current_projection = merged_sent_rep
     for i in range(num_mlp_layers):
         mlp_layer_i = Dense(output_dim=mlp_size,
                             activation=mlp_activation,
                             name="%s_layer_%d" % (mlp_activation, i))
         current_projection = mlp_layer_i(current_projection)
     if dropout is not None:
         if "output" in dropout:
             current_projection = Dropout(
                 dropout["output"])(current_projection)
     softmax = Dense(output_dim=num_label_types,
                     activation='softmax',
                     name='softmax_layer')
     label_probs = softmax(current_projection)
     model = Model(input=[sent1_input_layer, sent2_input_layer],
                   output=label_probs)
     model.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
     self.model = model
     print >> sys.stderr, "Entailment model summary:"
     model.summary()
     plot(model,
          to_file='model_plot.png',
          show_shapes=True,
          show_layer_names=True)
     best_accuracy = 0.0
     num_worse_epochs = 0
     for epoch_id in range(num_epochs):
         print >> sys.stderr, "Epoch: %d" % epoch_id
         history = model.fit(train_inputs,
                             train_labels,
                             validation_split=0.1,
                             nb_epoch=1)
         validation_accuracy = history.history['val_acc'][
             0]  # history['val_acc'] is a list of size nb_epoch
         if validation_accuracy > best_accuracy:
             self.save_model(epoch_id)
             self.best_epoch = epoch_id
             num_worse_epochs = 0
             best_accuracy = validation_accuracy
         elif validation_accuracy < best_accuracy:
             num_worse_epochs += 1
             if num_worse_epochs >= patience:
                 print >> sys.stderr, "Stopping training."
                 break
     self.save_best_model()
Example No. 34
import argparse
from keras.models import model_from_json
from keras.utils.visualize_util import plot

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Network visualization')
    parser.add_argument(
        'model',
        type=str,
        help=
        'Path to model definition json. Model weights should be on the same path.'
    )
    args = parser.parse_args()
    with open(args.model, 'r') as jfile:
        # NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
        # then you will have to call:
        #
        #   model = model_from_json(json.loads(jfile.read()))\
        #
        # instead.
        model = model_from_json(jfile.read())

    model.compile("adam", "mse")
    plot(model, show_shapes=True, to_file='img/model.png')
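A minimal sketch of the alternate load path described in the NOTE above, assuming the architecture was saved with json.dump(model.to_json(), ...); the 'model.json' path is a placeholder.

import json
from keras.models import model_from_json

# json.dump(model.to_json(), f) stores a JSON-encoded string, so decode it
# with json.loads before handing it to model_from_json.
with open('model.json', 'r') as jfile:
    model = model_from_json(json.loads(jfile.read()))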
Example No. 35
def main():
    emotion_classifier = load_model(model_path)
    emotion_classifier.summary()
    plot(emotion_classifier, to_file='model.png')
Example No. 36
model.add(Permute((2,1,3,4)))
print(model.layers[6].output_shape)
model.add(Reshape((100,16*5*4)))

model.add(LSTM(128,return_sequences=True))
model.add(LSTM(128,return_sequences=True))
model.add(Flatten())
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.summary()

modelname = 'cube_cnn_2l_tmr_filtered'

plot(model, to_file='/media/kong/9A8821088820E48B/Xcat/experiment/BCI2008/ds1a/model/arange_pole/'+modelname+'.png', show_shapes=True)

opt = Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='/media/kong/9A8821088820E48B/Xcat/experiment/BCI2008/ds1a/model/arange_pole/'+modelname+'.h5',verbose=1,save_best_only=True, monitor='val_acc')

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])

#model.save('/home/xcat/MasterPaper/model/' + modelname + '.h5')

score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
Example No. 37
        border_mode='valid',
        input_shape=(1, img_rows, img_cols),
        # W_regularizer = l1l2ld(l1 = 0., l2 = 0., ld = 0.),
        W_regularizer=l2(l=0.),
        b_regularizer=l2(l=0.)))
# model.add(AveragePooling2D(pool_size=(5, 5)))
# model.add(Dropout(0.25))

model.add(Flatten())
# model.add(Dense(128))
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(nb_classes, W_regularizer=l2(l=0.), b_regularizer=l2(l=0.)))
model.add(Activation('softmax'))

plot(model, to_file="./mnist.png", show_shapes=True)
# sys.exit()

for i in range(len(model.layers)):
    print("i: ", i)
    print(model.layers[i].get_config())

# sys.exit()

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

model.fit(X_train,
          Y_train,
          batch_size=batch_size,
Example No. 38
    def build_resnet_152(input_shape, num_outputs):
        return ResNetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3])

# Define resnet model between 18, 34, 50, 101 or 152
model = ResNetBuilder.build_resnet_18((3, img_width, img_height), 1)

print('-'*30)
print('Model summary...')
print('-'*30)
model.summary()

if(PRINT_MODEL):
    print('-'*30)
    print('Printing model...')
    print('-'*30)
    plot(model, to_file='resnet50_keras_model.png')

model.compile(loss='binary_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
                    rescale=1./255,
                    shear_range=0,
                    rotation_range=40, # randomly rotate images in the range (degrees, 0 to 180)
                    width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
                    height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
                    zoom_range=0.2,
                    horizontal_flip=True, # randomly flip images
                    vertical_flip=False)  # randomly flip images
Example No. 39
        net['conv8_2_mbox_priorbox'], net['pool6_mbox_priorbox']
    ],
                                 mode='concat',
                                 concat_axis=1,
                                 name='mbox_priorbox')
    if hasattr(net['mbox_loc'], '_keras_shape'):
        num_boxes = net['mbox_loc']._keras_shape[-1] // 4
    elif hasattr(net['mbox_loc'], 'int_shape'):
        num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
    net['mbox_loc'] = Reshape((num_boxes, 4),
                              name='mbox_loc_final')(net['mbox_loc'])
    net['mbox_conf'] = Reshape((num_boxes, num_classes),
                               name='mbox_conf_logits')(net['mbox_conf'])
    net['mbox_conf'] = Activation('softmax',
                                  name='mbox_conf_final')(net['mbox_conf'])
    net['predictions'] = merge(
        [net['mbox_loc'], net['mbox_conf'], net['mbox_priorbox']],
        mode='concat',
        concat_axis=2,
        name='predictions')
    model = Model(net['input'], net['predictions'])
    return model


if __name__ == "__main__":
    ssd = SSD512((512, 512, 3))
    ssd.summary()

    from keras.utils.visualize_util import plot
    plot(ssd, to_file='ssd_model.png', show_layer_names=True, show_shapes=True)
Example No. 40
question_net = Sequential()
question_net.add(q_embedding_layer)
# question_net.add(LSTM(EMBEDDING_DIM, consume_less='cpu'))
# question_net.add(RepeatVector(400))
# question_net.add(Dense(EMBEDDING_DIM))
# question_net.add(Bidirectional(GRU(EMBEDDING_DIM)))
# question_net.add(Activation('tanh'))
# question_net.add(Dropout(0.1))
question_net.add(Bidirectional(LSTM(EMBEDDING_DIM, return_sequences=True)))
# question_net.add(GRU(EMBEDDING_DIM, return_sequences=True))
# question_net.add(LSTM(EMBEDDING_DIM, return_sequences=True, consume_less='mem'))
print("question layer shape:", question_net.layers[-1].output_shape)
# question_net.add(Dense(1, activation='sigmoid'))

plot(question_net, to_file='question_net.png', show_shapes=True)

merged = Merge([passage_net, question_net], mode='dot')
# merged = Merge([passage_net, question_net], mode='cos')
print("merged layer shape:", question_net.layers[-1].output_shape)

model = Sequential()

model.add(merged)

# multiply passage by the dot product
# add softmax here

# model.add(Dropout(.5))
# model.add(TimeDistributedDense(100, activation='softmax'))
model.add(Permute((2, 1)))
Example No. 41
print "-----------------"
print(hist.history.get('loss')[-1])
print "-----------------"

loss=hist.history.get('loss')
acc=hist.history.get('acc')
model.save('Share_Weight/model_122222.h5')
np.save('Share_Weight/loss_122222.npy',loss)
np.save('Share_Weight/acc_122222.npy',acc)
sendEmail('Modelling Done')

model=load_model('Share_Weight/model_122222.h5')
loss=np.load('Share_Weight/loss_122222.npy')
acc=np.load('Share_Weight/acc_122222.npy')

plot(model, to_file='Share_Weight/TreeModel_122222.png')

rangeNum=300000  # 300000 computation cases (start to end)
avgerr=np.zeros((rangeNum,len(opL)))

preStart=200000
preNum=100000
print("predict")
inputnum = len(input[0, :])
outputnumL = len(outputL[0, :])
outputnumR = len(outputR[0, :])

predictend = preStart + preNum
# real data used as the test input
testinput = data[preStart:predictend, ip]
# real data used as the reference output, for comparing results
Example No. 42
conv2 = Convolution2D(nb_filters, kernel_size_2[0], kernel_size_2[1], border_mode='same')
model.add(conv2)
model.add(Activation('relu'))

pool2 = MaxPooling2D(pool_size=pool_size_2)
model.add(pool2)

model.add(Flatten())
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.summary()

modelname = '2cnn_adam_max_relu'

plot(model, to_file='/home/xcat/experiment/BCI2008/ds1a/model/on-filtered-data/'+modelname+'.png', show_shapes=True)

opt = Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='/home/xcat/experiment/BCI2008/ds1a/model/on-filtered-data/'+modelname+'.h5',verbose=1,save_best_only=True, monitor='val_acc')

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])

#model.save('/home/xcat/MasterPaper/model/' + modelname + '.h5')

score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
Example No. 43
def convNet_LeNet(

	VERBOSE=1,
	# normlize
	NORMALIZE = True,
	# Network Parameters
	BATCH_SIZE = 128,
	NUM_EPOCHS = 20,
	# Number of convolutional filters 
	NUM_FILTERS = 32,
	# side length of maxpooling square
	NUM_POOL = 2,
	# side length of convolution square
	NUM_CONV = 3,
	# dropout rate for regularization
	DROPOUT_RATE = 0.5,
	# hidden number of neurons first layer
	NUM_HIDDEN = 128,
	# validation data
	VALIDATION_SPLIT=0.2, # 20%
	# optimizer used
	OPTIMIZER = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
	# regularization
	REGULARIZER = l2(0.01)
	): 

	# Output classes, number of MNIST DIGITS
	NUM_CLASSES = 10
	# Shape of an MNIST digit image
	SHAPE_X, SHAPE_Y = 28, 28
	# Channels on MNIST
	IMG_CHANNELS = 1

	# LOAD the MNIST DATA split in training and test data
	(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
	X_train = X_train.reshape(X_train.shape[0], 1, SHAPE_X, SHAPE_Y)
	X_test = X_test.reshape(X_test.shape[0], 1, SHAPE_X, SHAPE_Y)

	# convert in float32 representation for GPU computation
	X_train = X_train.astype("float32")
	X_test = X_test.astype("float32")

	if (NORMALIZE):
		# NORMALIZE each pixel by dividing by max_value=255
		X_train /= 255
		X_test /= 255
	print('X_train shape:', X_train.shape)
	print(X_train.shape[0], 'train samples')
	print(X_test.shape[0], 'test samples')
	 
	# KERAS needs to represent each output class into OHE representation
	Y_train = np_utils.to_categorical(Y_train, NUM_CLASSES)
	Y_test = np_utils.to_categorical(Y_test, NUM_CLASSES)

	nn = Sequential()
	 
	#FIRST LAYER OF CONVNETS, POOLING, DROPOUT
	#  apply a NUM_CONV x NUM_CONV convolution with NUM_FILTERS outputs
	#  for the first layer it is also required to define the input shape
	#  activation function is rectified linear 
	nn.add(Convolution2D(NUM_FILTERS, NUM_CONV, NUM_CONV, 
		input_shape=(IMG_CHANNELS, SHAPE_X, SHAPE_Y) ))
	nn.add(Activation('relu'))
	nn.add(Convolution2D(NUM_FILTERS, NUM_CONV, NUM_CONV))
	nn.add(Activation('relu'))
	nn.add(MaxPooling2D(pool_size = (NUM_POOL, NUM_POOL)))
	nn.add(Dropout(DROPOUT_RATE))

	#SECOND LAYER OF CONVNETS, POOLING, DROPOUT 
	#  apply a NUM_CONV x NUM_CONV convolution with NUM_FILTERS outputs
	nn.add(Convolution2D( NUM_FILTERS, NUM_CONV, NUM_CONV))
	nn.add(Activation('relu'))
	nn.add(Convolution2D(NUM_FILTERS, NUM_CONV, NUM_CONV))
	nn.add(Activation('relu'))
	nn.add(MaxPooling2D(pool_size = (NUM_POOL, NUM_POOL) ))
	nn.add(Dropout(DROPOUT_RATE))
	 
	# FLATTEN the shape for dense connections 
	nn.add(Flatten())
	 
	# FIRST HIDDEN LAYER OF DENSE NETWORK
	nn.add(Dense(NUM_HIDDEN))  
	nn.add(Activation('relu'))
	nn.add(Dropout(DROPOUT_RATE))          

	# OUTPUT LAYER with NUM_CLASSES OUTPUTS
	# ACTIVATION IS SOFTMAX, REGULARIZATION IS L2
	nn.add(Dense(NUM_CLASSES, W_regularizer=REGULARIZER))
	nn.add(Activation('softmax') )

	#summary
	nn.summary()
	#plot the model
	plot(nn)

	# set an early-stopping value
	early_stopping = EarlyStopping(monitor='val_loss', patience=2)

	# COMPILE THE MODEL
	#   loss_function is categorical_crossentropy
	#   optimizer is parametric
	nn.compile(loss='categorical_crossentropy', 
		optimizer=OPTIMIZER, metrics=["accuracy"])

	start = time.time()
	# FIT THE MODEL WITH VALIDATION DATA
	fitlog = nn.fit(X_train, Y_train, \
		batch_size=BATCH_SIZE, nb_epoch=NUM_EPOCHS, \
		verbose=VERBOSE, validation_split=VALIDATION_SPLIT, \
		callbacks=[early_stopping])
	elapsed = time.time() - start

	# Test the network
	results = nn.evaluate(X_test, Y_test, verbose=VERBOSE)
	print('accuracy:', results[1])

	# just to get the list of input parameters and their value
	frame = inspect.currentframe()
	args, _, _, values = inspect.getargvalues(frame)
	# used for printing pretty arguments

	print_Graph(fitlog, elapsed, args, values)

	return fitlog  
Example No. 44
model.add(Flatten())
model.add(Dropout(0.1))
model.add(Dense(150))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator, samples_per_epoch = \
            len(train_samples) * 6, validation_data=validation_generator, \
            nb_val_samples=len(validation_samples) * 6, nb_epoch=5)

model.save('model.h5')

print(history_object.history.keys())

# plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.grid()
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('training.png')
plt.show()

# plot the CNN architecture
plot(model, to_file='model.png')
                      name="encode1")
encode1_tensor = encode1_layer(raw_input_tensor)

decode1_layer = Dense(output_dim=args.patch_length * 2,
                      activation='linear',
                      name="decode1")
decode1_tensor = decode1_layer(encode1_tensor)

# First model is input -> encode1 -> decode1:
autoencoder1 = Model(input=raw_input_tensor, output=decode1_tensor)
autoencoder1.compile(optimizer='adadelta', loss='mse')
if args.net_plot:
    pngname = '%s/keras_autoencoder1.png' % (args.output_dir, )
    epsname = '%s/keras_autoencoder1.eps' % (args.output_dir, )
    print("Saving %s..." % (pngname, ))
    plot(autoencoder1, to_file=pngname, show_shapes=True)
    print("Saving %s..." % (epsname, ))
    plot(autoencoder1, to_file=epsname, show_shapes=True)

if train_net:
    # Train first autoencoder on raw input.
    autoencoder1.fit(x_train,
                     x_train,
                     nb_epoch=500,
                     batch_size=256,
                     shuffle=True,
                     validation_data=(x_val, x_val))

# Stack the 2nd autoencoder (connecting to encode1):
encode2_layer = Dense(hidden2,
                      activation='sigmoid',
    x = MaxPooling2D((2,2))(x)

    for i in range(N):
        x = conv3_block(x, k, dropout)
        nb_conv += 2

    x = AveragePooling2D((8,8))(x)
    x = Flatten()(x)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(ip, x)

    if verbose: print("Wide Residual Network-%d-%d created." % (nb_conv, k))
    return model

if __name__ == "__main__":
    from keras.utils.visualize_util import plot
    from keras.layers import Input
    from keras.models import Model

    init = (3, 32, 32)

    wrn_28_10 = create_wide_residual_network(init, nb_classes=100, N=4, k=10, dropout=0.25)

    model = wrn_28_10  # create_wide_residual_network already returns a Model

    model.summary()
    plot(model, "WRN-28-10.png", show_shapes=True, show_layer_names=True)
        type=str,
        help='Path to model h5 file. Model should be on the same path.')
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help=
        'Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()

    # check that model Keras version is same as local Keras version
    f = h5py.File(args.model, mode='r')
    model_version = f.attrs.get('keras_version')
    keras_version = str(keras_version).encode('utf8')

    if model_version != keras_version:
        print('You are using Keras version ', keras_version,
              ', but the model was built using ', model_version)

    model = load_model(args.model)

    if not pydot.find_graphviz():
        raise ImportError('Install pydot and graphviz.')
    else:
        plot(model,
             to_file='model.png',
             show_shapes=True,
             show_layer_names=False)
Exemplo n.º 48
0
x=Activation('relu')(x)
seq_output=Flatten()(x)

print('INFO - %s'%('building concatenate model.'))
units=512
x=Merge(mode='concat',concat_axis=1)([reg_output,seq_output])
x=Dense(units,activation='relu')(x)
x=Dropout(0.5,seed=42)(x)
x=Dense(units,activation='relu')(x)
x=Dropout(0.5,seed=42)(x)
rgs_output=Dense(1,activation='linear',name='rgs_output')(x)


model=Model(input=[reg_input,seq_input],output=[rgs_output])
model.compile(loss={'rgs_output':'mean_squared_error'},optimizer='sgd')
plot(model, show_shapes=True,to_file='%s/model.eps'%(fig_dir))

print('INFO - %s'%('loading data.'))
train,val,test=input_data.read_data_sets(train_pct=80,val_pct=10,test_pct=10)


print('INFO - %s'%('training model.'))
reduce_lr=ReduceLROnPlateau(verbose=1,factor=0.5, patience=5)
early_stopping=EarlyStopping(monitor='val_loss',patience=10)
checkpoint=ModelCheckpoint(filepath="%s/model.{epoch:02d}-{val_loss:.4f}.hdf5"%(log_dir), monitor='val_loss')
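# BatchHistory is a project-specific callback whose definition is not shown in this
# snippet; a rough sketch of what it might do (the class name, arguments and the
# evaluation call are assumptions): record a validation loss every `every_n_batch` batches.
from keras.callbacks import Callback

class BatchHistorySketch(Callback):
    def __init__(self, val_data, loss_function='mse', every_n_batch=1000):
        super(BatchHistorySketch, self).__init__()
        self.val_data = val_data
        self.loss_function = loss_function   # kept only to mirror the call below
        self.every_n_batch = every_n_batch
        self.seen = 0
        self.val_losses = []

    def on_batch_end(self, batch, logs={}):
        self.seen += 1
        if self.seen % self.every_n_batch == 0:
            loss = self.model.evaluate(
                {'seq_input': self.val_data['seq'], 'reg_input': self.val_data['reg']},
                {'rgs_output': self.val_data['expr']},
                verbose=0)
            self.val_losses.append(loss)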
batchhistory=BatchHistory(val_data=val,loss_function='mse',every_n_batch=1000)
history=model.fit({'seq_input':train['seq'],'reg_input':train['reg']},{'rgs_output':train['expr']},
	validation_data=({'seq_input':val['seq'],'reg_input':val['reg']},{'rgs_output':val['expr']}),
	nb_epoch=100,
	batch_size=100,
	callbacks=[early_stopping,checkpoint,reduce_lr,batchhistory],
Exemplo n.º 49
0
def train():
    X = np.load('X_train.npy')
    y = np.load('y_train.npy')

    X = np.reshape(X, (30, 512, 512, 1))
    # tf dim_ordering
    y = np.reshape(y, (30, 512, 512, 1))
    # tf dim_ordering

    # Extend radius
    radius = 64
    X = np.pad(X, ((0, 0), (radius, radius), (radius, radius), (0, 0)),
               'reflect')
    # tf dim_ordering
    y = np.pad(y, ((0, 0), (radius, radius), (radius, radius), (0, 0)),
               'reflect')
    # tf dim_ordering

    # Preprocess the label
    y = y / 255

    y = y.astype('float32')
    X = X.astype('float32')

    # Build 7 variants of the data (original, two flips, a transpose and three rotations), then merge them
    X0 = X
    y0 = y

    X1 = X[::1, ::1, ::-1, ::1]
    y1 = y[::1, ::1, ::-1, ::1]

    X2 = X[::1, ::-1, ::1, ::1]
    y2 = y[::1, ::-1, ::1, ::1]

    X3 = np.transpose(X, (0, 2, 1, 3))
    y3 = np.transpose(y, (0, 2, 1, 3))

    X4 = scipy.ndimage.interpolation.rotate(X, angle=90, axes=(1, 2))
    y4 = scipy.ndimage.interpolation.rotate(y, angle=90, axes=(1, 2))

    X5 = scipy.ndimage.interpolation.rotate(X, angle=180, axes=(1, 2))
    y5 = scipy.ndimage.interpolation.rotate(y, angle=180, axes=(1, 2))

    X6 = scipy.ndimage.interpolation.rotate(X, angle=270, axes=(1, 2))
    y6 = scipy.ndimage.interpolation.rotate(y, angle=270, axes=(1, 2))

    X = np.concatenate((X0, X1, X2, X3, X4, X5, X6), axis=0)
    y = np.concatenate((y0, y1, y2, y3, y4, y5, y6), axis=0)
    ##########
    print("Y median", np.median(y))
    print("X shape", X.shape)
    print("X dtype", X.dtype)
    print("Y shape", y.shape)
    print("Y dtype", y.dtype)

    nb_iter = 1001
    epochs_per_iter = 2
    batch_size = 1

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    model = get_uplusnet()
    model.reset_states()
    # model.load_weights("model_300.hdf5")
    # graph = to_graph(model, show_shape=True)
    # graph.write_png("model.png")
    plot(model, to_file='model.png', show_shapes=True)

    nb_folds = 3
    # kfolds 		= KFold(len(y), nb_folds)
    kfolds = KFold(nb_folds)

    # Perform cross validation on the data
    for iter in range(nb_iter):
        print('-' * 50)
        print('Iteration {0}/{1}'.format(iter + 1, nb_iter))
        print('-' * 50)

        # Shuffle the data
        print('Shuffle data...')
        seed = np.random.randint(1, 10e6)
        np.random.seed(seed)
        np.random.shuffle(X)
        np.random.seed(seed)
        np.random.shuffle(y)

        f = 0
        for train, valid in kfolds.split(X):
            # for train, valid in kfolds:
            print('=' * 50)
            print('Fold', f + 1)
            f += 1

            # Extract train, validation set
            X_train = X[train]
            X_valid = X[valid]
            y_train = y[train]
            y_valid = y[valid]

            print('Augmenting data for training...')
            X_train, y_train = augment_data(
                X_train, y_train)  # Data augmentation for training
            X_valid, y_valid = augment_data(
                X_valid, y_valid)  # Data augmentation for validation

            print("X_train", X_train.shape)
            print("y_train", y_train.shape)

            # Normalize
            # X_train = X_train/255.0
            # X_valid = X_valid/255.0
            from keras.preprocessing.image import ImageDataGenerator
            train_datagen = ImageDataGenerator(
                featurewise_center=False,
                samplewise_center=True,
                featurewise_std_normalization=False,
                samplewise_std_normalization=True,
                rescale=1 / 255.0)
            train_datagen.fit(X_train)
            valid_datagen = ImageDataGenerator(
                featurewise_center=False,
                samplewise_center=True,
                featurewise_std_normalization=False,
                samplewise_std_normalization=True,
                rescale=1 / 255.0)
            valid_datagen.fit(X_valid)

            # checkpoint the best model
            filepath = "model-best.hdf5"
            checkpoint = ModelCheckpoint(filepath,
                                         monitor='val_loss',
                                         verbose=1,
                                         save_best_only=True,
                                         mode='min')

            callbacks_list = [
                TensorBoard(
                    log_dir='/home/Pearl/quantm/isbi12_210_keras_uplus/logs/')
            ]

            # datagen = ImageDataGenerator(
            # 	    samplewise_center=True,
            # 	    samplewise_std_normalization=True,
            # 	    zca_whitening=True)

            history = model.fit(X_train,
                                y_train,
                                verbose=1,
                                shuffle=True,
                                nb_epoch=epochs_per_iter,
                                batch_size=batch_size,
                                callbacks=callbacks_list,
                                validation_data=(X_valid, y_valid))
            # history = model.fit_generator(
            # 				train_datagen.flow(X_train, y_train, batch_size=batch_size),
            # 				verbose=1,
            # 				samples_per_epoch=len(X_train),
            # 				nb_epoch=epochs_per_iter,
            # 				callbacks=callbacks_list,
            # 				validation_data=valid_datagen.flow(X_valid, y_valid, batch_size=batch_size),
            # 				nb_val_samples=len(X_valid)
            # 				)
            # list all data in history
            # print(history.history.keys())
            # Plot
            # # summarize history for accuracy
            # plt.plot(history.history['acc'])
            # plt.plot(history.history['val_acc'])
            # plt.title('model accuracy')
            # plt.ylabel('accuracy')
            # plt.xlabel('epoch')
            # plt.legend(['train', 'test'], loc='upper left')
            # plt.show()
            # summarize history for loss
            # plt.plot(history.history['loss'])
            # plt.plot(history.history['val_loss'])
            # plt.title('model loss')
            # plt.ylabel('loss')
            # plt.xlabel('epoch')
            # plt.legend(['train', 'test'], loc='upper left')
            # plt.show()
        if iter % 10 == 0:
            fname = 'models/model_%03d.hdf5' % (iter)
            model.save_weights(fname, overwrite=True)
Exemplo n.º 50
0
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
from keras import backend as K
from keras.utils.visualize_util import plot
import numpy as np
import matplotlib.pyplot as plt
#load VGG model
base_model = VGG16(weights='imagenet')
#visualize topology in an image
plot(base_model, to_file='modelVGG16.png', show_shapes=True, show_layer_names=True)
#read and process image
img_path = '/data/MIT/test/coast/art1130.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
#crop the model up to a certain layer
model = Model(input=base_model.input, output=base_model.get_layer('block5_conv2').output)
#get the features from images
features = model.predict(x)
# theano and tensorflow deal with tensors in a different order
if K.image_dim_ordering() == 'th':
    # the original example branches on the Theano ('th') ordering here;
    # the branch body is not included in this snippet
    pass
weights = base_model.get_layer('block1_conv1').get_weights()
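# Small follow-up (not in the original example): inspect what was extracted.
# block5_conv2 of VGG16 on a 224x224 input gives a (1, 14, 14, 512) feature map
# in TensorFlow ordering; the conv1 kernel/bias shapes come from get_weights().
print('features shape:', features.shape)
print('block1_conv1 kernel shape:', weights[0].shape)
print('block1_conv1 bias shape:', weights[1].shape)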
Exemplo n.º 51
0
    aux_out = Flatten()(aux_out)
    aux_out = Dense(nb_classes, activation='softmax')(aux_out)

    # Reduction Resnet B
    x = reduction_resnet_B(x)

    # 5 x Inception Resnet C
    for i in range(5):
        x = inception_resnet_C(x, scale_residual=scale)

    # Average Pooling
    x = AveragePooling2D((8,8))(x)

    # Dropout
    x = Dropout(0.8)(x)
    x = Flatten()(x)

    # Output
    out = Dense(output_dim=nb_classes, activation='softmax')(x)

    model = Model(init, output=[out, aux_out], name='Inception-Resnet-v1')

    return model

if __name__ == "__main__":
    from keras.utils.visualize_util import plot

    inception_resnet_v1 = create_inception_resnet_v1()

    plot(inception_resnet_v1, to_file="Inception ResNet-v1.png", show_shapes=True)
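    # A possible compile step (not in the original snippet): with the main and the
    # auxiliary classifier as two outputs, the auxiliary loss is usually down-weighted.
    inception_resnet_v1.compile(optimizer='adam',
                                loss='categorical_crossentropy',
                                loss_weights=[1.0, 0.4],
                                metrics=['accuracy'])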
Exemplo n.º 52
0
        logger.info('Initializing lookup table')
        emb_reader = EmbReader(emb_path, emb_dim=emb_dim)

        for layer in model.layers:
            if layer.name in ('SplusEmbedding', 'SminusEmbedding',
                              'TplusEmbedding', 'TminusEmbedding'):
                layer.W.set_value(
                    emb_reader.get_emb_matrix_given_vocab(
                        vocab, layer.W.get_value()))

model.compile(loss=loss, optimizer=optimizer, metrics=[metric])

plot(model, to_file=out_dir + '/model.png')

logger.info('  Done')

###############################################################################################################################
## Save model architecture
#

logger.info('Saving model architecture')
with open(out_dir + '/model_arch.json', 'w') as arch:
    arch.write(model.to_json(indent=2))
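# The JSON holds only the architecture, not the weights; it can later be restored
# with model_from_json (a minimal sketch, assuming the same out_dir):
#
#   from keras.models import model_from_json
#   with open(out_dir + '/model_arch.json') as arch:
#       restored_model = model_from_json(arch.read())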

logger.info(
    '---------------------------------------------------------------------------------------'
)
Exemplo n.º 53
0
    if args.train is not None:
        do_training = True
    if args.model is not None:
        load_model = True
        model_to_load = args.model
    if args.epochs is not None:
        nb_epoch = args.epochs

    print("command line arguments")
    print(args)

    train_indices, validation_indices, test_indices, labels, filenames = load_data(fail_data, pass_data)

    model = qc_model()
    model.summary()
    plot(model, to_file="model.png")

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=4, min_lr=0.001)
    stop_early = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
    model_checkpoint = ModelCheckpoint(images_dir + "models/best_model.hdf5", monitor="val_acc", verbose=0, save_best_only=True, save_weights_only=False, mode='auto')

    hist = model.fit_generator(batch(train_indices, labels, 2, True),
                               nb_epoch=400,
                               samples_per_epoch=len(train_indices),
                               validation_data=batch(validation_indices, labels, 2),
                               nb_val_samples=len(validation_indices),
                               callbacks=[model_checkpoint],
                               class_weight={0: .7, 1: .3})


    model.load_weights(images_dir + 'models/best_model.hdf5')


    test_scores = []
    sensitivities = []
    specificities = []
Exemplo n.º 54
0
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils.visualize_util import plot

model = Sequential()
model.add(Dense(32, input_dim=500))
model.add(Activation(activation='sigmoid'))
model.add(Dense(1))
model.add(Activation(activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mean_squared_error'])

data = np.random.random((1000, 500))
labels = np.random.randint(2, size=(1000, 1))

score = model.evaluate(data, labels, verbose=0)
print "Before Training:", zip(model.metrics_names, score)

model.fit(data, labels, nb_epoch=10, batch_size=32, verbose=0)

score = model.evaluate(data, labels, verbose=0)
print "After Training:", zip(model.metrics_names, score)

plot(model, to_file='s4.png', show_shapes=True)

# Before Training: [('loss', 0.26870122766494753), ('mean_squared_error', 0.26870122766494753)]
# After Training: [('loss', 0.22180086207389832), ('mean_squared_error', 0.22180086207389832)]
Exemplo n.º 55
0
from keras.utils.visualize_util import plot
from keras.models import load_model
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Network Diagram')
    parser.add_argument('-m', '--model', type=str, help='Path to h5 model file.')
    args = parser.parse_args()

    net = load_model(args.model)

    print("Writing out 'model.png'.")
    plot(net, to_file='model.png')
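    # Example invocation (the script name is an assumption):
    #   python plot_network.py --model model.h5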
Exemplo n.º 56
0
model.add(pool2)

model.add(Permute((2, 1, 3)))
model.add(Reshape((56, 40 * 56)))
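# Assuming pool2 outputs 40 feature maps of size 56x56 (channels-first), the
# Permute/Reshape above turns the volume into a 56-step sequence with 40*56
# features per step, so the LSTMs below read the image row by row.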

model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.summary()

modelname = 'image-method'

plot(model,
     to_file='/home/xcat/experiment/ImageNet/model/' + modelname + '.png',
     show_shapes=True)

opt = Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
checkpointer = ModelCheckpoint(
    filepath='/home/xcat/experiment/ImageNet/model/' + modelname + '.h5',
    verbose=1,
    save_best_only=True,
    monitor='val_acc')

model.fit(X_train,
          Y_train,
          batch_size=batch_size,
Exemplo n.º 57
0
def run_cifar10(batch_size,
                nb_epoch,
                depth,
                nb_dense_block,
                nb_filter,
                growth_rate,
                dropout_rate,
                learning_rate,
                weight_decay,
                plot_architecture):
    """ Run CIFAR10 experiments

    :param batch_size: int -- batch size
    :param nb_epoch: int -- number of training epochs
    :param depth: int -- network depth
    :param nb_dense_block: int -- number of dense blocks
    :param nb_filter: int -- initial number of conv filter
    :param growth_rate: int -- number of new filters added by conv layers
    :param dropout_rate: float -- dropout rate
    :param learning_rate: float -- learning rate
    :param weight_decay: float -- weight decay
    :param plot_architecture: bool -- whether to plot network architecture

    """

    ###################
    # Data processing #
    ###################

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    nb_classes = len(np.unique(y_train))
    img_dim = X_train.shape[1:]

    if K.image_dim_ordering() == "th":
        n_channels = X_train.shape[1]
    else:
        n_channels = X_train.shape[-1]

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    # Normalisation
    X = np.vstack((X_train, X_test))
    # 2 cases depending on the image ordering
    if K.image_dim_ordering() == "th":
        for i in range(n_channels):
            mean = np.mean(X[:, i, :, :])
            std = np.std(X[:, i, :, :])
            X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
            X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std

    elif K.image_dim_ordering() == "tf":
        for i in range(n_channels):
            mean = np.mean(X[:, :, :, i])
            std = np.std(X[:, :, :, i])
            X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
            X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std

    ###################
    # Construct model #
    ###################

    model = densenet.DenseNet(nb_classes,
                              img_dim,
                              depth,
                              nb_dense_block,
                              growth_rate,
                              nb_filter,
                              dropout_rate=dropout_rate,
                              weight_decay=weight_decay)
    # Model output
    model.summary()

    # Build optimizer
    opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=["accuracy"])

    if plot_architecture:
        from keras.utils.visualize_util import plot
        plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)

    ####################
    # Network training #
    ####################

    print("Training")

    list_train_loss = []
    list_test_loss = []
    list_learning_rate = []

    for e in range(nb_epoch):

        if e == int(0.5 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))

        if e == int(0.75 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))

        split_size = batch_size
        num_splits = X_train.shape[0] // split_size
        arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)

        l_train_loss = []
        start = time.time()

        for batch_idx in arr_splits[:10]:

            X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
            train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)

            l_train_loss.append([train_logloss, train_acc])

        test_logloss, test_acc = model.evaluate(X_test,
                                                Y_test,
                                                verbose=0,
                                                batch_size=64)
        list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
        list_test_loss.append([test_logloss, test_acc])
        # float() so the numpy scalar is json serializable
        list_learning_rate.append(float(K.get_value(model.optimizer.lr)))
        print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))

        d_log = {}
        d_log["batch_size"] = batch_size
        d_log["nb_epoch"] = nb_epoch
        d_log["optimizer"] = opt.get_config()
        d_log["train_loss"] = list_train_loss
        d_log["test_loss"] = list_test_loss
        d_log["learning_rate"] = list_learning_rate

        json_file = os.path.join('./log/experiment_log_cifar10.json')
        with open(json_file, 'w') as fp:
            json.dump(d_log, fp, indent=4, sort_keys=True)
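
# A possible invocation of the experiment above (values are illustrative defaults,
# not necessarily those used in the DenseNet paper):
if __name__ == '__main__':
    run_cifar10(batch_size=64, nb_epoch=30, depth=40, nb_dense_block=3,
                nb_filter=16, growth_rate=12, dropout_rate=0.2,
                learning_rate=1e-3, weight_decay=1e-4, plot_architecture=False)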
Exemplo n.º 58
0
def train():
    #
    #  SaveModel is a Callback class that we can use to save the model after each epoch.
    #  This allows us to easily test each epoch on the simulator; the simulator seems like
    #  a better validation than the validation data set alone.
    class SaveModel(Callback):
        def on_epoch_end(self, epoch, logs={}):
            epoch += 1
            if (epoch > 0):
                #with open ('model-' + str(epoch) + '.json', 'w') as file:
                #    file.write (model.to_json ())
                #    file.close ()
                #model.save_weights ('model-' + str(epoch) + '.h5')
                model.save('model-' + str(epoch) + '.h5')

    #
    #  load the model
    #
    model = get_model()

    #  Keras has a nice tool to create an image of our network
    from keras.utils.visualize_util import plot
    plot(model, to_file='car_model.png', show_shapes=True)

    print("Loaded model")

    # load the data
    xs, ys = data.loadTraining()

    # split the dataset into training and validation  80% / 20%
    train_xs = xs[:int(len(xs) * 0.8)]
    train_ys = ys[:int(len(xs) * 0.8)]

    val_xs = xs[-int(len(xs) * 0.2):]
    val_ys = ys[-int(len(xs) * 0.2):]

    # Load the validation dataset once up front; it is better not to regenerate each image every epoch.
    # The validation-processing function does not augment the image, it only resizes it.
    X, y = data.getValidationDataset(val_xs, val_ys,
                                     data.processImageValidation)

    print(model.summary())
    print("Loaded validation datasetset")
    print("Total of", len(train_ys))
    print("Training..")

    checkpoint_path = "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=False,
                                 mode='auto')

    # I tried the EarlyStopping callback, but now I train for a fixed number of epochs and test each one to see which is best
    earlystopping = EarlyStopping(monitor='val_loss',
                                  min_delta=0,
                                  patience=2,
                                  verbose=1,
                                  mode='auto')

    res = model.fit_generator(data.generator(train_xs, train_ys, 256),
                              validation_data=(X, y),
                              samples_per_epoch=100 * 256,
                              nb_epoch=epochs,
                              verbose=1,
                              callbacks=[SaveModel()])

    # pickle and dump the history so we can graph it in a notebook
    history = res.history
    with open('history.p', 'wb') as f:
        pickle.dump(history, f)
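    # Later, e.g. in a notebook, the saved history can be plotted again
    # (a minimal sketch, assuming matplotlib):
    #
    #   with open('history.p', 'rb') as f:
    #       history = pickle.load(f)
    #   plt.plot(history['loss'])
    #   plt.plot(history['val_loss'])
    #   plt.legend(['train', 'validation'])
    #   plt.show()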
Exemplo n.º 59
0
# Set the calculated weights and biases
model.get_layer('h1').set_weights(W1b1)
model.get_layer('h2').set_weights(W2b2)
model.get_layer('h3').set_weights(W3b3)
model.get_layer('h4').set_weights(W4b4)
model.get_layer('h5').set_weights(W5b5)
model.get_layer('h6').set_weights(W6b6)
model.get_layer('out').set_weights(Wobo)



# Compile model
current_dir = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(current_dir, "FullyConnectedNetworkPrelu.png")
plot(model, to_file=model_path, show_shapes=True)
adam=keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='binary_crossentropy', optimizer=adam,metrics=['categorical_accuracy'])
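# step_decay is defined elsewhere in the original script; a hypothetical version
# (drop factor and step size are illustrative only) showing the signature that
# LearningRateScheduler expects: a function of the epoch index returning a learning rate.
def step_decay_sketch(epoch):
    initial_lr = 0.0005
    drop = 0.5
    epochs_drop = 500
    return initial_lr * (drop ** int(epoch / epochs_drop))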

# learning schedule callback
history=History()
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate,history]

#model Fitting
print "Training..."
model.fit(X_train, y_train,validation_data=(X_test,y_test),nb_epoch=2000, batch_size=X_train.shape[0], callbacks=callbacks_list, verbose=1)
#model.fit(X_train, y_train,validation_data=(X_test,y_test),nb_epoch=550, batch_size=X_train.shape[0],class_weight={0:1, 1:6756.0/271}, callbacks=callbacks_list, verbose=1)

#Model prediction
predicted=model.predict_proba(X_test,batch_size=25)
Exemplo n.º 60
0
    def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
        # Create the *Keras* model
        if cf.model_name == 'fcn8':
            model = build_fcn8(
                in_shape,
                cf.dataset.n_classes,
                cf.weight_decay,
                freeze_layers_from=cf.freeze_layers_from,
                #path_weights='weights/pascal-fcn8s-dag.mat')
                path_weights=None)
        elif cf.model_name == 'unet':
            model = build_unet(in_shape,
                               cf.dataset.n_classes,
                               cf.weight_decay,
                               freeze_layers_from=cf.freeze_layers_from,
                               path_weights=None)
        elif cf.model_name == 'segnet_basic':
            model = build_segnet(in_shape,
                                 cf.dataset.n_classes,
                                 cf.weight_decay,
                                 freeze_layers_from=cf.freeze_layers_from,
                                 path_weights=None,
                                 basic=True)
        elif cf.model_name == 'segnet_vgg':
            model = build_segnet(in_shape,
                                 cf.dataset.n_classes,
                                 cf.weight_decay,
                                 freeze_layers_from=cf.freeze_layers_from,
                                 path_weights=None,
                                 basic=False)
        elif cf.model_name == 'resnetFCN':
            model = build_resnetFCN(in_shape,
                                    cf.dataset.n_classes,
                                    cf.weight_decay,
                                    freeze_layers_from=cf.freeze_layers_from,
                                    path_weights=None)
        elif cf.model_name == 'densenetFCN':
            model = build_densenetFCN(in_shape,
                                      cf.dataset.n_classes,
                                      cf.weight_decay,
                                      freeze_layers_from=cf.freeze_layers_from,
                                      path_weights=None)
        elif cf.model_name == 'lenet':
            model = build_lenet(in_shape, cf.dataset.n_classes,
                                cf.weight_decay)
        elif cf.model_name == 'alexNet':
            model = build_alexNet(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay)
        elif cf.model_name == 'vgg16':
            model = build_vgg(in_shape,
                              cf.dataset.n_classes,
                              16,
                              cf.weight_decay,
                              load_pretrained=cf.load_imageNet,
                              freeze_layers_from=cf.freeze_layers_from)
        elif cf.model_name == 'vgg19':
            model = build_vgg(in_shape,
                              cf.dataset.n_classes,
                              19,
                              cf.weight_decay,
                              load_pretrained=cf.load_imageNet,
                              freeze_layers_from=cf.freeze_layers_from)
        elif cf.model_name == 'resnet50':
            model = build_resnet50(in_shape,
                                   cf.dataset.n_classes,
                                   cf.weight_decay,
                                   load_pretrained=cf.load_imageNet,
                                   freeze_layers_from=cf.freeze_layers_from)
        elif cf.model_name == 'InceptionV3':
            model = build_inceptionV3(in_shape,
                                      cf.dataset.n_classes,
                                      cf.weight_decay,
                                      load_pretrained=cf.load_imageNet,
                                      freeze_layers_from=cf.freeze_layers_from)
        elif cf.model_name == 'yolo':
            model = build_yolo(in_shape,
                               cf.dataset.n_classes,
                               cf.dataset.n_priors,
                               load_pretrained=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
        else:
            raise ValueError('Unknown model')

        # Load pretrained weights
        if cf.load_pretrained:
            print('   loading model weights from: ' + cf.weights_file + '...')
            model.load_weights(cf.weights_file, by_name=True)

        # Compile model
        model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

        # Show model structure
        if cf.show_model:
            model.summary()
            plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

        # Output the model
        print('   Model: ' + cf.model_name)
        # `model` is a plain Keras model; One_Net_Model is a wrapper class so that we
        # can also have composite models (like GANs) built from a pair of Keras
        # models, each with its own way to train, test and predict
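        # A rough sketch of the interface such a wrapper might expose (method names
        # here are assumptions, not the project's actual API):
        #
        #   class One_Net_Model(object):
        #       def __init__(self, model, cf, optimizer): ...
        #       def train(self, train_gen, valid_gen, cb): ...
        #       def test(self, test_gen): ...
        #       def predict(self, pred_gen, tag='pred'): ...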
        return One_Net_Model(model, cf, optimizer)