Example No. 1
    def main(self):
        read_flags()

        if FLAGS.load_model:
            self.model = self.restore_from_saved(FLAGS.load_model)
        else:
            self.model = self.make_model()

        self.model.summary()

        self.train = load_ava(FLAGS.train)
        self.val = load_ava(FLAGS.val)

        log.info('Got %d samples for training, %d for validation',
                 len(self.train), len(self.val))

        self.train, median = filter_dataset(self.train, FLAGS.qgap)
        self.val, _ = filter_dataset(self.val, 0, median)

        log.info('Setting up model for %d GPUs', FLAGS.gpus)
        # self.trainable_model = keras_util.TensorboardEnabledModel(
        #     input=self.model.input, output=self.model.output)
        self.trained_model = self.make_trained_model()
        if FLAGS.gpus > 1:
            self.trained_model = multi_gpu.make_parallel(
                self.trained_model, FLAGS.gpus)
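        # make_parallel splits each incoming batch across the GPUs, so the effective
        # batch below is the per-GPU batch size times the number of GPUs.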
        self.batch_size = FLAGS.batch_size * FLAGS.gpus

        self.compile_model(self.trained_model)

        self.do_train()
def lstm_network_ii(num_classes, num_hidden_units, max_len, word_dim, dropout):
    # Note: variable-length sequences are not supported here; max_len must be a fixed value (e.g. 30).
    model = Sequential()
    model.add(
        LSTM(num_hidden_units,
             activation='tanh',
             return_sequences=True,
             input_shape=(max_len, word_dim)))
    model.add(Dropout(dropout))
    model.add(LSTM(num_hidden_units, return_sequences=True, activation='tanh'))
    model.add(Dropout(dropout))
    model.add(LSTM(num_hidden_units, return_sequences=True, activation='tanh'))
    model.add(Dropout(dropout))
    model.add(
        LSTM(num_hidden_units, return_sequences=False, activation='tanh')
    )  # return_sequences controls whether the LSTM emits an output at every time step or only at the last one
    model.add(Dropout(dropout))
    model.add(Dense(num_classes, init='uniform'))
    model.add(Activation('softmax'))

    model = make_parallel(model, 2)
    print('Model compilation started')
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop')  # rmsprop tends to work better than SGD for RNNs
    print('Model compilation done')

    return model
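
# Illustrative sketch (not part of the original example) of the return_sequences remark above:
# with return_sequences=True the LSTM emits one output per time step, with False only the
# final step's output. The sizes used here (30 steps, 300-dim words, 64 units) are hypothetical.
from keras import backend as K
from keras.layers import Input, LSTM

_seq = Input(shape=(30, 300))
print(K.int_shape(LSTM(64, return_sequences=True)(_seq)))   # (None, 30, 64)
print(K.int_shape(LSTM(64, return_sequences=False)(_seq)))  # (None, 64)
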
def lstm_cnn_network(num_classes, num_hidden_units, max_len, word_dim, dropout,
                     img_dim, num_fully_units):
    image_model = Sequential()
    image_model.add(Reshape((img_dim, ), input_shape=(img_dim, )))

    lstm_model = Sequential()
    lstm_model.add(
        LSTM(num_hidden_units,
             activation='tanh',
             return_sequences=True,
             input_shape=(max_len, word_dim)))
    lstm_model.add(Dropout(0.2))
    lstm_model.add(
        LSTM(num_hidden_units, return_sequences=True, activation='tanh')
    )  # return_sequences controls whether the LSTM emits an output at every time step or only at the last one
    lstm_model.add(Dropout(0.2))
    lstm_model.add(
        LSTM(num_hidden_units, return_sequences=False, activation='tanh'))
    lstm_model.add(Dropout(0.2))
    #lstm_model.add(LSTM(num_hidden_units,return_sequences=False,activation='tanh'))
    #lstm_model.add(Dropout(dropout))

    combined_model = Sequential()
    combined_model.add(
        Merge([image_model, lstm_model], mode='concat', concat_axis=1))
    combined_model.add(Dense(num_fully_units, init='glorot_uniform'))
    combined_model.add(Activation('tanh'))
    combined_model.add(Dropout(dropout))
    combined_model.add(Dense(num_fully_units, init='glorot_uniform'))
    combined_model.add(Activation('tanh'))
    combined_model.add(Dropout(dropout))
    #combined_model.add(Dense(num_fully_units,init='glorot_uniform'))
    #combined_model.add(Activation('tanh'))
    #combined_model.add(Dropout(dropout))
    combined_model.add(Dense(num_classes, init='glorot_uniform'))
    combined_model.add(Activation('softmax'))

    combined_model = make_parallel(combined_model, 2)
    print('Model compilation started')
    combined_model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop')  # rmsprop tends to work better than SGD for RNNs
    print('Model compilation done')

    return combined_model
Example No. 4
base_model = InceptionV3(include_top=False,
                         input_shape=(IMG_SIZE, IMG_SIZE, 3),
                         pooling='avg',
                         weights='imagenet')
for layer in base_model.layers:
    layer.trainable = False

x = base_model.output
x = Dense(2048, activation='relu')(x)
x = Dropout(0.25)(x)
x = Dense(17)(x)
output = Lambda(my_activation)(x)

optimizer = Adam(0.001, decay=0.0003)
model = Model(inputs=base_model.inputs, outputs=output)
model = make_parallel(model, 2)

model.compile(loss=my_loss,
              optimizer=optimizer,
              metrics=['accuracy', fbeta_score_K])

try:
    model.fit_generator(
        generator=batch_generator_train(list(zip(x_train, y_train)), IMG_SIZE,
                                        BATCH),
        steps_per_epoch=np.ceil(len(x_train) / BATCH),
        epochs=1,
        verbose=1,
        validation_data=batch_generator_train(list(zip(x_valid, y_valid)),
                                              IMG_SIZE, 16),
        validation_steps=np.ceil(len(x_valid) / 16),
Example No. 5
def main():
    from keras.layers import Dense, Conv1D, Activation, GlobalMaxPooling1D, Input, Embedding, Multiply
    from keras.models import Model
    from keras import backend as K
    from keras import metrics
    import multi_gpu
    import os
    import math
    import random
    import argparse
    import numpy as np
    import requests

    batch_size = 100
    input_dim = 257  # every byte plus a special padding symbol
    padding_char = 256

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus', help='number of GPUs', default=1)

    args = parser.parse_args()
    ngpus = int(args.gpus)

    if os.path.exists('malconv.h5'):
        print("restoring malconv.h5 from disk for continuation training...")
        from keras.models import load_model
        basemodel = load_model('malconv.h5')
        _, maxlen, embedding_size = basemodel.layers[1].output_shape
    else:
        maxlen = 2**20  # 1MB
        embedding_size = 8

        # define model structure
        inp = Input(shape=(maxlen, ))
        emb = Embedding(input_dim, embedding_size)(inp)
        filt = Conv1D(filters=128,
                      kernel_size=500,
                      strides=500,
                      use_bias=True,
                      activation='relu',
                      padding='valid')(emb)
        attn = Conv1D(filters=128,
                      kernel_size=500,
                      strides=500,
                      use_bias=True,
                      activation='sigmoid',
                      padding='valid')(emb)
        gated = Multiply()([filt, attn])
        feat = GlobalMaxPooling1D()(gated)
        dense = Dense(128, activation='relu')(feat)
        outp = Dense(1, activation='sigmoid')(dense)

        basemodel = Model(inp, outp)

    basemodel.summary()

    print("Using %i GPUs" % ngpus)

    if ngpus > 1:
        model = multi_gpu.make_parallel(basemodel, ngpus)
    else:
        model = basemodel

    from keras.optimizers import SGD
    model.compile(loss='binary_crossentropy',
                  optimizer=SGD(lr=0.01,
                                momentum=0.9,
                                nesterov=True,
                                decay=1e-3),
                  metrics=[metrics.binary_accuracy])

    def bytez_to_numpy(bytez, maxlen):
        b = np.ones((maxlen, ), dtype=np.uint16) * padding_char
        bytez = np.frombuffer(bytez[:maxlen], dtype=np.uint8)
        b[:len(bytez)] = bytez
        return b

    def getfile_service(sha256, url=None, maxlen=maxlen):
        if url is None:
            raise NotImplementedError(
                "You must provide your own url for getting file bytez by sha256"
            )
        r = requests.get(url, params={'sha256': sha256})
        if not r.ok:
            return None
        return bytez_to_numpy(r.content, maxlen)

    def generator(hashes, labels, batch_size, shuffle=True):
        X = []
        y = []
        zipped = list(zip(hashes, labels))
        while True:
            if shuffle:
                random.shuffle(zipped)
            for sha256, l in zipped:
                x = getfile_service(sha256)
                if x is None:
                    continue
                X.append(x)
                y.append(l)
                if len(X) == batch_size:
                    yield np.asarray(X, dtype=np.uint16), np.asarray(y)
                    X = []
                    y = []

    import pandas as pd
    train_labels = pd.read_csv('ember_training.csv.gz')
    train_labels = train_labels[train_labels['y'] != -1]  # get only labeled samples
    labels = train_labels['y'].tolist()
    hashes = train_labels['sha256'].tolist()

    from sklearn.model_selection import train_test_split
    hashes_train, hashes_val, labels_train, labels_val = train_test_split(
        hashes, labels, test_size=200)

    train_gen = generator(hashes_train, labels_train, batch_size)
    val_gen = generator(hashes_val, labels_val, batch_size)

    from keras.callbacks import LearningRateScheduler

    base = K.get_value(model.optimizer.lr)

    def schedule(epoch):
        return base / 10.0**(epoch // 2)
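    # For example, with the SGD base lr of 0.01 set above, this schedule yields 0.01 for
    # epochs 0-1, 0.001 for epochs 2-3, 0.0001 for epochs 4-5, and so on (a 10x drop
    # every two epochs).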

    model.fit_generator(
        train_gen,
        steps_per_epoch=len(hashes_train) // batch_size,
        epochs=10,
        validation_data=val_gen,
        callbacks=[LearningRateScheduler(schedule)],
        validation_steps=int(math.ceil(len(hashes_val) / batch_size)),
    )

    basemodel.save('malconv.h5')

    test_labels = pd.read_csv('ember_test.csv.gz')
    labels_test = test_labels['y'].tolist()
    hashes_test = test_labels['sha256'].tolist()

    test_generator = generator(hashes_test,
                               labels_test,
                               batch_size=1,
                               shuffle=False)
    test_p = basemodel.predict_generator(test_generator,
                                         steps=len(test_labels),
                                         verbose=1)
Example No. 6
        pointnet_points = args.pointnet_points
        model = get_model_pointnet(pointnet_points)
    else:
        model = get_model_recurrent(
            points_per_ring,
            len(rings),
            hidden_neurons=args.hidden_neurons,
            sector_splits=args.sector_splits,
            bidirectional_first_pass=args.bidirectional_first_pass)

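# With a single per-GPU batch size the effective batch is gpus * batch_size[0]; with several
# sizes, the effective batch is their sum and (judging by the `splits=` argument) make_parallel
# divides each batch unevenly across the GPUs according to those sizes.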
if (args.gpus > 1) or (len(args.batch_size) > 1):
    assert K._backend == 'tensorflow'
    import multi_gpu

    if len(args.batch_size) == 1:
        model = multi_gpu.make_parallel(model, args.gpus)
        BATCH_SIZE = args.gpus * args.batch_size[0]
    else:
        BATCH_SIZE = sum(args.batch_size)
        model = multi_gpu.make_parallel(model,
                                        args.gpus,
                                        splits=args.batch_size)
else:
    BATCH_SIZE = args.batch_size[0]

if args.validate_split is None:
    if args.r2 or args.r3:
        validate_items = []
        if args.r2:
            validate_items.extend(
                get_items(provider_didi.get_tracklets(
Example No. 7
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model


if __name__ == '__main__':
    model = VGG19(include_top=True, weights=None, classes=10, input_shape=(32, 32, 3))

    if NUM_GPU > 1:
        model = make_parallel(model, NUM_GPU)

    num_classes = 10

    data_augmentation = False

    # The data, shuffled and split between train and test sets:
    (x_train, y_train), (x_test, y_test) = load_data(DATASET_DIR)
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)

    x_train = x_train.astype('float32')
Example No. 8
def get_unet(ngpus):
    inputs = Input((1, img_rows, img_cols))
    conv1 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv4 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv5 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(up6)
    conv6 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    if ngpus > 1:
        model = make_parallel(model, ngpus)


    # Adam(lr=1.0e-5)
    sgd = optimizers.SGD(lr=0.01,
                         decay=1e-6,
                         momentum=0.9,
                         nesterov=True,
                         clipvalue=0.5)
    model.compile(optimizer=sgd,
                  loss=dice_coef_loss,
                  metrics=[dice_coef, sigma])

    return model
Example No. 9
def cnn_TrainTest(no_of_epochs,
                  no_of_gpus,
                  train_b_size,
                  valid_b_size,
                  data_type,
                  experiment_folder,
                  input_data_dir,
                  data_sub_type='',
                  setSize=1,
                  width=128,
                  height=128,
                  chanels=3,
                  test_b_size=32):

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
    K.set_session(sess)

    epochs = no_of_epochs  #50-100-150
    gpu_no = no_of_gpus  #1-2
    train_batch_size = train_b_size  #32
    valid_batch_size = valid_b_size  #32
    test_batch_size = test_b_size  #32

    setDimensions = setSize  #1
    img_height = width  #128
    img_width = height  #128
    channels = chanels  #3

    class_names = []

    datatype = data_type  #'Cepstr'
    exp_folder = experiment_folder  #'no_of_classes_111'

    if data_sub_type == '':
        train_data_dir = input_data_dir + datatype + '/' + exp_folder + '/train/'
        validation_data_dir = input_data_dir + datatype + '/' + exp_folder + '/validate/'
        test_data_dir = input_data_dir + datatype + '/' + exp_folder + '/test/'
        path = '/home/gpu/Documents/PycharmProjects/' + datatype + '/Results'
    else:
        train_data_dir = input_data_dir + datatype + '/' + data_sub_type + '/' + exp_folder + '/train/'
        validation_data_dir = input_data_dir + datatype + '/' + data_sub_type + '/' + exp_folder + '/validate/'
        test_data_dir = input_data_dir + datatype + '/' + data_sub_type + '/' + exp_folder + '/test/'
        path = '/home/gpu/Documents/PycharmProjects/' + datatype + '/Results/' + data_sub_type

    run_name = '_' + datatype + '_' + exp_folder + '_' + str(
        train_batch_size) + '_' + str(valid_batch_size)

    graphPath = path + '/' + exp_folder + '/Graph/' + run_name + '/'
    csvPath = path + '/' + exp_folder + '/'
    checkpointerPath = path + '/' + exp_folder + '/Model/'
    predictionsPath = path + '/' + exp_folder + '/'

    if not os.path.exists(graphPath):
        os.makedirs(graphPath)
    if not os.path.exists(csvPath):
        os.makedirs(csvPath)
    if not os.path.exists(checkpointerPath):
        os.makedirs(checkpointerPath)
    if not os.path.exists(predictionsPath):
        os.makedirs(predictionsPath)

    csvPath = csvPath + run_name + '_loss.csv'
    checkpointerPath = checkpointerPath + run_name + '.h5'
    summaryPath = predictionsPath + run_name + '_summary.csv'
    predictionsPath = predictionsPath + run_name + '_predictions.csv'

    class_names = [d for d in os.listdir(train_data_dir)]
    no_of_classes = len(class_names)

    train_file_no = 0
    aa = 1
    for x in class_names:
        list_dir = os.path.join(train_data_dir, x)
        for name in os.listdir(list_dir):
            isfile = os.path.isfile(list_dir + '/' + name)
            if isfile:
                train_file_no = train_file_no + 1
                # count files
            if aa == 1 and isfile:  # set the tensor shape once, from the first image
                img = cv2.imread(os.path.join(list_dir, name))
                if setDimensions == 0:  # if dimensions were not given explicitly, take them from the first image
                    img_height, img_width, channels = img.shape
                # set the tensor shape according to the image data format
                if K.image_data_format() == 'channels_first':
                    input_shape = (channels, img_width, img_height)
                else:
                    input_shape = (img_width, img_height, channels)  # tensorflow default
                aa = 2

    validation_file_no = 0
    for x in class_names:
        list_dir = os.path.join(validation_data_dir, x)
        for name in os.listdir(list_dir):
            isfile = os.path.isfile(list_dir + '/' + name)
            if isfile:
                validation_file_no = validation_file_no + 1
                # count files

    test_file_no = 0
    for x in class_names:
        list_dir = os.path.join(test_data_dir, x)
        for name in os.listdir(list_dir):
            isfile = os.path.isfile(list_dir + '/' + name)
            if isfile:
                test_file_no = test_file_no + 1
                # count files

    train_batches = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        classes=class_names,
        shuffle=True,
        class_mode='categorical',
        batch_size=train_batch_size)

    valid_batches = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        classes=class_names,
        shuffle=True,
        batch_size=valid_batch_size,
        class_mode='categorical')

    test_batches = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        test_data_dir,
        target_size=(img_width, img_height),
        classes=class_names,
        shuffle=False,
        batch_size=test_batch_size,
        class_mode='categorical')

    # TEST show images
    # imgs,labels = next(train_batches)
    # showImages.plots(imgs, titles=labels)

    # network topology
    model = Sequential()

    model.add(Convolution2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3)))

    model.add(Convolution2D(64, (2, 2)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3)))

    model.add(Convolution2D(64, (2, 2)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(BatchNormalization())

    model.add(Dense(no_of_classes))
    model.add(Activation('softmax'))

    # opt = SGD(lr=2e-3, momentum=0.9)
    opt = optimizers.Adam(lr=0.0001,
                          beta_1=0.95,
                          beta_2=0.999,
                          epsilon=1e-08,
                          decay=0.0005)

    print(model.summary())

    if gpu_no > 1:
        model = multi_gpu.make_parallel(model, gpu_no)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    csv_log = callbacks.CSVLogger(csvPath, separator=',', append=False)

    checkpointer = callbacks.ModelCheckpoint(filepath=checkpointerPath,
                                             verbose=0,
                                             save_best_only=True,
                                             mode='min')

    # early_stopping=callbacks.EarlyStopping(monitor='val_loss', min_delta= 0, patience= 0, verbose= 0, mode= 'min')

    tbCallBack = keras.callbacks.TensorBoard(log_dir=graphPath,
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_images=True)

    # print(len(test_img_data))
    history = model.fit_generator(
        train_batches,
        steps_per_epoch=(train_file_no // train_batch_size + 1) * gpu_no,
        epochs=epochs,
        verbose=1,
        validation_data=valid_batches,
        validation_steps=(validation_file_no // valid_batch_size + 1) * gpu_no,
        callbacks=[checkpointer, tbCallBack, csv_log])

    print('Testing model')

    # model = load_model(path + 'Models/02ClNoNoise/' + name + '.h5')

    predictionResult = model.predict_generator(
        test_batches, steps=test_file_no // test_batch_size + 1, verbose=0)
    f_names = test_batches.filenames
    # test_imgs, test_labels = next(test_batches)

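    # Rebuild the ground-truth label for each test file from its class sub-directory name,
    # using the generator's class_indices mapping (filenames look like '<class>/<file>').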
    tmp = test_batches.class_indices
    tmp_batch_files = test_batches.filenames
    Y_true = []
    for t in tmp_batch_files:
        for item in tmp:
            if t.split("/")[0] == item:
                Y_true.append(tmp[item])

    Y_true = np_utils.to_categorical(Y_true, no_of_classes)

    # original = sys.stdout
    file_w = open(predictionsPath, 'w')
    file_s = open(summaryPath, 'w')

    predictions = np.argmax(predictionResult, axis=1)

    print(run_name, file=file_w)
    print(run_name, file=file_s)

    #print("\n")
    #print("acc: " + str(history.history["acc"][epochs - 1]) + " loss: " + str(
    #    history.history["loss"][epochs - 1]) +
    #      " val_acc: " + str(history.history["val_acc"][epochs - 1]) + " val_loss: " + str(
    #    history.history["val_loss"][epochs - 1]))

    aaa = "acc: " + str(history.history["acc"][epochs - 1]) + " loss: " + str(
        history.history["loss"][epochs - 1]) + " val_acc: " + str(
            history.history["val_acc"][epochs - 1]) + " val_loss: " + str(
                history.history["val_loss"][epochs - 1])

    bbb = "train batch size - " + str(
        train_batch_size) + " validation batch size - " + str(valid_batch_size)

    print("\n", file=file_s)
    print("Stats", file=file_s)
    print(aaa, file=file_s)

    print("\n", file=file_s)
    print("Batches", file=file_s)
    print(bbb, file=file_s)

    print("\n", file=file_s)
    print("Classification report", file=file_s)
    cr = classification_report(y_true=np.argmax(Y_true, axis=1),
                               y_pred=predictions,
                               target_names=class_names)
    print(cr, file=file_s)
    # print(classification_report(y_true=np.argmax(Y_true, axis=1), y_pred=predictions,
    #                            target_names=class_names))

    print("\n", file=file_s)
    print("Confusion Matrix", file=file_s)
    cm = confusion_matrix(np.argmax(Y_true, axis=1), predictions)
    qq = cm.tolist()
    for item in qq:
        print(item, file=file_s)
    #print>> file_s, qq
    # print(confusion_matrix(np.argmax(Y_true, axis=1), predictions))
    file_s.close()

    print("\n", file=file_w)
    print("Prediction Result", file=file_w)
    qq = predictionResult.tolist()
    for item in qq:
        print(item, file=file_w)

    print("\n", file=file_w)
    print("Classification Result", file=file_w)
    qq = predictionResult.tolist()
    for item in qq:
        maxValIndx = item.index(max(item))  # index of the winning class
        newRow = [0] * len(item)
        newRow[maxValIndx] = 1
        print(newRow, file=file_w)

    # print("\n")
    # print("Predictions")
    # print(predictionResult)
    print("\n", file=file_w)
    print("True classes", file=file_w)
    qq = Y_true.tolist()
    for item in qq:
        print(item, file=file_w)
    # print>>file_w, Y_true

    # print("\n")
    # print("True classes")
    # print(Y_true)
    print("\n", file=file_w)
    print("Filenames", file=file_w)
    for item in f_names:
        print(item, file=file_w)
    # print>>file_w, f_names

    # print("\n")
    # print("Filenames")
    # print(f_names)

    file_w.close()

    # sys.stdout = original
    sess.close()

    print('done')
Example No. 10
     '_depth_'+ str(args.depth) + \
     '_AD_' + str(args.AD) + \
     '_nbfilters_' + str(args.nb_filters) + \
     '_name_' + args.name
    return name


input_shape = (args.dimension, args.dimension, 1)

model = cg.residual_projectionNet2(depth=args.depth,
                                   nb_filters=args.nb_filters,
                                   input_shape=input_shape,
                                   dropout=args.dropout)

if args.num_gpus > 1:
    model = make_parallel(model, args.num_gpus)
    args.batch_size = args.batch_size * args.num_gpus

model.name = get_model_name()

if args.pretrained_model is not None:
    best = hf.load_trained_CNN(name=args.pretrained_model, folder='')
    model.set_weights(best.get_weights())

print('model name: ' + model.name)

if args.just_return_name:
    with open('/home/bmkelly/dl-limitedview-prior/tmp.out', 'w') as outfile:
        outfile.write(get_model_name())

    sys.exit()
Example No. 11
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models')
model_full.load_weights(weights_path)

last_conv_idx = [i for i,l in enumerate(model_full.layers) if type(l) is Conv2D][-1]
conv_layers = model_full.layers[:last_conv_idx + 2]  # max pooling is last layer

last_conv_layer = conv_layers[-1]
model = Model(img_input, last_conv_layer.get_output_at(0), name='vgg16_conv')

# make_parallel simply copies the model onto multiple GPUs and splits up minibatches
# across the GPUs (a rough sketch of the idea follows the if-block below)
num_gpus = 2
if num_gpus > 1:
  model = make_parallel(model, num_gpus)
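
# A minimal, hypothetical sketch of the data-parallel pattern a helper like make_parallel
# typically implements (NOT the actual implementation imported above): replicate the model on
# every GPU, slice each incoming batch with a Lambda layer, run one slice per device, and
# concatenate the per-GPU outputs on the CPU. Assumes a TensorFlow backend and a global batch
# size divisible by the GPU count.
import tensorflow as tf
from keras.layers import Lambda, concatenate
from keras.models import Model

def make_parallel_sketch(base_model, gpu_count):
  def get_slice(data, idx, parts):
    # take the idx-th of `parts` equal slices along the batch dimension
    shape = tf.shape(data)
    size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
    start = tf.concat([shape[:1] // parts * idx, shape[1:] * 0], axis=0)
    return tf.slice(data, start, size)

  outputs_all = [[] for _ in base_model.outputs]
  for g in range(gpu_count):
    with tf.device('/gpu:%d' % g):
      # feed each GPU its own slice of every input tensor
      slices = [Lambda(get_slice, arguments={'idx': g, 'parts': gpu_count})(x)
                for x in base_model.inputs]
      outs = base_model(slices if len(slices) > 1 else slices[0])
      if not isinstance(outs, list):
        outs = [outs]
      for i, o in enumerate(outs):
        outputs_all[i].append(o)

  # gather the per-GPU results back into one batch on the CPU
  with tf.device('/cpu:0'):
    merged = [concatenate(o, axis=0) if len(o) > 1 else o[0] for o in outputs_all]
  return Model(inputs=base_model.inputs, outputs=merged)
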

path = '/home/cdsw/train_data/256_ObjectCategories/'
# Do not shuffle the data! You'll lose the label ordering
generator = image.ImageDataGenerator()
batches = generator.flow_from_directory(path + 'train', target_size=(224, 224), class_mode='categorical', shuffle=False, batch_size=batch_size)
val_batches = generator.flow_from_directory(path + 'valid', target_size=(224, 224), class_mode='categorical', shuffle=False, batch_size=batch_size)
test_batches = generator.flow_from_directory(path + 'test', target_size=(224, 224), class_mode='categorical', shuffle=False, batch_size=batch_size)
(val_classes, trn_classes, val_labels, trn_labels) = \
(val_batches.classes, batches.classes, to_categorical(val_batches.classes), to_categorical(batches.classes))
test_classes, test_labels = test_batches.classes, to_categorical(test_batches.classes)
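# predict_generator below walks the generators in file order, so the labels built above stay
# aligned with the computed features only because shuffle=False was used.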

def featurize_and_save(phase, batches, labels):
  t0 = time.time()
  conv_feat = model.predict_generator(batches, int(batches.samples / batch_size) + 1)
  c = bcolz.carray(conv_feat, rootdir='./data/conv_%s_feat.dat' % phase)
Example No. 12
model.add(Convolution2D(256, (kernel_size[0], kernel_size[1])))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Convolution2D(128, (kernel_size[0], kernel_size[1])))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

if ngpus > 1:
    model = make_parallel(model,ngpus)

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

start_time = time.time()
model.fit(X_train, Y_train, batch_size=batch_size*ngpus, epochs=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))#, callbacks=[tensorboard])
score = model.evaluate(X_test, Y_test, verbose=0)
model.summary()
print('Test score:', score[0])
print('Test accuracy:', score[1])
duration = time.time() - start_time
print('Total Duration (%.3f sec)' % duration)
def ResNet(kernel_size=(7, 7, 7)):
    input_layer = Input(batch_shape=(None, 91, 109, 91, 1))

    n_filters_conv = 64
    x = conv_bn_act(input_layer,
                    filters=n_filters_conv,
                    kernel_size=kernel_size,
                    padding='valid',
                    base_name='1st',
                    num_layer=1)
    x = conv_bn_act(x,
                    filters=n_filters_conv,
                    kernel_size=kernel_size,
                    padding='valid',
                    base_name='2nd',
                    num_layer=2)

    n_filters_block = 32
    x = Conv3D(filters=n_filters_block,
               kernel_size=kernel_size,
               strides=(2, 2, 2),
               padding='same',
               name='3rd-conv3')(x)
    # 1st block of 2 convolutions/batch-norm/relu, followed by adding the outputs and another block
    block = conv_block(x, n_filters_block, 'block1', kernel_size=kernel_size)
    add1 = Add(name='add1')([x, block])
    block2 = conv_block(add1,
                        n_filters_block,
                        'block2',
                        kernel_size=kernel_size)
    add2 = Add(name='add2')([add1, block2])
    x = bn_act(add2, base_name='bn-act', num_layer=1)

    n_filters_block = 32
    x = Conv3D(filters=n_filters_block,
               kernel_size=kernel_size,
               strides=kernel_size,
               padding='same',
               name='2nd-conv')(x)
    # 2nd block of 2 convolutions/batch-norm/relu, followed by adding the outputs and another block
    block3 = conv_block(x, n_filters_block, 'block3', kernel_size=kernel_size)
    add3 = Add(name='add3')([x, block3])
    block4 = conv_block(add3,
                        n_filters_block,
                        'block4',
                        kernel_size=kernel_size)
    add4 = Add(name='add4')([add3, block4])
    x = bn_act(add4, base_name='bn-act', num_layer=2)

    n_filters_block = 32
    x = Conv3D(filters=n_filters_block,
               kernel_size=kernel_size,
               strides=(2, 2, 2),
               padding='same',
               name='3rd-conv')(x)
    # 3rd block of 2 convolutions/batch-norm/relu, followed by adding the outputs and another block
    block5 = conv_block(x, n_filters_block, 'block5', kernel_size=kernel_size)
    add5 = Add(name='add5')([x, block5])
    block6 = conv_block(add5,
                        n_filters_block,
                        'block6',
                        kernel_size=kernel_size)
    add6 = Add(name='add6')([add5, block6])

    # Finalizing the network
    x = MaxPool3D(pool_size=(2, 2, 2), name='mp3d1')(add6)
    x = Flatten(name='flatten')(x)
    x = Dense(units=128, activation='relu', name='fc1')(x)
    output = Dense(units=2, activation='softmax')(x)

    model = Model(input_layer, output)
    model.summary()

    model = make_parallel(model, 2)
    model.summary()

    model.compile(optimizer=Adam(lr=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['accuracy', balanced_accuracy])
    return model