Example #1
def manip(args, test_list, model_list, net_input_shape):
    if args.weights_path == '':
        weights_path = join(args.check_dir, args.output_name + '_model_' + args.time + '.hdf5')
    else:
        weights_path = join(args.data_root_dir, args.weights_path)

    output_dir = join(args.data_root_dir, 'results', args.net, 'split_' + str(args.split_num))
    manip_out_dir = join(output_dir, 'manip_output')
    try:
        makedirs(manip_out_dir)
    except OSError:
        pass  # directory already exists

    assert(len(model_list) == 3), "Must be using segcaps with the three models."
    manip_model = model_list[2]
    try:
        manip_model.load_weights(weights_path)
    except (IOError, OSError):
        print('Unable to find weights path. Testing with random weights.')
    print_summary(model=manip_model, positions=[.38, .65, .75, 1.])

    # Manipulating capsule vectors
    print('Testing... This will take some time...')

    for i, img in enumerate(tqdm(test_list)):
        sitk_img = sitk.ReadImage(join(args.data_root_dir, 'imgs', img[0]))
        img_data = sitk.GetArrayFromImage(sitk_img)
        num_slices = img_data.shape[0]
        sitk_mask = sitk.ReadImage(join(args.data_root_dir, 'masks', img[0]))
        gt_data = sitk.GetArrayFromImage(sitk_mask)

        x, y = img_data[num_slices//2, :, :], gt_data[num_slices//2, :, :]
        x, y = np.expand_dims(np.expand_dims(x, -1), 0), np.expand_dims(np.expand_dims(y, -1), 0)

        noise = np.zeros([1, 512, 512, 1, 16])
        x_recons = []
        for dim in trange(16):
            for r in [-0.25, -0.125, 0, 0.125, 0.25]:
                tmp = np.copy(noise)
                tmp[:, :, :, :, dim] = r
                x_recon = manip_model.predict([x, y, tmp])
                x_recons.append(x_recon)

        x_recons = np.concatenate(x_recons)

        out_img = combine_images(x_recons, height=16)
        out_image = out_img * 4096
        out_image[out_image > 574] = 574
        out_image = out_image / 574 * 255

        Image.fromarray(out_image.astype(np.uint8)).save(join(manip_out_dir, img[0][:-4] + '_manip_output.png'))

    print('Done.')
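
For orientation: the two inner loops above sweep each of the 16 capsule dimensions through five offsets, so every slice produces 16 × 5 = 80 reconstructions, which combine_images tiles into a 16-row grid (one row per dimension, one column per offset). A minimal NumPy sketch of that bookkeeping, with a hypothetical stub standing in for manip_model.predict:

import numpy as np

def fake_predict(noise):
    # hypothetical stand-in for manip_model.predict([x, y, noise])
    return np.zeros((1, 8, 8))

noise = np.zeros([1, 8, 8, 1, 16])
x_recons = []
for dim in range(16):
    for r in [-0.25, -0.125, 0, 0.125, 0.25]:
        tmp = np.copy(noise)
        tmp[:, :, :, :, dim] = r  # perturb one capsule dimension at a time
        x_recons.append(fake_predict(tmp))

x_recons = np.concatenate(x_recons)
print(x_recons.shape)  # (80, 8, 8): 16 rows x 5 columns for combine_images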
Example #2
def main():
    train_x, test_x, train_y, test_y = loadData()
    train_x, test_x, train_y, test_y = reshapeData(train_x, test_x, train_y, test_y)
    printInfo(train_x, test_x, train_y, test_y)
    model, callbacks_list = keras_model(train_x.shape[1], train_x.shape[2])
    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=2, batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(test_x, test_y, verbose=1)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    print_summary(model)

    model.save('RPS.h5')
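
Every example on this page leans on keras.utils.print_summary; called with no extra arguments it prints the same table as model.summary(). A minimal sketch on a toy model (layer sizes are arbitrary):

from keras.models import Sequential
from keras.layers import Dense
from keras.utils import print_summary

model = Sequential([Dense(8, input_shape=(4,)), Dense(1)])
print_summary(model)                  # same output as model.summary()
print_summary(model, line_length=80)  # wider table, as several examples use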
Example #4
def main():
    data = pd.read_csv("train_foo.csv")
    dataset = np.array(data)
    np.random.shuffle(dataset)
    X = dataset
    Y = dataset
    print(X.shape)
    print(Y.shape)
    X = X[:, 1:2501]
    Y = Y[:, 0]

    X_train = X[0:5400, :]
    X_train = X_train / 255.
    X_test = X[5400:7199, :]
    X_test = X_test / 255.

    # Reshape
    Y = Y.reshape(Y.shape[0], 1)
    Y_train = Y[0:5400, :]
    Y_train = Y_train.T
    Y_test = Y[5400:7199, :]
    Y_test = Y_test.T

    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    image_x = 50
    image_y = 50

    train_y = np_utils.to_categorical(Y_train)
    test_y = np_utils.to_categorical(Y_test)
    train_y = train_y.reshape(train_y.shape[1], train_y.shape[2])
    test_y = test_y.reshape(test_y.shape[1], test_y.shape[2])
    X_train = X_train.reshape(X_train.shape[0], 50, 50, 1)
    X_test = X_test.reshape(X_test.shape[0], 50, 50, 1)
    print("X_train shape: " + str(X_train.shape))
    print("X_test shape: " + str(X_test.shape))

    model, callbacks_list = keras_model(image_x, image_y)
    model.fit(X_train,
              train_y,
              validation_data=(X_test, test_y),
              epochs=5,
              batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(X_test, test_y, verbose=0)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    print_summary(model)

    model.save('emojinator.h5')
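
The transpose-then-reshape dance around np_utils.to_categorical above is equivalent to one-hot encoding the flattened label vector directly. A small sketch with toy integer labels (assuming, as in the CSV, that column 0 holds integer class ids):

import numpy as np
from keras.utils import np_utils

Y_train = np.array([[0, 2, 1, 2]])                  # shape (1, m), as after the .T
one_hot = np_utils.to_categorical(Y_train.ravel())  # shape (m, num_classes)
print(one_hot.shape)                                # (4, 3), same as the reshape trick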
Example #5
    def define_entire_network(self, input_shape, learning_r):
        self.input = Input(shape=input_shape)

        # "encoded" is the encoded representation of the input
        encoded = Conv2D(filters=40,
                         kernel_size=(4, 1),
                         activation='relu',
                         padding='valid')(self.input)
        encoded = Conv2D(filters=80,
                         kernel_size=(2, 1),
                         strides=(2, 1),
                         activation='relu',
                         padding='valid')(encoded)
        encoded = Conv2D(filters=80,
                         kernel_size=(2, 1),
                         activation='relu',
                         padding='valid')(encoded)
        encoded = Conv2D(filters=120,
                         kernel_size=(3, 1),
                         strides=(3, 1),
                         activation='relu',
                         padding='valid')(encoded)
        self.encoded = encoded

        decoded = Conv2DTranspose(filters=120,
                                  kernel_size=(3, 1),
                                  strides=(3, 1),
                                  activation='relu',
                                  padding='valid')(self.encoded)
        decoded = Conv2DTranspose(filters=80,
                                  kernel_size=(2, 1),
                                  activation='relu',
                                  padding='valid')(decoded)
        decoded = Conv2DTranspose(filters=80,
                                  kernel_size=(2, 1),
                                  strides=(2, 1),
                                  activation='relu',
                                  padding='valid')(decoded)
        decoded = Conv2DTranspose(filters=40,
                                  kernel_size=(4, 1),
                                  activation='relu',
                                  padding='valid')(decoded)
        decoded = Conv2D(filters=2,
                         kernel_size=(1, 1),
                         activation='relu',
                         padding='valid')(decoded)
        self.decoded = decoded

        # this model maps an input to its reconstruction
        self.model = Model(self.input, self.decoded)

        self.model.compile(optimizer=Adam(lr=learning_r), loss='mse')
        # self.model.compile(optimizer=Adadelta(), loss='mse')
        print_summary(self.model, line_length=80)
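
A sanity check on the mirrored encoder/decoder above: with 'valid' padding, Conv2D maps height H to (H - k)//s + 1 and Conv2DTranspose maps it back to (H - 1)*s + k, so the reconstruction height only matches the input when the strided layers divide evenly (for this stack, heights congruent to 5 mod 6, e.g. 23, work out). A quick trace under that assumption:

# Encoder (valid padding): H -> (H - k)//s + 1
H = 23
H = (H - 4)//1 + 1   # Conv2D k=4       -> 20
H = (H - 2)//2 + 1   # Conv2D k=2, s=2  -> 10
H = (H - 2)//1 + 1   # Conv2D k=2       -> 9
H = (H - 3)//3 + 1   # Conv2D k=3, s=3  -> 3
# Decoder (valid padding): H -> (H - 1)*s + k
H = (H - 1)*3 + 3    # Conv2DTranspose k=3, s=3 -> 9
H = (H - 1)*1 + 2    # Conv2DTranspose k=2      -> 10
H = (H - 1)*2 + 2    # Conv2DTranspose k=2, s=2 -> 20
H = (H - 1)*1 + 4    # Conv2DTranspose k=4      -> 23, matches the input
print(H)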
Example #6
def main():
    data = pd.read_csv(
        "C:/Users/meena/Desktop/Eckovation work/MachineLearning/data.csv")
    dataset = np.array(data)
    np.random.shuffle(dataset)
    X = dataset
    Y = dataset
    X = X[:, 0:1024]
    Y = Y[:, 1024]

    X_train = X[0:70000, :]
    X_train = X_train / 255.
    X_test = X[70000:72001, :]
    X_test = X_test / 255.

    # Reshape
    Y = Y.reshape(Y.shape[0], 1)
    Y_train = Y[0:70000, :]
    Y_train = Y_train.T
    Y_test = Y[70000:72001, :]
    Y_test = Y_test.T
    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    image_x = 32
    image_y = 32

    train_y = np_utils.to_categorical(Y_train)
    test_y = np_utils.to_categorical(Y_test)
    train_y = train_y.reshape(train_y.shape[1], train_y.shape[2])
    test_y = test_y.reshape(test_y.shape[1], test_y.shape[2])
    X_train = X_train.reshape(X_train.shape[0], 32, 32, 1)

    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(train_y.shape))
    X_test = X_test.reshape(X_test.shape[0], 32, 32, 1)

    model, callbacks_list = keras_model(image_x, image_y)
    model.fit(X_train,
              train_y,
              validation_data=(X_test, test_y),
              epochs=1,
              batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(X_test, test_y, verbose=0)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    print_summary(model)

    model.save('devanagari.h5')
Example #7
def model_summary(model, fname):
    """ -------------------------------------------------------------------------------------------------------------
    Print a summary of the model, and plot a graph of the model

    model:          [keras.engine.training.Model]
    fname:          [str] name of the output image with path but without extension
    ------------------------------------------------------------------------------------------------------------- """
    utils.print_summary(model)
    fname += '.png'
    utils.plot_model(model,
                     to_file=fname,
                     show_shapes=True,
                     show_layer_names=True)
Example #8
def main():
    features, labels = loadFromPickle()
    features, labels = augmentData(features, labels)
    features, labels = shuffle(features, labels)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 40, 40, 1)
    test_x = test_x.reshape(test_x.shape[0], 40, 40, 1)
    model, callbacks_list = keras_model()
    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=5, batch_size=64,
              callbacks=callbacks_list)
    print_summary(model)
    model.save('Autopilot.h5')
Example #9
def train():

    model = UNet(cfg.input_shape)

    # Compile the model and print its summary
    model.compile(optimizer=cfg.optimizer, loss=cfg.loss, metrics=cfg.metrics)
    print_summary(model=model)

    # Training data generator G1
    G1 = imageSegmentationGenerator(cfg.train_images, cfg.train_annotations,
                                    cfg.train_batch_size, cfg.n_classes,
                                    cfg.input_shape[0], cfg.input_shape[1],
                                    cfg.output_shape[0], cfg.output_shape[1])
    # Validation data generator G2
    if cfg.validate:
        G2 = imageSegmentationGenerator(cfg.val_images, cfg.val_annotations,
                                        cfg.val_batch_size, cfg.n_classes,
                                        cfg.input_shape[0], cfg.input_shape[1],
                                        cfg.output_shape[0],
                                        cfg.output_shape[1])
    # Training loop
    save_index = 1
    for ep in range(cfg.epochs):
        # 1. Train in one of two modes
        if not cfg.validate:  # G1 only
            history = model.fit_generator(
                G1,
                steps_per_epoch=cfg.train_steps_per_epoch,
                workers=cfg.workers,
                epochs=1,
                verbose=1,
                use_multiprocessing=cfg.use_multiprocessing)
        else:  # both G1 and G2
            history = model.fit_generator(
                G1,
                steps_per_epoch=cfg.train_steps_per_epoch,
                workers=cfg.workers,
                epochs=1,
                verbose=1,
                use_multiprocessing=cfg.use_multiprocessing,
                validation_data=G2,
                validation_steps=cfg.validate_steps_per_epoch)

        # 2. Save the model
        if save_index == cfg.epochs_save:
            save_index = 1
            save_weights_name = 'model.{}'.format(ep)
            save_weights_path = os.path.join(cfg.save_weights_path,
                                             save_weights_name)
            model.save_weights(save_weights_path)
        save_index += 1
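
The save_index counter above saves a checkpoint on every cfg.epochs_save-th epoch; an equivalent, shorter formulation tests the epoch index directly. A sketch with hypothetical values:

epochs, epochs_save = 10, 3
for ep in range(epochs):
    # ... train one epoch ...
    if (ep + 1) % epochs_save == 0:
        print('would save model.{}'.format(ep))  # fires at ep = 2, 5, 8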
Example #10
def get_2d_lstm_model():

    input_A = Input(shape=(None, 108, 192, 3))

    image_model = TimeDistributed(
        MobileNet(input_shape=(108, 192, 3),
                  alpha=0.25,
                  depth_multiplier=1,
                  dropout=1e-3,
                  include_top=False,
                  weights=None,
                  input_tensor=None,
                  pooling='avg'))(input_A)

    image_model = Bidirectional(LSTM(16, activation=None))(image_model)

    image_model = BatchNormalization()(image_model)

    image_model = Activation('relu')(image_model)

    input_B = Input(shape=(128, 259, 1))

    audio_model = MobileNet(input_shape=(128, 259, 1),
                            alpha=0.25,
                            depth_multiplier=1,
                            dropout=1e-3,
                            include_top=False,
                            weights=None,
                            input_tensor=None,
                            pooling='avg')(input_B)

    audio_model = Dense(32)(audio_model)

    audio_model = BatchNormalization()(audio_model)

    audio_model = Activation('relu')(audio_model)

    model = Concatenate()([image_model, audio_model])

    model = Dense(1, activation='sigmoid')(model)

    model = Model(inputs=[input_A, input_B], outputs=model)

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print_summary(model, line_length=120)
    #plot_model(model, show_shapes=True, to_file='model.png')

    return model
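
The TimeDistributed wrapper above runs the MobileNet once per frame of the (batch, time, 108, 192, 3) input, yielding one pooled feature vector per timestep for the bidirectional LSTM. A minimal sketch of that shape contract with a toy inner layer:

from keras.layers import Input, TimeDistributed, Dense
from keras.models import Model

frames = Input(shape=(None, 4))                # (batch, time, features)
per_frame = TimeDistributed(Dense(2))(frames)  # Dense applied at every timestep
print(Model(frames, per_frame).output_shape)   # (None, None, 2)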
Example #11
def main():
    features, labels = loadFromPickle()
    # features, labels = augmentData(features, labels)
    features, labels = shuffle(features, labels)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
    test_x = test_x.reshape(test_x.shape[0], 28, 28, 1)
    model, callbacks_list = keras_model(28, 28)
    print_summary(model)
    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=3, batch_size=64,
              callbacks=[TensorBoard(log_dir="QuickDraw")])
    model.save('QuickDraw.h5')
Example #12
File: ntm.py Project: tsjayram/maes
    def pretty_print_str(self):
        model = self.model

        def print_fn(x):
            out.write(x)
            out.write('\n')

        h_line = '*' * 80 + '\n'
        out = io.StringIO()
        out.write('\n' + h_line)
        print_summary(model, line_length=200, positions=[.25, .7, .8, 1.],
                      print_fn=print_fn)
        out.write(h_line)
        return out.getvalue()
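
The io.StringIO detour above also works without a file-like object: print_summary calls print_fn once per formatted line, so appending to a list and joining gives the same string. A minimal alternative sketch:

from keras.utils import print_summary

def summary_as_string(model):
    lines = []
    print_summary(model, line_length=200, positions=[.25, .7, .8, 1.],
                  print_fn=lines.append)  # one call per summary line
    return '\n'.join(lines)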
Example #13
def get_3d_cnn_model_image_audio():
    input_A = Input(shape=(4, 108, 192, 3))

    image_model = Convolution3D(filters=32,
                                kernel_size=(3, 3, 3),
                                activation='relu',
                                padding='same',
                                data_format='channels_last')(input_A)

    image_model = MaxPooling3D(pool_size=(2, 2, 2))(image_model)

    image_model = Flatten()(image_model)

    image_model = Dense(32)(image_model)

    image_model = BatchNormalization()(image_model)

    image_model = Activation('relu')(image_model)

    input_B = Input(shape=(128, 345, 1))

    audio_model = MobileNet(input_shape=(128, 345, 1),
                            alpha=0.25,
                            depth_multiplier=1,
                            dropout=1e-3,
                            include_top=False,
                            weights=None,
                            input_tensor=None,
                            pooling='avg')(input_B)

    audio_model = Dense(32)(audio_model)

    audio_model = BatchNormalization()(audio_model)

    audio_model = Activation('relu')(audio_model)

    model = Concatenate()([image_model, audio_model])

    model = Dense(1, activation='sigmoid')(model)

    model = Model(inputs=[input_A, input_B], outputs=model)

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print_summary(model, line_length=120)

    return model
Example #14
def main():
    # prepare_data('./data')
    dataset = pd.read_csv('train_foo.csv')
    dataset = np.array(dataset)
    np.random.shuffle(dataset)
    X = dataset[:, 1:]
    Y = dataset[:, 0:1]
    X_train = X[0:2199, :]
    X_train = X_train / 255.
    X_test = X[2199:2749, :]
    X_test = X_test / 255.
    Y = Y.reshape(Y.shape[0], 1)
    Y_train = Y[0:2199, :]
    Y_train = Y_train.T
    Y_test = Y[2199:2749, :]
    Y_test = Y_test.T
    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    image_x = 256
    image_y = 256
    print('trainY', Y_train.shape)
    train_y = np_utils.to_categorical(Y_train)
    test_y = np_utils.to_categorical(Y_test)
    train_y = train_y.reshape(train_y.shape[1], train_y.shape[2])
    test_y = test_y.reshape(test_y.shape[1], test_y.shape[2])
    X_train = X_train.reshape(X_train.shape[0], 256, 256, 1)
    X_test = X_test.reshape(X_test.shape[0], 256, 256, 1)
    print("X_train shape: " + str(X_train.shape))
    print("X_test shape: " + str(X_test.shape))
    model, callbacks_list = keras_model(image_x, image_y)
    print('callbacks', callbacks_list)
    print('train y ', train_y)

    model.fit(X_train,
              train_y,
              validation_data=(X_test, test_y),
              epochs=10,
              batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(X_test, test_y, verbose=0)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    print_summary(model)

    model.save('emojinator.h5')
Example #15
def test_model2(input_length):
    features, labels = loadFromPickle()
    features, labels = shuffle(features, labels)
    features = features.reshape(features.shape[0], input_length)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.2)
    model, callbacks_list = keras_model2((input_length,), len(labels[0]))
    print_summary(model)
    model.fit(train_x, train_y, batch_size=128, epochs=20, verbose=1, validation_data=(test_x, test_y),
              callbacks=[TensorBoard(log_dir="TensorBoard")])

    score = model.evaluate(test_x, test_y, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])  # accuracy
    model.save('test_model2.h5')  # save the trained model
Example #16
def validate():
    data_dir = ".\\data\\"
    label_names = get_labels(data_dir)
    model = load_model('asr_model.h5')  # load the trained model

    features, labels = loadFromPickle()
    features, labels = shuffle(features, labels)
    features = features.reshape(features.shape[0], 20, 32, 1)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.3)
    print_summary(model)

    # Evaluate the model; verbose=0 suppresses per-batch logging
    score = model.evaluate(test_x, test_y, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])  # accuracy
Example #17
def main():
    dataset = pd.read_csv("train_foo.csv")
    # Shuffle and normalize the training dataset (train_foo.csv)
    dataset = dataset.sample(frac=1)
    X = dataset.iloc[:,1:]
    Y = np.array(dataset.iloc[:,0])
    X = X/255.                      # for normalizing the inputs
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=123, train_size = 0.8)
    
    #X_train = X_train.T
    #X_test = X_test.T
    print(Y_train.shape[0])
    Y_train = Y_train.reshape(Y_train.shape[0],1)
    Y_test = Y_test.reshape(-1,1)
    # Reshape
    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    image_x = 50
    image_y = 50

    train_y = np_utils.to_categorical(Y_train)
    test_y = np_utils.to_categorical(Y_test)
    train_y = train_y.reshape(train_y.shape[0], train_y.shape[1])
    test_y = test_y.reshape(test_y.shape[0], test_y.shape[1])
    X_train = np.array(X_train)
    X_train = X_train.reshape(X_train.shape[0], 50, 50, 1)
    X_test = np.array(X_test)
    X_test = X_test.reshape(X_test.shape[0], 50, 50, 1)
    print("X_train shape: " + str(X_train.shape))
    print("X_test shape: " + str(X_test.shape))

    model, callbacks_list = keras_model(image_x, image_y)
    model.fit(X_train, train_y, validation_data=(X_test, test_y), epochs=10, batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(X_test, test_y, verbose=0)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    print_summary(model)

    model.save('emojinator.h5')
Example #18
 def __init__(self, _train_list, _val_list, _inf_list, _dag_it = 0, _input_shape = (256, 1024, 3),
              _train_steps = 500, _val_steps = 200, _num_epochs = 15, _batch_size = 4, _gpu_num = '0, 1',
              _no_inidices = True, _segnet = False):
     self.dag_it = _dag_it
     self.train_list = _train_list
     self.val_list = _val_list
     self.inf_list = _inf_list
     self.base_dir = '/media/localadmin/Test/11Nils/kitti/dataset/sequences/Data/'
     self.img_dir = 'images/'
     self.label_dir = 'labels/'
     self.inf_dir = 'inf/'
     self.dag_dir = 'dagger/'
     self.log_dir = 'log/'
     self.optimizer = 'adagrad'
     self.gpu_num = _gpu_num  # '1'
     os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
     os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_num
     self.untrained = 'store_true'
     self.loss = 'categorical_crossentropy'
     self.output_mode = 'softmax'
     self.pool_size = (2, 2)
     self.kernel = 3
     self.input_shape = _input_shape  # (128, 512, 3)
     self.n_labels = 3  # num classes
     self.val_steps = _val_steps
     self.epoch_steps = _train_steps
     self.n_epochs = _num_epochs
     self.batch_size = _batch_size
     self.filters = 8
     self.b_pool_indices = _no_inidices
     self.b_use_segnet = _segnet
     if not self.b_pool_indices and not self.b_use_segnet:
         self.model = unet_wIndices(self.input_shape, self.n_labels, self.filters, self.kernel, self.pool_size,
                                    self.output_mode)
     elif not self.b_use_segnet:
         self.model = unet(self.input_shape, self.n_labels, self.filters, self.kernel, self.pool_size,
                           self.output_mode)
     else:
         self.model = segnet(self.input_shape, self.n_labels, self.filters, self.kernel, self.pool_size,
                             self.output_mode)
     print(self.model.summary())
     list_gpus_trained = [int(x) for x in self.gpu_num.split(',')]
     self.num_gpus = len(list_gpus_trained)
     if self.num_gpus > 1:
         trained_gpu_str = ', '.join(str(e) for e in list_gpus_trained)
         print('Training on GPU\'s: ' + trained_gpu_str)
         self.multi_model = multi_gpu_model(self.model, gpus = self.num_gpus)
     else:
         self.multi_model = self.model
     self.multi_model.compile(loss = self.loss, optimizer = self.optimizer, metrics = ['accuracy'])
     plot_model(model = self.multi_model, to_file = self.base_dir + 'model.png')
     print_summary(self.multi_model)
     self.std = [0.32636853, 0.31895106, 0.30716496]
     self.mean = [0.39061851, 0.38151629, 0.3547171]
     self.es_cb = []
     self.tb_cb = []
     self.cp_cb = []
Example #19
def test_model():
    features, labels = loadFromPickle()
    features, labels = shuffle(features, labels)
    features = features.reshape(features.shape[0], 64, 40, 1)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.1)
    model, callbacks_list = keras_model((64, 40, 1), len(labels[0]))
    print_summary(model)
    model.fit(train_x, train_y, batch_size=128, epochs=5, verbose=1, validation_data=(test_x, test_y),
              callbacks=[TensorBoard(log_dir="TensorBoard")])

    # Evaluate the model; verbose=0 suppresses per-batch logging
    score = model.evaluate(test_x, test_y, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])  # accuracy

    model.save('asr_mfcc_conv2d_model.h5')  # save the trained model
Example #20
def main():
    features, labels = loadFromPickle()
    features, labels = shuffle(features, labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.3)
    train_x = train_x.reshape(train_x.shape[0], 100, 100, 1)
    test_x = test_x.reshape(test_x.shape[0], 100, 100, 1)
    model, callbacks_list = keras_model(100, 100)
    print_summary(model)

    model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=32,
              batch_size=32,
              callbacks=callbacks_list)
Example #21
def model_deploy():
    features, labels = load_numpy()
    features, labels = shuffle(features, labels)
    print(labels.shape)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 320, 180, 1)
    test_x = test_x.reshape(test_x.shape[0], 320, 180, 1)
    model, callbacks_list = experimental(320, 180)
    parallel_model = multi_gpu_model(model, gpus=2)
    parallel_model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
    parallel_model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=50, batch_size=8,
                       callbacks=callbacks_list)
    #model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=50, batch_size=8,
    #           callbacks=callbacks_list)
    print_summary(model)
    
    model.save('Autopilot_10.h5')
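
Note that the example fits parallel_model but saves model: with keras.utils.multi_gpu_model the two share weights, and the Keras documentation recommends saving through the single-device template so the checkpoint can be loaded without multiple GPUs. A condensed sketch of the pattern, reusing the names above:

from keras.utils import multi_gpu_model

parallel_model = multi_gpu_model(model, gpus=2)  # shares weights with model
parallel_model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
parallel_model.fit(train_x, train_y, epochs=50, batch_size=8)
model.save('Autopilot_10.h5')                    # template holds the trained weights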
Example #22
    def log_model(self, models_local_folder, score, model):
        """

        Args:
            :param score:
            :param models_local_folder:
            :param model:
        """
        model_number = np.fromfile(os.path.join(MODEL_OUT_DIR,
                                                models_local_folder,
                                                "model_number.txt"),
                                   dtype=int)
        model_file_name = models_local_folder + "-" + str(model_number[0] - 1)

        self.log(
            "=========================================Start of Log=============================================="
        )
        self.log("Trained model " + model_file_name + ".json")
        self.log(time.strftime("%A %B %d,%Y %I:%M%p"))
        self.log("Dataset dir: " + DATA_SET_DIR)
        print_summary(model, print_fn=self.log)
        self.log("Parameters")
        self.log("_______________________________________")
        self.log("Batch size    : " + str(BATCH_SIZE))
        self.log("Batches per Epoch    : " + str(STEPS_PER_EPOCH))
        self.log("Epochs       : " + str(EPOCHS))
        self.log("Learning rate : " + str(LEARNING_RATE))
        self.log("_______________________________________")
        self.log("Loss          : " + str(score[0]))
        self.log("Accuracy      : " + str(score[1]))
        self.log(
            "=========================================End of Log================================================="
        )
        self.log(
            "===================================================================================================="
        )
        self.log(
            "----------------------------------------------------------------------------------------------------"
        )
        plot_model(model,
                   show_shapes=True,
                   to_file=MODEL_OUT_DIR + '/' + models_local_folder + '/' +
                   model_file_name + ".png")
Example #23
    def create_model(self):
        # from keras import Model,Input,
        from keras.models import Sequential, Model
        from keras.layers import Dense, Dropout, Input, Reshape, BatchNormalization  # , Flatten
        from keras.layers import Lambda, TimeDistributed, Activation, Conv2D, MaxPooling2D  # , Merge
        from keras import backend as K

        def ctc_lambda_func(inputs):
            softmax_out, label, data_lengths, label_lengths = inputs
            max_data_len = K.max(data_lengths)
            max_out_len = K.cast(K.shape(softmax_out)[1],
                                 dtype=data_lengths.dtype)
            out_lengths = K.round(data_lengths * max_out_len / max_data_len)
            return K.ctc_batch_cost(y_true=label,
                                    y_pred=softmax_out,
                                    input_length=out_lengths,
                                    label_length=label_lengths)

        label = Input(name=PADDED_LABELS_NAME, shape=[None], dtype='float32')
        data_lengths = Input(name=DATA_LENGTHS_NAME, shape=[1], dtype='int64')
        label_lengths = Input(name=LABEL_LENGHTS_NAME,
                              shape=[1],
                              dtype='int64')

        input_data, softmax_out = self.main_structure(
            input_layername=PADDED_DATAS_NAME)
        predict_model = Model(input_data, softmax_out)

        loss_out = Lambda(ctc_lambda_func,
                          output_shape=(1, ),
                          name=CTC_LOSS_NAME)([
                              softmax_out, label, data_lengths, label_lengths
                          ])

        model = Model([input_data, label, data_lengths, label_lengths],
                      loss_out)

        self.model_summary = ''
        print_summary(model, print_fn=self._capture_summary)
        # model.summary()

        return model, predict_model
Example #24
def main():
    features, labels = loadFromPickle()
    features = preprocessFeatures(features)
    # features, labels = augmentData(features, labels) Commented for now
    features, labels = shuffle(features, labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 80, 60, 1)
    test_x = test_x.reshape(test_x.shape[0], 80, 60, 1)
    model, callbacks_list = keras_model(80, 60)
    print_summary(model)
    model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=3,
              batch_size=64,
              callbacks=[TensorBoard(log_dir="Self_drive_dc")])
    model.save('Self_drive_dc.h5')
Example #25
def main():
    features, labels = loadFromPickle()
    # features, labels = augmentData(features, labels)
    features, labels = shuffle(features, labels)
    features = features / 255.
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 100, 100, 1)
    test_x = test_x.reshape(test_x.shape[0], 100, 100, 1)

    train_y = np_utils.to_categorical(train_y)
    test_y = np_utils.to_categorical(test_y)

    model, callbacks_list = keras_model(image_x=100, image_y=100)
    print_summary(model)
    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=5, batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(test_x, test_y, verbose=0)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    model.save('HTML.h5')
Example #26
def HighRes3DNet_cs(
        input_shape=(96, 96, 96, 1), weights=False, summary=True,
        weights_dir=None):
    """
    From the base model, create a variation for classification.
    """

    model = HighRes3DNet_base(input_shape, weights, summary, weights_dir)

    # Get input
    new_input = model.input
    # Find the layer to connect
    hidden_layer = model.layers[-4].output
    print(hidden_layer.name)
    x = layers.MaxPooling3D(pool_size=(3, 3, 3))(hidden_layer)
    x = layers.Flatten()(x)
    # x = layers.Dense(512, kernel_initializer='he_normal', name='fc0')(x)
    # x = layers.Conv1D(512, kernel_initializer='he_normal', name='fc0')(x)
    # x = layers.BatchNormalization(axis=-1, name='fc0_bn')(x)
    # x = layers.Dense(1024, kernel_initializer='he_normal', name='fc1')(x)
    # x = layers.BatchNormalization(axis=-1, name='fc1_bn')(x)
    x = layers.Dense(3, activation='softmax', name='fc2')(x)

    # Build a new model
    model_2 = models.Model(new_input, x)

    # Remove last layer and add our own

    # Fix all layers but last ones (variable)
    # for layer in model_2.layers[:-10]:
    #     layer.trainable = False

    if summary:
        with open('modelsummary_base.txt', 'w') as f:
            with redirect_stdout(f):
                utils.print_summary(model, line_length=110, print_fn=None)

        with open('modelsummary.txt', 'w') as f:
            with redirect_stdout(f):
                utils.print_summary(model_2, line_length=110, print_fn=None)
    return model_2
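
Instead of redirect_stdout, the same dump-to-file can use the print_fn hook directly; a sketch equivalent to the second block above, reusing utils and model_2 from the example:

with open('modelsummary.txt', 'w') as f:
    utils.print_summary(model_2, line_length=110,
                        print_fn=lambda line: f.write(line + '\n'))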
Example #27
def model_summary(model, fname='model'):
    """ -------------------------------------------------------------------------------------------------
    Print a summary of the model, and plot a graph of the model and save it to a file

    model:          [keras.engine.training.Model]
    fname:          [str] name of the output image without extension
    ------------------------------------------------------------------------------------------------- """
    if PLOT:
        utils.print_summary(model)

        d = os.path.join(cnfg['dir_current'], dir_plot)
        if not os.path.exists(d):
            os.makedirs(d)

        f = os.path.join(d, fname + plot_ext)

        #utils.plot_model( model, to_file=f, show_shapes=True, show_layer_names=True, expand_nested=True )
        utils.plot_model(model,
                         to_file=f,
                         show_shapes=True,
                         show_layer_names=True)
Example #28
def main():
    with open("features", "rb") as f:
        features = np.array(pickle.load(f))
    with open("labels", "rb") as f:
        labels = np.array(pickle.load(f))
    features, labels = shuffle(features, labels)
    labels = np_utils.to_categorical(labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
    test_x = test_x.reshape(test_x.shape[0], 28, 28, 1)
    print("train_X: " + str(train_x.shape))
    print("test_X: " + str(test_x.shape))
    model, callbacks_list = Model(28, 28)
    print_summary(model)
    summary = model.fit(train_x,
                        train_y,
                        validation_data=(test_x, test_y),
                        epochs=16,
                        batch_size=64,
                        callbacks=[TensorBoard(log_dir="QuickDraw")])
    #plot result acc
    plt.plot(summary.history['acc'])
    plt.title("Model Accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("Epochs")
    plt.legend(['train'], loc='upper right')
    plt.show()

    #plot loss result
    plt.plot(summary.history['loss'])
    plt.title("Model Loss")
    plt.ylabel("Loss")
    plt.xlabel("Epochs")
    plt.legend(['train'], loc='upper right')
    plt.show()
    model.save('QuickDraw.h5')
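
One portability caveat for the plotting block: the History dict key is 'acc' in standalone Keras 2.x but 'accuracy' under tf.keras, so a defensive lookup avoids a KeyError. A sketch reusing the summary object returned by fit above:

acc_key = 'acc' if 'acc' in summary.history else 'accuracy'
plt.plot(summary.history[acc_key])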
Example #29
    def log_model(self, score, model, history, y, predictions):
        """
        Logs the performance and architecture of a model
        Args:
            score:
            model:
        """
        model_number = np.fromfile(os.sep.join(
            [self.output_dir, "model_number.txt"]),
                                   dtype=int)
        model_file_name = self.net_type + "-" + str(model_number[0] - 1)
        out = os.sep.join([self.output_dir, model_file_name])

        self.log(
            "=========================================Start of Log=============================================="
        )
        self.log("Trained model " + model_file_name + ".json")
        self.log(time.strftime("%A %B %d,%Y %I:%M%p"))
        self.log("Dataset dir: " + DATA_SET_DIR)
        print_summary(model, print_fn=self.log)
        self.log("___________Parameters:______________")
        self.log("Batch size    : " + str(BATCH_SIZE))
        self.log("Epochs       : " + str(EPOCHS))
        self.log("Learning rate : " + str(LEARNING_RATE))
        self.log("___________Metrics___________________")
        self.log("Loss          : " + str(score[0]))
        self.log("MSE      : " + str(score[1]))
        self.log(
            "=========================================End of Log================================================="
        )
        self.log(
            "===================================================================================================="
        )
        self.log(
            "----------------------------------------------------------------------------------------------------"
        )
        plot_model(model, show_shapes=True, to_file=out + "-model.png")
        plot_history(history, out + "-history.png")
        plot_predictions(y, predictions, out + "-predictions.png")
Example #30
def get_model_to_train(gpu=False):
    inputs = Input(shape=(256,8))
    # inputs = Input(shape=X_train[0].shape)

    lstm = None

    lstmNodes = 88
    if gpu:
        lstm = Bidirectional(CuDNNLSTM(lstmNodes, unit_forget_bias=True), merge_mode='concat')(inputs)
    else:
        lstm = Bidirectional(LSTM(lstmNodes, unit_forget_bias=True), merge_mode='concat')(inputs)

    pred1 = Dense(88, activation='sigmoid')(lstm)

    model = Model(inputs=inputs, outputs=[pred1])
    opt = Adam(lr=1e-3, decay=1e-5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['categorical_accuracy'])

    checkpointer = ModelCheckpoint(filepath='second.hdf5', verbose=0, save_best_only=True)

    print_summary(model)
    return model, checkpointer
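
A caveat on the gpu switch above (an assumption about Keras 2.x defaults, worth verifying for your version): CuDNNLSTM hard-codes tanh/sigmoid gate activations, while plain LSTM defaults to recurrent_activation='hard_sigmoid', so weights trained on the GPU branch only transfer cleanly if the CPU branch is built to match:

# CPU branch made weight-compatible with the CuDNNLSTM branch (sketch):
lstm = Bidirectional(LSTM(lstmNodes, recurrent_activation='sigmoid',
                          unit_forget_bias=True), merge_mode='concat')(inputs)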
Example #31
def main():
    features, labels = loadFromPickle()
    features = features / 127.5 - 1.
    features, labels = shuffle(features, labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.3)
    train_x, test_x, train_y, test_y = reshapeData(train_x, test_x, train_y,
                                                   test_y)
    printInfo(train_x, test_x, train_y, test_y)
    model, callbacks_list = keras_model(train_x.shape[1], train_x.shape[2])
    model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=1,
              batch_size=64,
              callbacks=callbacks_list)
    scores = model.evaluate(test_x, test_y, verbose=1)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
    print_summary(model)

    model.save('emojigram.h5')
Example #32
def main(args):
    # Ensure training, testing, and manip are not all turned off
    assert (args.train or args.test or args.manip), 'Cannot have train, test, and manip all set to 0, Nothing to do.'

    # Load the training, validation, and testing data
    try:
        train_list, val_list, test_list = load_data(args.data_root_dir, args.split_num)
    except Exception:
        # Create the training and test splits if not found
        split_data(args.data_root_dir, num_splits=4)
        train_list, val_list, test_list = load_data(args.data_root_dir, args.split_num)

    # Get image properties from first image. Assume they are all the same.
    img_shape = sitk.GetArrayFromImage(sitk.ReadImage(join(args.data_root_dir, 'imgs', train_list[0][0]))).shape
    net_input_shape = (img_shape[1], img_shape[2], args.slices)

    # Create the model for training/testing/manipulation
    model_list = create_model(args=args, input_shape=net_input_shape)
    print_summary(model=model_list[0], positions=[.38, .65, .75, 1.])

    args.output_name = 'split-' + str(args.split_num) + '_batch-' + str(args.batch_size) + \
                       '_shuff-' + str(args.shuffle_data) + '_aug-' + str(args.aug_data) + \
                       '_loss-' + str(args.loss) + '_slic-' + str(args.slices) + \
                       '_sub-' + str(args.subsamp) + '_strid-' + str(args.stride) + \
                       '_lr-' + str(args.initial_lr) + '_recon-' + str(args.recon_wei)
    args.time = time

    args.check_dir = join(args.data_root_dir, 'saved_models', args.net)
    try:
        makedirs(args.check_dir)
    except OSError:
        pass

    args.log_dir = join(args.data_root_dir, 'logs', args.net)
    try:
        makedirs(args.log_dir)
    except OSError:
        pass

    args.tf_log_dir = join(args.log_dir, 'tf_logs')
    try:
        makedirs(args.tf_log_dir)
    except OSError:
        pass

    args.output_dir = join(args.data_root_dir, 'plots', args.net)
    try:
        makedirs(args.output_dir)
    except OSError:
        pass

    if args.train:
        from train import train
        # Run training
        train(args, train_list, val_list, model_list[0], net_input_shape)

    if args.test:
        from test import test
        # Run testing
        test(args, test_list, model_list, net_input_shape)

    if args.manip:
        from manip import manip
        # Run manipulation of segcaps
        manip(args, test_list, model_list, net_input_shape)
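
On Python 3 the four try/makedirs blocks above collapse into a loop with exist_ok=True; the original presumably keeps try/except for Python 2 compatibility. A sketch assuming the same args namespace:

from os import makedirs
from os.path import join

for d in (join(args.data_root_dir, 'saved_models', args.net),
          join(args.data_root_dir, 'logs', args.net),
          join(args.data_root_dir, 'logs', args.net, 'tf_logs'),
          join(args.data_root_dir, 'plots', args.net)):
    makedirs(d, exist_ok=True)  # no error if the directory already exists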