def create_xception_model(self):
    # vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')  # include the top fully connected layers; 'imagenet' loads weights pre-trained on ImageNet
    xception_model = Xception(include_top=True, weights='imagenet', input_tensor=None)
    xception_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
    xception_model.summary()
    plot_model(xception_model, to_file='xception_model.png', show_shapes=True)
    return xception_model
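
# A minimal usage sketch for the helper above, assuming the standard Keras
# Xception preprocessing utilities; `clf` is a hypothetical instance of the
# enclosing class and 'elephant.jpg' is a placeholder image path.
from keras.preprocessing import image
from keras.applications.xception import preprocess_input, decode_predictions
import numpy as np

model = clf.create_xception_model()
img = image.load_img('elephant.jpg', target_size=(299, 299))  # Xception's default input size
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])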
Example #2
    def create_model(self, weights_filename=None):
        model = Xception(weights=None, input_shape=[self.height, self.width, self.depth],
                         include_top=True, classes=self.n_classes)

        if weights_filename is not None:
            model.load_weights(weights_filename)

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=self.metrics)

        return model
def Make_Model(modelConfig, datasetConfig):
    
    strModelType = modelConfig.MODEL_TYPE
    strPretrained = modelConfig.PRETRAINED_MODEL
    im_Shape = datasetConfig.IMG_SHAPE
    strOptimizer = modelConfig.OPTIMIZER
    num_Classes = datasetConfig.NUM_CLASS
    learningRate = modelConfig.LEARNING_RATE
    decay = modelConfig.DECAY
    momentum = modelConfig.MOMENTUM
    loss = modelConfig.LOSS

    optimizer = None 

    if(strOptimizer == "SGD"):
        optimizer = SGD(lr=learingRate, decay=decay, momentum=momentum, nesterov=True) # decay = 1e-4
    elif(strOptimizer == "ADAM"):
        optimizer = Adam(lr=learingRate, decay=decay)
    else:
        print("No Such a Optimizer") 
        return None

    model = None 
    if(strModelType == "VGG16"):
        model = VGG16(weights=strPretrained, include_top=True, input_shape=im_Shape,classes = num_Classes)
    elif(strModelType == "RESNET50"):
        model = ResNet50(weights=strPretrained, include_top=True, input_shape=im_Shape,classes = num_Classes)
    elif(strModelType == "RESNET152"):
        model = build_Resnet152_Model(im_Shape, num_Classes, strPretrained)
    elif(strModelType == "INCEPTIONV3"):
        model = InceptionV3(weights=strPretrained, include_top=True, input_shape=im_Shape,classes = num_Classes)
    elif(strModelType == "INCEPTIONRESV2"):
        model = InceptionResNetV2(weights=strPretrained, include_top=True, input_shape=im_Shape,classes = num_Classes)
    elif(strModelType == "SEINCEPTIONRESV2"):
        model = SEInceptionResNetV2(weights=strPretrained, include_top=True, input_shape=im_Shape,classes = num_Classes)
    elif(strModelType == "XCEPTION"):
        model = Xception(weights=strPretrained, include_top=True, input_shape=im_Shape,classes = num_Classes)
        #basemodel = Xception(weights="imagenet", include_top=True, input_shape=(299,299,3), classes = 1000)
        #x = Dense(num_Classes, activation='softmax', name='predictions')(basemodel.layers[-2].output)
        #model = Model(basemodel.input, x)
    elif(strModelType == "UNET2D"):
        model = build_UNet2D_4L(im_Shape, strPretrained)
    elif(strModelType == "CNN6Layers"):    
        model = build_CNN_6layers(im_Shape, num_classes = num_Classes)
    else:
        print("No Such Model Type") 
        return None

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    #model.summary()

    return model
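
# A hedged usage sketch for Make_Model: the config objects only need the
# attributes read above, so types.SimpleNamespace can stand in for the real
# config classes; every value below is illustrative.
from types import SimpleNamespace

modelConfig = SimpleNamespace(MODEL_TYPE="XCEPTION", PRETRAINED_MODEL=None,
                              OPTIMIZER="SGD", LEARNING_RATE=0.01, DECAY=1e-4,
                              MOMENTUM=0.9, LOSS="categorical_crossentropy")
datasetConfig = SimpleNamespace(IMG_SHAPE=(299, 299, 3), NUM_CLASS=10)

model = Make_Model(modelConfig, datasetConfig)
if model is not None:
    model.summary()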
def uResNet34Xceptionregr(input_size=None,
                          unet_weights=None,
                          weights=None,
                          n_classes=4,
                          freeze_unet=False):
    assert any([unet_weights is None,
                weights is None])  # do not load both weights

    # U-Net
    if K.image_data_format() == "channels_last":
        input_shape = input_size + (3, )
    else:
        input_shape = (3, ) + input_size
    model_unet = uResNet34(input_size=input_size,
                           encoder_weights=None,
                           weights=unet_weights,
                           n_classes=n_classes)
    if freeze_unet:
        print("Freeze U-Net")
        for layer in model_unet.layers:
            layer.trainable = False
    img_input = Input(shape=input_shape)
    model_unet(img_input)

    # Regression
    if K.image_data_format() == "channels_last":
        input_shape = input_size + (n_classes, )
    else:
        input_shape = (n_classes, ) + input_size
    model = Xception(input_shape=input_shape,
                     include_top=False,
                     weights=None,
                     pooling="avg")
    x = model(model_unet.get_output_at(0))
    x = Dense(1, activation="linear", name="predictions")(x)

    # Create model.
    model = Model(model_unet.get_input_at(0), x, name="uResNet34Xceptionregr")

    if weights is not None:
        print("Load weights from", weights)
        model.load_weights(weights)

    optimizer = Adam(decay=1e-4)
    # optimizer = SGD(momentum=0.95, decay=0.0005, nesterov=True)

    model.compile(loss="mse", optimizer=optimizer, metrics=["mse"])

    return model
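
# A hedged usage sketch for the composite model above; the input size, class
# count, and freezing choice are illustrative, not values from the original code.
model = uResNet34Xceptionregr(input_size=(256, 256), n_classes=4, freeze_unet=True)
model.summary()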
Example #5
def main(data_dir='../data', scripts_dir='./', use_adv=True):
    file = File(path.join(data_dir, 'sample.h5'))
    x_data = file['x_data_adv'][:] if int(use_adv) else file['x_data'][:]
    y_data = file['y_data'][:]

    inc = InceptionV3()
    inc.compile(loss='categorical_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])
    xc = Xception()
    xc.compile(loss='categorical_crossentropy',
               optimizer='sgd',
               metrics=['accuracy'])

    inc_adv = load_model(path.join(scripts_dir, 'inception_adv.h5'))
    inc_adv.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])
    xc_adv = load_model(path.join(scripts_dir, 'xception_adv.h5'))
    xc_adv.compile(loss='categorical_crossentropy',
                   optimizer='sgd',
                   metrics=['accuracy'])

    for net_name, net in (('inception', inc), ('inception_adv', inc_adv),
                          ('xception', xc), ('xception_adv', xc_adv)):
        for f in (filter_median, partial(denoise_nl_means, multichannel=True),
                  partial(denoise_wavelet, multichannel=True),
                  partial(denoise_bilateral, multichannel=True),
                  partial(denoise_tv_chambolle, multichannel=True),
                  partial(gaussian, multichannel=True), nofilter):
            try:
                imgs = apply_filter(x_data, f)
                imgs = preprocess_input(imgs.astype('float32'))
                loss, acc = net.evaluate(imgs,
                                         y_data,
                                         batch_size=64,
                                         verbose=0)
                fname = f.func.__name__ if hasattr(f, 'func') else f.__name__
                logger.info(
                    f'{net_name} with {fname}: loss {loss:.3f}, accuracy {acc:.3f}'
                )
            except Exception:
                logger.exception(f'{f} failed')
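
# `apply_filter` and `nofilter` are used above but not shown; a minimal sketch
# of what they plausibly look like (both bodies are assumptions).
import numpy as np

def nofilter(img):
    # Identity baseline: evaluate the networks on unfiltered images.
    return img

def apply_filter(x_data, f):
    # Apply a scikit-image style per-image filter to the whole batch.
    return np.stack([f(img) for img in x_data])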
Example #6
def Xception_model():
    input_tensor = Input(shape=(dimension, dimension, number_of_channels))
    model = Xception(input_tensor=input_tensor,
                     weights='imagenet',
                     include_top=True)
    # Drop the ImageNet classification head (note: mutating model.layers in
    # place is fragile and does not rebuild the graph in newer Keras versions).
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    x = Dense(number_of_classes, activation='softmax')(model.output)
    model = Model(model.input, x)

    # Freeze the first 24 layers so they are not trained.
    for layer in model.layers[:24]:
        layer.trainable = False

    lrate = 0.001
    decay = 0.000001
    adam = Adam(lr=lrate, decay=decay)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    model.summary()
    return model
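
# A more robust way to swap the head than popping model.layers is to branch
# off the penultimate layer's output; a sketch assuming the same `dimension`,
# `number_of_channels`, and `number_of_classes` globals as above.
def Xception_model_v2():
    base = Xception(input_tensor=Input(shape=(dimension, dimension, number_of_channels)),
                    weights='imagenet', include_top=True)
    features = base.layers[-2].output  # output of the layer feeding the ImageNet softmax
    outputs = Dense(number_of_classes, activation='softmax')(features)
    return Model(base.input, outputs)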
Example #7
def main():
    # Parameters
    if len(sys.argv) == 4:
        superclass = sys.argv[1]
        imgmove = sys.argv[2]
        if imgmove == 'False':
            imgmove = False
        else:
            imgmove = True
        lr = float(sys.argv[3])
    else:
        print('Parameters error')
        exit()

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    trainpath = 'trainval_' + superclass + '/train'
    valpath = 'trainval_' + superclass + '/val'

    if not os.path.exists('model'):
        os.mkdir('model')

    # Train/validation data preparation
    if imgmove:
        os.mkdir('trainval_' + superclass)
        os.mkdir(trainpath)
        os.mkdir(valpath)
        sourcepath = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date+'_crop'\
                     +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
        categories = os.listdir(sourcepath)
        for eachclass in categories:
            if eachclass[0] == superclass[0]:
                print(eachclass)
                os.mkdir(trainpath + '/' + eachclass)
                os.mkdir(valpath + '/' + eachclass)
                imgs = os.listdir(sourcepath + '/' + eachclass)
                idx = 0
                for im in imgs:
                    if idx % 8 == 0:
                        shutil.copyfile(
                            sourcepath + '/' + eachclass + '/' + im,
                            valpath + '/' + eachclass + '/' + im)
                    else:
                        shutil.copyfile(
                            sourcepath + '/' + eachclass + '/' + im,
                            trainpath + '/' + eachclass + '/' + im)
                    idx += 1

    # Train and validation ImageDataGenerator
    batchsize = 32

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=15,
                                       width_shift_range=5,
                                       height_shift_range=5,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(trainpath,
                                                        target_size=(72, 72),
                                                        batch_size=batchsize)

    valid_generator = test_datagen.flow_from_directory(valpath,
                                                       target_size=(72, 72),
                                                       batch_size=batchsize)

    # Train Xception
    model = Xception(include_top=True,
                     weights=None,
                     input_tensor=None,
                     input_shape=(72, 72, 3),
                     pooling=None,
                     classes=classNum[superclass[0]])
    model.summary()
    model.compile(optimizer=SGD(lr=lr, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    steps_per_epoch = int(train_generator.n / batchsize)
    validation_steps = int(valid_generator.n / batchsize)

    weightname = 'model/mobile_' + superclass + '_wgt.h5'

    if os.path.exists(weightname):
        model.load_weights(weightname)

    checkpointer = ModelCheckpoint(weightname,
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='auto',
                                   period=1)
    model.fit_generator(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=100,
                        validation_data=valid_generator,
                        validation_steps=validation_steps,
                        callbacks=[checkpointer])

def step_decay(epoch):
    # Step-decay schedule: shrink the rate by `drop` every `epochs_drop` epochs.
    initial_lrate = 0.001  # assumed starting rate; not defined in the original snippet
    drop = 0.94
    epochs_drop = 2

    if epoch % epochs_drop == 0:
        lrate = initial_lrate * (drop ** np.floor(epoch / epochs_drop))
    else:
        lrate = initial_lrate * (drop ** np.floor((epoch - 1) / epochs_drop))

    print(lrate)
    return lrate

lrate = LearningRateScheduler(step_decay)

adam = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0, amsgrad=False)

model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

csv_logger = CSVLogger('./training_Xception.log', append=True)
checkpointer = ModelCheckpoint(filepath='./weights_Xception.hdf5', verbose=1, save_best_only=True, monitor='val_acc')
callbacks_list = [checkpointer, csv_logger, lrate]

model.fit_generator(training_set, epochs=100, steps_per_epoch=len(x_train) // 50, validation_steps=len(x_valid) // 50, verbose=1, validation_data=validation_set, callbacks=callbacks_list)

score = model.evaluate(x_valid, y_valid, verbose=0)

print(score)





prediction = Dense(3,activation='softmax',name='new_dene_2')(x)
model_1 = Model(inputs=inception_model_1.input,outputs=prediction)

lr_dict = {}
layers = ['new_pool_1', 'new_batch_1', 'new_dense_1', 'new_batch_2', 'new_drop_2', 'new_dene_2']
for layer in layers:
    lr_dict[layer] = 0.0002 * 10  # per-layer learning-rate multiplier for the new head layers

!pip install keras-lr-multiplier

from keras_lr_multiplier import LRMultiplier

model_1.summary()

model_1.compile(loss='categorical_crossentropy',optimizer=LRMultiplier('adam', lr_dict),metrics=['accuracy'])

train_set_1 = preprocess_input(preprocess_initial(train_files))

test_set_1 = preprocess_input(preprocess_initial(test_files))

valid_set_1 = preprocess_input(preprocess_initial(valid_files))

layers = ['new_pool_1','new_batch_1','new_dense_1','new_batch_2','new_drop_2','new_dene_2']
model_1.get_layer('new_dene_2').set_weights([np.array(weights[10],dtype=np.float32),np.zeros([3],dtype=np.float32)])
model_1.get_layer('new_dense_1').set_weights([np.array(weights[4],dtype=np.float32),np.zeros([512],dtype=np.float32)])

# Freeze the first 249 layers and unfreeze the rest:
for layer in model_1.layers[:249]:
    layer.trainable = False
for layer in model_1.layers[249:]:
    layer.trainable = True
Example #10
x = base_model.output
x = Dense(units=1024, activation="relu")(x)

x = Dense(units=2, activation="softmax")(x)

model = Model(base_model.input, x)

model.summary()

for layer in base_model.layers:
    layer.trainable = False

model.summary()

model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])
model.fit_generator(train_gen,
                    validation_data=test_gen,
                    steps_per_epoch=947 // 64,
                    validation_steps=406 // 64,
                    epochs=2)

###################################################################

from matplotlib.image import imread

train_arr = []
label_arr = []

for path, subdirs, files in os.walk(root):
Example #11
# The snippet begins mid-call; include_top and weights are assumed values.
model = Xception(include_top=True, weights='imagenet',
                 input_shape=(299, 299, 3))

x = model.get_layer(index=len(model.layers) - 2).output

print(x)
x = Dense(1)(x)

model = Model(inputs=model.input, outputs=x)
model.summary()

# **Using the RMSprop optimizer, with mean absolute error as the metric and mean squared error as the loss**

# In[ ]:

opt = RMSprop(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer=opt, metrics=['mae'])

# **Fitting the model**
#
# **NOTE: The number of epochs is set to 100**

# In[ ]:

network_history = model.fit(x_train,
                            y_train,
                            batch_size=32,
                            epochs=100,
                            verbose=1,
                            validation_data=(x_val, y_val))

# ### Save the Model Trained
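
# A one-line sketch of the save step announced above; the filename is a placeholder.
model.save('xception_regression.h5')
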
def train_with_sift_features(batch_size, input_shape, x_train, y_train,
                             x_valid, y_valid, sift_features_train,
                             sift_features_valid, model_name, num_workers,
                             resume):
    print('Found {} images belonging to {} classes'.format(len(x_train), 128))
    print('Found {} images belonging to {} classes'.format(len(x_valid), 128))
    train_generator = AugmentedDatasetWithSiftFeatures(x_train,
                                                       y_train,
                                                       sift_features_train,
                                                       batch_size=batch_size,
                                                       input_shape=input_shape)
    valid_generator = DatasetWithSiftFeatures(x_valid,
                                              y_valid,
                                              sift_features_valid,
                                              batch_size=batch_size,
                                              input_shape=input_shape)
    class_weight = compute_class_weight('balanced', np.unique(y_train),
                                        y_train)
    class_weight_dict = dict.fromkeys(np.unique(y_train))
    for key in class_weight_dict.keys():
        class_weight_dict.update({key: class_weight[key]})

    filepath = 'checkpoint/{}/sift_iter1.hdf5'.format(model_name)
    save_best = ModelCheckpoint(filepath=filepath,
                                verbose=1,
                                monitor='val_acc',
                                save_best_only=True,
                                mode='max')
    save_on_train_end = ModelCheckpoint(filepath=filepath,
                                        verbose=1,
                                        monitor='val_acc',
                                        period=args.epochs)
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  factor=0.2,
                                  patience=2,
                                  verbose=1)
    callbacks = [save_best, save_on_train_end, reduce_lr]

    if resume == 'True':
        print('\nResume training from the last checkpoint')
        model = load_model(filepath)
        trainable_count = int(
            np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
        print('Trainable params: {:,}'.format(trainable_count))
        model.fit_generator(generator=train_generator,
                            epochs=args.epochs,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            class_weight=class_weight_dict,
                            workers=num_workers)
    else:
        model = Xception(include_top=False, pooling='max')
        sift_features = Input(shape=(512, ))
        x = Concatenate()([model.layers[-1].output, sift_features])
        x = Dense(units=128,
                  activation='linear',
                  name='predictions',
                  kernel_regularizer=regularizers.l2(0.0001))(x)
        model = Model([model.layers[0].input, sift_features], x)

        for layer in model.layers[:-1]:
            layer.trainable = False

        model.compile(optimizer=Adam(lr=0.001),
                      loss='categorical_hinge',
                      metrics=['acc'])
        model.fit_generator(generator=train_generator,
                            epochs=5,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            class_weight=class_weight_dict,
                            workers=num_workers)
        K.clear_session()

        print("\nFine-tune the network")
        model = load_model(filepath)
        for layer in model.layers:
            layer.trainable = True
        trainable_count = int(
            np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
        print('Trainable params: {:,}'.format(trainable_count))
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                      loss='categorical_hinge',
                      metrics=['acc'])
        model.fit_generator(generator=train_generator,
                            epochs=30,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            class_weight=class_weight_dict,
                            workers=num_workers)
        K.clear_session()
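
# `DatasetWithSiftFeatures` (and its augmented variant) are project-specific
# generators not shown here; a minimal sketch of the plain version as a
# keras.utils.Sequence. Everything below, including the `load_and_resize`
# helper, is an assumption about the original classes.
from keras.utils import Sequence
import numpy as np

class DatasetWithSiftFeatures(Sequence):
    """Yields ([images, sift_features], labels) batches for the two-input model."""

    def __init__(self, x, y, sift_features, batch_size, input_shape):
        self.x, self.y = x, y
        self.sift_features = sift_features
        self.batch_size = batch_size
        self.input_shape = input_shape

    def __len__(self):
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        imgs = load_and_resize(self.x[sl], self.input_shape)  # hypothetical image loader
        return [imgs, self.sift_features[sl]], self.y[sl]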
Example #13
def main(args):

    # hyper parameters
    batch_size = 16
    num_classes = 102
    epochs = 100

    # Instantiate model
    model = Xception(include_top=True, weights=None, classes=num_classes)

    # prepare data
    x_train = np.load(os.path.join(current_directory, 'x_train.npy'))
    y_train = np.load(os.path.join(current_directory, 'y_train.npy'))
    x_test = np.load(os.path.join(current_directory, 'x_test.npy'))
    y_test = np.load(os.path.join(current_directory, 'y_test.npy'))

    # summary of the model
    model.summary()

    # compile model
    model.compile(
        loss=categorical_crossentropy,
        optimizer=Adadelta(),
        metrics=['accuracy']
    )

    # learning section
    hist = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(x_test, y_test)
    )

    # evaluation section
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # save graphs
    acc = hist.history['acc']
    val_acc = hist.history['val_acc']
    loss = hist.history['loss']
    val_loss = hist.history['val_loss']

    plt.plot(range(epochs), acc, marker='.', label='acc')
    plt.plot(range(epochs), val_acc, marker='.', label='val_acc')
    plt.legend(loc='best')
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('acc')
    plt.savefig(os.path.join(current_directory, 'acc_xception.png'))
    plt.clf()

    plt.plot(range(epochs), loss, marker='.', label='loss')
    plt.plot(range(epochs), val_loss, marker='.', label='val_loss')
    plt.legend(loc='best')
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.savefig(os.path.join(current_directory, 'loss_xception.png'))
    plt.clf()
Example #14
def train(epochs):
    image_size = (299, 299)
    # variables to hold features and labels
    features = []
    labels = []

    # default setting in keras models
    class_count = 1000
    X_test = []
    name_test = []

    trainData = np.loadtxt("./train.txt", dtype="str", delimiter=' ')
    for k in range(len(trainData)):
        aLine = trainData[k]
        image_path = aLine[0]
        label = int(aLine[1])
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[label] = 1

        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        labels.append(ground_truth)
        features.append(x[0])

    trainData = np.loadtxt("./val.txt", dtype="str", delimiter=' ')
    for k in range(len(trainData)):
        aLine = trainData[k]
        image_path = aLine[0]
        label = int(aLine[1])
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[label] = 1

        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        labels.append(ground_truth)
        features.append(x[0])

    testData = np.loadtxt("./test.txt", dtype="str", delimiter=' ')
    for k in range(len(testData)):
        aLine = testData[k]
        image_path = aLine
        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        X_test.append(x[0])
        name_test.append(image_path)

    X_train = features
    y_train = labels

    X_train = np.array(X_train)
    Y_train = np.array(y_train)

    # test image
    X_test = np.array(X_test)

    # Use Xception
    model = Xception(include_top=True, weights='imagenet', classes=class_count)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, epochs=epochs, verbose=1, validation_split=0.3)

    Y_pred = model.predict(X_test)

    with open('project2_08573584.txt', 'w') as f:
        for i in range(len(name_test)):
            predict = Y_pred[i].argmax(axis=0)
            f.write(str(predict) + '\n')
Example #15
def fine_tune(name,
              name_ext,
              lr=1e-4,
              reduce_lr_factor=0.1,
              reduce_lr_patience=3,
              epochs=10,
              batch_size=32,
              l2_reg=0,
              dropout_p=0.5,
              num_freeze_layers=0,
              save_best_only=True,
              loss_stop_val=0.00001):

    data_info = load_organized_data_info(imgs_dim=HEIGHT, name=name)
    tr_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=180,
        vertical_flip=True,
        horizontal_flip=True,
        # width_shift_range=0.1,
        # height_shift_range=0.1,
        # zoom_range=0.1,
        # shear_range=0.3,
        # fill_mode='reflect'
    )
    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

    def dir_datagen(dir_, gen):
        return gen.flow_from_directory(directory=dir_,
                                       target_size=(HEIGHT, WIDTH),
                                       class_mode='categorical',
                                       batch_size=batch_size,
                                       shuffle=True)

    dir_tr, num_tr = data_info['dir_tr'], data_info['num_tr']
    dir_val, num_val = data_info['dir_val'], data_info['num_val']

    top_classifier_file = join(MODELS_DIR, TOP_CLASSIFIER_FILE.format(name))
    model_file = join(MODELS_DIR, MODEL_FILE.format(name, name_ext))

    model = Xception(weights='imagenet', include_top=False, pooling='avg')
    top_classifier = _top_classifier(l2_reg=l2_reg,
                                     dropout_p=dropout_p,
                                     input_shape=(2048, ))
    top_classifier.load_weights(top_classifier_file)
    model = Model(inputs=model.input, outputs=top_classifier(model.output))
    model.compile(Adam(lr=lr), loss='categorical_crossentropy')

    # model has 134 layers
    for layer in model.layers[:num_freeze_layers]:
        layer.trainable = False

    log_dir = join(EXPERIMENTS_DIR, 'xception_fine_tuned_{:s}'.format(name))
    callbacks = [
        EarlyStoppingByLoss(monitor='loss', value=loss_stop_val),
        ReduceLROnPlateau(factor=reduce_lr_factor,
                          patience=reduce_lr_patience),
        ModelCheckpoint(model_file, save_best_only=save_best_only),
        TensorBoard(log_dir=log_dir, write_graph=False)
    ]

    model.fit_generator(generator=dir_datagen(dir_tr, tr_datagen),
                        steps_per_epoch=ceil(num_tr / batch_size),
                        epochs=epochs,
                        validation_data=dir_datagen(dir_val, val_datagen),
                        validation_steps=ceil(num_val / batch_size),
                        callbacks=callbacks)
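
# `EarlyStoppingByLoss` is a project-specific callback not shown here; a
# minimal sketch of the usual pattern it implements (the class body is an
# assumption): stop once the monitored quantity drops below a threshold.
from keras.callbacks import Callback

class EarlyStoppingByLoss(Callback):
    def __init__(self, monitor='loss', value=1e-5):
        super(EarlyStoppingByLoss, self).__init__()
        self.monitor = monitor
        self.value = value

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is not None and current < self.value:
            print('Epoch {}: {} below {}, stopping'.format(epoch, self.monitor, self.value))
            self.model.stop_training = True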
    total_samples_test = getNumSamples(variants[num_variant][1][0:4] + '.h5')

    x_test = np.zeros(
        (total_samples_test, INPUT_FRAME_SIZE, INPUT_FRAME_SIZE, 3),
        dtype='float16')
    y_test = np.zeros((total_samples_test, 1), dtype='float16')

    print('Test dataset loaded')

    print('Testing dataset size = ', x_test.shape)

    print('Loading model')
    model = Xception(weights=None, include_top=True, classes=NUMBER_OF_CLASSES)
    rms_prop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=rms_prop,
                  metrics=['accuracy', 'mse'])
    print("Model loaded")

    # autosave best Model
    best_model_file = '{}_{}_{}_B{}_E{}_F{}.h5'.format(NET_NAME,
                                                       TRAIN_SET[-11:-7],
                                                       TEST_SET[-11:-7],
                                                       BATCH_SIZE, EPOCHS,
                                                       INPUT_FRAME_SIZE)
    best_model = ModelCheckpoint(best_model_file,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True)

    print("*************************************")
Example #17
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['categorical_accuracy'])

    bind_model(model)
    
    model = NASNetLarge(input_shape=(h, w, 3), include_top=True, classes=4, weights=None)
    adam = optimizers.Adam(lr=learning_rate, decay=1e-5)                    # optional optimization
    sgd = optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    bind_model(model)
    """
    ##
    
    model = Xception(input_shape=(h, w, 3), include_top=True, classes=4, weights=None)
    adam = optimizers.Adam(lr=learning_rate, decay=1e-5)                    # optional optimization
    sgd = optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    bind_model(model)
    
    """
    model = cnn_sample(in_shape=(h, w, 3), num_classes=4)
    adam = optimizers.Adam(lr=learning_rate, decay=1e-5)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    bind_model(model)
    """
    if config.pause:  ## when in test mode
        print('Inferring Start...')
        nsml.paused(scope=locals())

    if config.mode == 'train':  ### when in training mode
        print('Training Start...')
Example #18
# from keras.utils import multi_gpu_model
import tensorflow as tf
from keras.applications import Xception
import numpy as np
import datetime

num_samples = 100
height = 71
width = 71
num_classes = 100

start = datetime.datetime.now()
with tf.device('/gpu:0'):
    model = Xception(weights=None,
                     input_shape=(height, width, 3),
                     classes=num_classes)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Generate dummy data.
    x = np.random.random((num_samples, height, width, 3))
    y = np.random.random((num_samples, num_classes))

    model.fit(x, y, epochs=3, batch_size=16)

    model.save('my_model.h5')

end = datetime.datetime.now()
time_delta = end - start
print('GPU elapsed time:', time_delta)

start = datetime.datetime.now()
with tf.device('/cpu:0'):