Code example #1
def get_generators():
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True,
        rotation_range=10.,
        width_shift_range=0.2,
        height_shift_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        os.path.join('data', 'train'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    validation_generator = test_datagen.flow_from_directory(
        os.path.join('data', 'test'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    return train_generator, validation_generator
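
A minimal usage sketch for the generators returned above (hedged: it assumes a compiled Keras `model` and sample counts `nb_train_samples` / `nb_val_samples`, none of which are defined in this example):

train_generator, validation_generator = get_generators()
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // 32,   # 32 matches the batch_size used above
    epochs=10,
    validation_data=validation_generator,
    validation_steps=nb_val_samples // 32)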
Code example #2
File: unet.py  Project: Ozzyz/ship_detection
def load_data_generator(train_folderpath, mask_folderpath, img_size = (768, 768), mask_size=(768,768), batch_size=32):
    """
    Returns a data generator that yields matching (image, mask) batches from the given training and mask directories.
    """
    data_gen_args = dict(
                        width_shift_range=0.2,
                        height_shift_range=0.2,
                        horizontal_flip=True,
                        rotation_range=10,
                        zoom_range=0.2,
                        fill_mode="constant", 
                        cval=0       
                        )

    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
        
    seed = 42
    
    image_generator = image_datagen.flow_from_directory(train_folderpath, class_mode=None,
        target_size = img_size, seed=seed, color_mode = 'rgb', batch_size=batch_size)
    mask_generator = mask_datagen.flow_from_directory(mask_folderpath, class_mode=None, 
        target_size = mask_size,seed=seed, color_mode='grayscale', batch_size=batch_size)

    return zip(image_generator, mask_generator)
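
Because both flows share the same seed, the zipped generator yields image batches and mask batches with identical augmentations. A hedged usage sketch (the directory paths, sample count, and compiled segmentation `model` are assumptions, not part of the original project):

train_gen = load_data_generator('data/train_images', 'data/train_masks', batch_size=32)
model.fit_generator(train_gen,
                    steps_per_epoch=n_train_images // 32,  # hypothetical count of training images
                    epochs=5)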
Code example #3
File: train.py  Project: nirmalyaghosh/mini-projects
def _get_data_generators(img_width, img_height, labels):
    train_datagen = ImageDataGenerator(
        fill_mode="nearest",
        horizontal_flip=True,
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        batch_size=32,
        classes=labels,
        target_size=(img_width, img_height),
        class_mode="categorical")

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        batch_size=32,
        classes=labels,
        target_size=(img_width, img_height),
        class_mode="categorical")

    return train_generator, validation_generator
Code example #4
def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples // batch_size)
    np.save(open('bottleneck_features_train.npy', 'wb'),
            bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples // batch_size)
    np.save(open('bottleneck_features_validation.npy', 'wb'),
            bottleneck_features_validation)
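
The saved .npy files are typically reloaded to train a small fully connected "top" model on the bottleneck features. A hedged sketch in that spirit (it assumes two balanced classes, that `epochs`/`batch_size`/`nb_train_samples`/`nb_validation_samples` are defined elsewhere in the script, and that Sequential/Flatten/Dense/Dropout are imported):

train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
train_labels = np.array([0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
validation_data = np.load(open('bottleneck_features_validation.npy', 'rb'))
validation_labels = np.array([0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))

top_model = Sequential()
top_model.add(Flatten(input_shape=train_data.shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
top_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
top_model.fit(train_data, train_labels,
              epochs=epochs, batch_size=batch_size,
              validation_data=(validation_data, validation_labels))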
Code example #5
File: SVGG.py  Project: Harlaus/zhihu
def CNN(trainDir, validationDir, classNum):
    model = Sequential()
    model.add(Convolution2D(4, 3, 3, input_shape=(img_width, img_height, 1)))
    model.add(Activation('relu'))
    model.add(Convolution2D(4, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Convolution2D(16, 3, 3))
    # model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Dense(classNum))
    model.add(Activation('softmax'))
    # test
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
            rescale=1./255,
            shear_range=0.2,
            zca_whitening=True,
            zoom_range=0.2,
            horizontal_flip=False)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1./255, zca_whitening=True)
    train_generator = train_datagen.flow_from_directory(
            trainDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
            validationDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=nb_epoch,
            validation_data=validation_generator,
            nb_val_samples=nb_validation_samples)
    return model
Code example #6
    def train_model(self):
        sgd=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy',
                optimizer=sgd,
                #optimizer='rmsprop',
                metrics=['accuracy'])
        # augment the training samples on the fly
        train_datagen = ImageDataGenerator(
        rescale = 1./255,
        shear_range = 0.2,
        zoom_range = 0.2,
        horizontal_flip=True)
        # only rescale (normalize) the validation set
        val_datagen = ImageDataGenerator(
                rescale = 1./255)
        eval_datagen = ImageDataGenerator(
                rescale = 1./255)
        # labels are derived from the class subdirectory names
        train_generator = train_datagen.flow_from_directory(
                root_path+'/train',
                target_size=(img_size,img_size),
                color_mode='grayscale',
                batch_size=batch_siz,
                class_mode='categorical')
        val_generator = val_datagen.flow_from_directory(
                root_path+'/val',
                target_size=(img_size,img_size),
                color_mode='grayscale',
                batch_size=batch_siz,
                class_mode='categorical')
        eval_generator = eval_datagen.flow_from_directory(
                root_path+'/test',
                target_size=(img_size,img_size),
                color_mode='grayscale',
                batch_size=batch_siz,
                class_mode='categorical')
        early_stopping = EarlyStopping(monitor='loss',patience=3)
        history_fit=self.model.fit_generator(
                train_generator,
                steps_per_epoch=800/(batch_siz/32),#28709
                nb_epoch=nb_epoch,
                validation_data=val_generator,
                validation_steps=2000,
                #callbacks=[early_stopping]
                )
#         history_eval=self.model.evaluate_generator(
#                 eval_generator,
#                 steps=2000)
        history_predict=self.model.predict_generator(
                eval_generator,
                steps=2000)
        with open(root_path+'/model_fit_log','w') as f:
            f.write(str(history_fit.history))
        with open(root_path+'/model_predict_log','w') as f:
            f.write(str(history_predict))
#         print("%s: %.2f%%" % (self.model.metrics_names[1], history_eval[1] * 100))
        print('model trained')
Code example #7
def train():
    """Use fine-tuning to train a network on a new dataset"""
    train_count = get_file_count(FLAGS.train_dir)
    class_count = len(glob.glob(FLAGS.train_dir + "/*"))
    val_count = get_file_count(FLAGS.val_dir)
    epochs = int(FLAGS.epochs)
    batch_size = int(FLAGS.batch_size)
    target_size = (int(FLAGS.resolution), int(FLAGS.resolution))

    train_datagen =  ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
    )
    test_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
    )

    train_generator = train_datagen.flow_from_directory(
        FLAGS.train_dir,
        target_size=target_size,
        batch_size=batch_size
    )

    validation_generator = test_datagen.flow_from_directory(
        FLAGS.val_dir,
        target_size=target_size,
        batch_size=batch_size
    )

    model = create_model(class_count) 
    model = freeze_layers(model)
    early_stopping = EarlyStopping(monitor='val_loss', patience=2)

    model.fit_generator(
        train_generator,
        steps_per_epoch=train_count/batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=val_count/batch_size,
        class_weight='auto',
        callbacks=[early_stopping]
    )

    model.save(FLAGS.output_model_file)
Code example #8
File: common.py  Project: lssxfy123/PythonStudy
def data_generator(target_size):
    train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=30,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
    valid_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(train_path, target_size=target_size,
                                                    batch_size=batch_size, class_mode='binary')
    valid_generator = valid_datagen.flow_from_directory(valid_path, target_size=target_size,
                                                    batch_size=batch_size, class_mode='binary')
    
    return train_generator, valid_generator
Code example #9
File: common.py  Project: lssxfy123/PythonStudy
def extract_features(base_model, target_size, preprocess):
    datagen = ImageDataGenerator(preprocessing_function=preprocess)
    train_generator = datagen.flow_from_directory(train_path, target_size=target_size,
                                                    batch_size=batch_size, class_mode='binary', shuffle=False)
    valid_generator = datagen.flow_from_directory(valid_path, target_size=target_size,
                                                    batch_size=batch_size, class_mode='binary', shuffle=False)
    train_features = base_model.predict_generator(train_generator, train_generator.samples // batch_size, verbose=1)
    valid_features = base_model.predict_generator(valid_generator, valid_generator.samples // batch_size, verbose=1)
    
    features_name = '{0}_features.npz'.format(base_model.name)
    np.savez(features_name,train=train_features, train_label=train_generator.classes,
             valid=valid_features, valid_label=valid_generator.classes)
    return features_name
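
The .npz file produced above can be reloaded later to train a classifier on the extracted features. A hedged sketch (the file name is whatever `extract_features` returned; the flattening step is only an illustration):

features = np.load(features_name)          # features_name as returned by extract_features
x_train, y_train = features['train'], features['train_label']
x_valid, y_valid = features['valid'], features['valid_label']

# features from a convolutional base are usually 4-D, so flatten them
# before fitting a small dense classifier on top
x_train = x_train.reshape(len(x_train), -1)
x_valid = x_valid.reshape(len(x_valid), -1)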
Code example #10
File: main3_ensembling.py  Project: Rhuax/dcsn
def base_model_finetuning():
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)

    predictions = Dense(8, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    """f = open('structure.json', 'w')
    f.write(model.to_json())
    f.close()"""
    # Freeze inception layers
    for layer in base_model.layers:
        layer.trainable = False

    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
    datagen = ImageDataGenerator(
        rotation_range=30,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.2,
        rescale=1. / 255,
        zoom_range=(0.8, 1.1),
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='nearest'
    )
    datagen2 = ImageDataGenerator(
        rescale=1. / 255
    )
    gen1 = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=16)

    gen2 = datagen2.flow_from_directory(validation_data_dir, target_size=(img_width, img_height),
                                        batch_size=batch_size)
    snapshot_ens=SnapshotCallbackBuilder(nb_epochs=20,nb_snapshots=5)


    model.fit_generator(gen1,

                        steps_per_epoch=nb_train_samples // 16,
                        epochs=20,
                        validation_data=gen2,
                        validation_steps=nb_validation_samples // 16,
                        callbacks=snapshot_ens.get_callbacks(model_prefix='inception_ens')
                        )
Code example #11
    def prepare_generators(self):
        training_data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
        validation_data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

        self.training_generator = training_data_generator.flow_from_directory(
            "datasets/current/training",
            target_size=(self.input_shape[0], self.input_shape[1]),
            batch_size=32
        )

        self.validation_generator = validation_data_generator.flow_from_directory(
            "datasets/current/validation",
            target_size=(self.input_shape[0], self.input_shape[1]),
            batch_size=32
        )
Code example #12
File: vae_conv_2.py  Project: stuartlynn/MLPlayground
def autoGenerator():
  image_loader = ImageDataGenerator(rescale=1./255.)
  loader = image_loader.flow_from_directory("/data/tiles/", color_mode='rgb',batch_size=batch_size, target_size=(256,256), class_mode='binary')
  for batch in loader:
    if np.isnan(batch[0]).any():
      print('problem with batch')
    yield (batch[0],np.copy(batch[0])) 
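
Since each yielded pair is (batch, copy_of_batch), this generator is intended for an autoencoder-style model. A hedged usage sketch (it assumes a compiled `autoencoder` model with 256x256x3 inputs and outputs; the step count is hypothetical):

autoencoder.fit_generator(autoGenerator(),
                          steps_per_epoch=500,   # depends on the size of /data/tiles/
                          epochs=10)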
Code example #13
File: img_clf.py  Project: jannson/Similar
def predict_labels(model):
    """writes test image labels and predictions to csv"""
    
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        shuffle=False,
        class_mode=None)

    base_path = "../data/test/test/"

    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(base_path):
            for im in imgs:
                pic_id = im.split(".")[0]
                img = load_img(base_path + im)
                img = imresize(img, size=(img_height, img_width))
                test_x = img_to_array(img).reshape(3, img_height, img_width)
                test_x = test_x.reshape((1,) + test_x.shape)
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1)[0][0]
                p_writer.writerow([pic_id, prediction])
Code example #14
File: common.py  Project: lssxfy123/PythonStudy
def extract_test_features(base_model, target_size, preprocess):
    datagen = ImageDataGenerator(preprocessing_function=preprocess)
    test_generator = datagen.flow_from_directory(test_path, target_size=target_size, batch_size=batch_size, class_mode=None, shuffle=False)
    test_features = base_model.predict_generator(test_generator, test_generator.samples // batch_size, verbose=1)
    
    test_features_name = 'test_{0}_features.npz'.format(base_model.name)
    np.savez(test_features_name, test=test_features, test_filename=test_generator.filenames)
    return test_features_name
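
The saved test features keep the generator's file order, so predictions can be mapped back to filenames. A hedged sketch (`top_model` is a hypothetical classifier trained on the train/valid features from the companion function):

data = np.load(test_features_name)         # test_features_name as returned above
x_test, filenames = data['test'], data['test_filename']
preds = top_model.predict(x_test)
for name, p in zip(filenames, preds[:, 0]):
    print(name, p)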
Code example #15
    def prepare_data(self):
        train_data_dir = dir_path + "/card_training"
        validation_data_dir = dir_path + "/card_testing"
        test_data_dir = validation_data_dir

        img_height = 50
        img_width = 15
        batch_size = 52

        train_datagen = ImageDataGenerator(
            rescale=0.02,
            shear_range=0.01,
            zoom_range=0.02,
            horizontal_flip=False)

        validation_datagen = ImageDataGenerator(
            rescale=0.01,
            shear_range=0.05,
            zoom_range=0.05,
            horizontal_flip=False)

        test_datagen = ImageDataGenerator(
            rescale=0.02)

        train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_height, img_width),
            batch_size=batch_size,
            class_mode='binary',
            color_mode='rgb')

        validation_generator = validation_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_height, img_width),
            batch_size=batch_size,
            class_mode='binary',
            color_mode='rgb')

        test_generator = test_datagen.flow_from_directory(
            test_data_dir,
            target_size=(img_height, img_width),
            batch_size=batch_size,
            class_mode='binary',
            color_mode='rgb')

        return train_generator, validation_generator, test_generator
Code example #16
def getDataGenerator(dir,  img_width, img_height, batch_size):
  datagen = ImageDataGenerator(rescale=1./255)
  generator = datagen.flow_from_directory(
        dir,  
        target_size=(img_width, img_height), 
        batch_size=batch_size,
        class_mode='categorical') 
  return generator
Code example #17
File: main3_ensembling.py  Project: Rhuax/dcsn
def test_ensembles():
    from keras.models import model_from_json
    m_prec=0
    m_rec=0
    m_fmeas=0
    m_acc=0
    for name_file in os.listdir('weights/'):
        f = open('backend_inceptionv2.json', 'r')
        model=None
        model = model_from_json(f.read())
        f.close()
        model.load_weights('weights/'+name_file)
        from keras.optimizers import SGD
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
        datagen2 = ImageDataGenerator(
            rescale=1. / 255
        )
        generator = datagen2.flow_from_directory(
            'test/',
            target_size=(img_width, img_height),
            batch_size=50,
            class_mode=None,
            shuffle=False)
        np.set_printoptions(suppress=True)
        predictions = model.predict_generator(generator, 8)
        index = 0
        confusion_matrix = np.zeros((8, 8))
        for i in predictions:
            true_class = index // 50
            confusion_matrix[np.argmax(i)][true_class] += 1
            index += 1
        tps = confusion_matrix.diagonal()
        fps = np.sum(confusion_matrix, (0))
        fps -= tps
        fns = np.sum(confusion_matrix, (1))
        fns -= tps
        precision = tps / (np.sum(confusion_matrix, (1)))
        recall = tps / (np.sum(confusion_matrix, (0)))
        accuracy = np.sum(tps) / (np.sum(confusion_matrix))
        f_measure = (2 * precision * recall) / (precision + recall)
        m_prec+=np.mean(precision)
        m_rec+=np.mean(recall)
        m_fmeas+=np.mean(f_measure)
        m_acc+=accuracy
        print('p:',end='')
        print(np.mean(precision))
        print('r:', end='')
        print(np.mean(recall))
        print('fm:', end='')
        print(np.mean(f_measure))
        print('a:', end='')
        print(accuracy)
        print('-------------')
    print('final precision ' + str(m_prec / 5))
    print('final recall ' + str(m_rec / 5))
    print('final fmeas ' + str(m_fmeas / 5))
    print('final accura ' + str(m_acc / 5))
Code example #18
File: helper.py  Project: shayanpr/Enhance
def Picgenerator(directory, batch_size=32, target=(256, 256)):
    generator_mod = ImageDataGenerator()
    generator = generator_mod.flow_from_directory(directory=directory, batch_size=batch_size,
                                                  target_size=(target[0], target[1]),
                                                  color_mode='rgb', class_mode=None)
    while True:
        batch = generator.next()
        y = batch.astype('float32') / 255.
        y_train = y  # np.array(vimresize(y, size=target))
        x_train = np.array(vimresize(y, size=(int(target[0]*0.25), int(target[1]*0.25))))
        yield x_train, y_train
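
Each yielded pair is a (downscaled, full-resolution) image batch, which suits a super-resolution setup. A hedged usage sketch (`sr_model` is assumed to be a compiled model mapping 64x64 inputs to 256x256 targets; the path and step count are placeholders):

sr_model.fit_generator(Picgenerator('data/images', batch_size=32),
                       steps_per_epoch=200,   # hypothetical; depends on the dataset size
                       epochs=10)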
Code example #19
File: vgg_bn.py  Project: jannson/Similar
    def test(self, test_path, nb_test_samples, aug=False):
        if aug:
            test_datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.05, zoom_range=0.05,
                                               channel_shift_range=10, height_shift_range=0.05, shear_range=0.05,
                                               horizontal_flip=True)
        else:
            test_datagen = ImageDataGenerator()

        test_gen = test_datagen.flow_from_directory(test_path, target_size=self.size, batch_size=self.batch_size,
                                                    class_mode=None, shuffle=False)

        return self.model.predict_generator(test_gen, val_samples=nb_test_samples), test_gen.filenames
Code example #20
File: dogdentity.py  Project: CharlesLynn/DogDenity
def fit_image_generators(train_data_dir, validation_data_dir, img_width, img_height):
    # Augmentation configuration used for training
    train_datagen = ImageDataGenerator(
        rescale=1.0 / 255,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode="nearest",
    )

    # Augmentation configuration used for testing:
    # only normalizing
    test_datagen = ImageDataGenerator(rescale=1.0 / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        # class_mode='binary',
        shuffle=True,
    )

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        # class_mode='binary',
        shuffle=True,
    )

    classes = train_generator.nb_class
    n_train_samples = train_generator.nb_sample
    n_val_samples = validation_generator.nb_sample

    return train_generator, validation_generator, classes, n_train_samples, n_val_samples
Code example #21
File: vgg_bn.py  Project: jannson/Similar
    def fit(self, trn_path, val_path, nb_trn_samples, nb_val_samples, nb_epoch=1, callbacks=None, aug=False):
        if aug:
            train_datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.05, zoom_range=0.05,
                                               channel_shift_range=10, height_shift_range=0.05, shear_range=0.05,
                                               horizontal_flip=True)
        else:
            train_datagen = ImageDataGenerator()

        trn_gen = train_datagen.flow_from_directory(trn_path, target_size=self.size, batch_size=self.batch_size,
                                                      class_mode='categorical', shuffle=True)

        val_gen = ImageDataGenerator().flow_from_directory(val_path, target_size=self.size, batch_size=self.batch_size,
                                                           class_mode='categorical', shuffle=True)

        self.model.fit_generator(trn_gen, samples_per_epoch=nb_trn_samples, nb_epoch=nb_epoch, verbose=2,
                validation_data=val_gen, nb_val_samples=nb_val_samples, callbacks=callbacks)
Code example #22
def get_categorical_labels(dir, feature_file):
    datagen_top = ImageDataGenerator(rescale=1. / 255)
    generator_top = datagen_top.flow_from_directory(
        dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False)

    num_classes = len(generator_top.class_indices)
    data = np.load(feature_file)

    labels = generator_top.classes

    labels = to_categorical(labels, num_classes=num_classes)
    return data, labels, num_classes
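
A hedged usage sketch: the returned feature array and one-hot labels can be fed straight to a small "top" classifier (`train_dir`, the feature file name, `epochs`, and `batch_size` are placeholders assumed to exist elsewhere):

data, labels, num_classes = get_categorical_labels(train_dir, 'bottleneck_features_train.npy')
top_model.fit(data, labels, epochs=epochs, batch_size=batch_size)  # top_model: any model accepting data.shape[1:] inputs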
Code example #23
File: evalmodel.py  Project: loganzkatz/identifido
def eval(tvta, torf, model_name):

    img_dir = G.DAT + tvta + '/'

    if torf == 'temp':
        temp_or_final = '/temp_model.hdf5' 
    else:
        temp_or_final = '/final_model.hdf5'
    model = load_model(G.MOD + model_name + temp_or_final)



    # parameters
    img_height, img_width = 128, 128

    # this is the augmentation configuration we will use for training
    datagen = ImageDataGenerator(
            rescale=1./255,
            fill_mode='constant')

    gen = datagen.flow_from_directory(
            img_dir,
            target_size=(img_width, img_height),
            batch_size=64,
            shuffle=False
            )

    actual = gen.classes
    predict = model.predict_generator(gen, gen.N)
    
    arr = actual[:, np.newaxis]
    final_arr = np.append(arr, arr, axis=1)

    for idx in range(len(actual)):
        final_arr[idx,1] = np.argmax(predict[idx])
        
    df = pd.DataFrame(final_arr, columns=['actual', 'pred'])    
    
    return df
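
The returned DataFrame pairs each true class index with the predicted one, so summary metrics follow directly. A hedged sketch (the arguments are placeholders for a real model name):

df = eval('validation', 'final', 'my_model')
accuracy = (df['actual'] == df['pred']).mean()
confusion = pd.crosstab(df['actual'], df['pred'])
print(accuracy)
print(confusion)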
Code example #24
File: data_loader.py  Project: puke3615/GanForFace
def load_data(batch_size=32, height=28, width=28, imgaug=False, path=PATH):
    if not os.path.exists(path):
        raise Exception('File folder "%s" not found' % path)
    generator = ImageDataGenerator(
        # samplewise_std_normalization=True,
        # samplewise_center=True,
        # channel_shift_range=15,
        horizontal_flip=True,
        # rotation_range=15,
        # width_shift_range=.2,
        # height_shift_range=.2,
        # zoom_range=.01,
    )
    iterator = generator.flow_from_directory(
        path, target_size=(height, width), batch_size=batch_size)
    if imgaug:
        while True:
            batch, _ = next(iterator)
            batch = img_aug(batch)
            yield batch, _
    else:
        while True:
            yield next(iterator)
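
`load_data` is itself an endless generator of (images, labels) batches; a hedged usage sketch:

gen = load_data(batch_size=64, height=28, width=28)
images, labels = next(gen)
print(images.shape)   # e.g. (64, 28, 28, 3), depending on the directory contents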
Code example #25
x = Dropout(0.5)(x)
x = Dense(100)(x)
x = BatchNormalization()(x)
predictions = Softmax()(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

##########################
# data generator
##########################
train_gen = ImageDataGenerator(preprocessing_function=preprocess_input)

test_gen = ImageDataGenerator(preprocessing_function=preprocess_input)

train_generator = train_gen.flow_from_directory("./data/train", (299, 299),
                                                batch_size=16)

test_generator = test_gen.flow_from_directory("./data/test", (299, 299),
                                              shuffle=False,
                                              batch_size=16)

##########################
# freeze some layers
##########################

# for layer in model.layers[:56]:
#     layer.trainable = False

# for i, layer in enumerate(model.layers):
#     print(i, layer.name, layer.trainable)
Code example #26
import call_model


warnings.filterwarnings(
    action='ignore',
    category=UserWarning,
    module=r'.*TiffImagePlugin'
)

train_datagen = ImageDataGenerator(rescale=1./255,
    validation_split=0.3)

train_generator = train_datagen.flow_from_directory(
    TRAIN_AUG_PATH,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset='training',
    shuffle=True,
    seed=42)

validation_generator = train_datagen.flow_from_directory(
    TRAIN_AUG_PATH,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset='validation',
    shuffle=True,
    seed=42)


tmp = pd.DataFrame(columns=['ClassId', 'ModelId', 'SignName'])
Code example #27
if K.image_data_format() == 'channels_first':
    input_shape = (3, image_width, image_height)
else:
    input_shape = (image_width, image_height, 3)

train_data_gen = ImageDataGenerator(rescale=1. / 255,
                                    rotation_range=40,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True)

validation_data_gen = ImageDataGenerator(rescale=1. / 255)

train_data_generator = train_data_gen.flow_from_directory(
    train_data_path,
    target_size=(image_width, image_height),
    batch_size=batch_size,
    class_mode='categorical')

validation_data_generator = validation_data_gen.flow_from_directory(
    validation_data_path,
    target_size=(image_width, image_height),
    batch_size=batch_size,
    class_mode='categorical')

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
Code example #28
model.add(layers.Dense(4, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['categorical_accuracy'])

# train_data processing, generator
BATCH_SIZE = 16

train_data_gen = ImageDataGenerator(rescale=1. / 255)
validation_data_gen = ImageDataGenerator(rescale=1. / 255)
test_data_gen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_data_gen.flow_from_directory(
    '../blackpink',
    target_size=(80, 80),  # All image resize
    batch_size=BATCH_SIZE,
    class_mode='categorical')

validation_gen = validation_data_gen.flow_from_directory(
    '../blackpink',
    target_size=(80, 80),
    batch_size=BATCH_SIZE,
    class_mode='categorical')

test_gen = test_data_gen.flow_from_directory('../test_data/image',
                                             target_size=(80, 80),
                                             batch_size=BATCH_SIZE,
                                             class_mode='categorical')

# training
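
The example stops after the "# training" comment; a plausible (not original) continuation would fit the generators to the model, for instance:

model.fit_generator(train_generator,
                    steps_per_epoch=train_generator.samples // BATCH_SIZE,
                    epochs=20,   # hypothetical epoch count
                    validation_data=validation_gen,
                    validation_steps=validation_gen.samples // BATCH_SIZE)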
Code example #29
from keras.optimizers import Adam
from keras.layers import MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import cv2
import numpy as np

#Initialize the training and validation generators
train_dir = 'train'
val_dir = 'test'
train_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(48, 48),
                                                    batch_size=64,
                                                    color_mode="grayscale",
                                                    class_mode='categorical')

validation_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')

#Build model
model = Sequential()

model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(48, 48, 1)))
model.add(MaxPooling2D(2, 2))
Code example #30
import numpy as np # linear algebra
from keras.models import Sequential
from keras.layers import Dense,Conv2D,MaxPooling2D,Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
import matplotlib.pyplot as plt
import tensorflow as tf



train_datagen= ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip= True)
valid_datagen= ImageDataGenerator(rescale=1./255)

train_set= train_datagen.flow_from_directory('./dataset/train', 
                                             target_size=(80,80), batch_size=28, class_mode='categorical', 
                                             shuffle=True, seed=20)
valid_set= valid_datagen.flow_from_directory('./dataset/validation', 
                                             target_size=(80,80), batch_size=22, class_mode='categorical', 
                                             shuffle=False)


tf.compat.v1.disable_eager_execution()
model=Sequential()
model.add(Conv2D(64, (3,3), input_shape=(80,80,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))

model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))

model.add(Flatten())
Code example #31
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# THE FOLLOWING CODE WILL LOAD THE TRAINING AND VALIDATION DATA TO YOUR MODEL NAMED model
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=90,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=nb_validation_samples // batch_size)
Code example #32
File: flowers_aug.py  Project: tsycnh/PythonML
    Dense(512,activation='relu'),
    Dense(17,activation='softmax')
])
model.summary()

model.compile(loss='categorical_crossentropy',optimizer=optimizers.RMSprop(lr=1e-4),metrics=['acc'])

train_datagen = ImageDataGenerator(rescale=1./255,rotation_range=90,width_shift_range=0.2
                                   ,height_shift_range=0.2,
                                   shear_range=0.1,zoom_range=0.2,
                                   horizontal_flip=True,
                                   vertical_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory("C:\\All\\Data\\flowers17_tsycnh\\train",
                                                    target_size=(150,150),
                                                    batch_size=20,
                                                    class_mode='categorical')

val_generator = val_datagen.flow_from_directory("C:\\All\\Data\\flowers17_tsycnh\\validation",
                                                    target_size=(150,150),
                                                    batch_size=20,
                                                    class_mode='categorical')
for a,b in train_generator:
    print(a.shape)
    print(b.shape)
    break
import time,os
format_time = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime())
log_dir = "./logs/"+format_time
os.mkdir(log_dir)
history = model.fit_generator(train_generator,len(train_generator),
Code example #33
train_dir = r"D:\文件\book\dogs-vs-cats\test_model\train"
validation_dir = r"D:\文件\book\dogs-vs-cats\test_model\validation"

# data preprocessing
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(150, 150),
                                                    batch_size=20,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(150, 150),
                                                        batch_size=20,
                                                        class_mode='binary')

model = dog_cat_model()
# print(model.summary())

history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50)
Code example #34
File: cnn_02.py  Project: aftaba/hello-world
              optimizer='rmsprop',
metrics=['accuracy'])

# THE FOLLOWING CODE WILL LOAD THE TRAINING AND VALIDATION DATA TO YOUR MODEL NAMED model
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=90,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
Code example #35
                                   rescale=1./255,
                                   #shear_range=0.2,
                                   #zoom_range=0.2,
                                   #horizontal_flip=True)
                                   )

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(#samplewise_std_normalization=True,
                                  #samplewise_center=True,
                                  rescale=1./255)


train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width, img_height),
                                                    batch_size=64,
                                                    color_mode="grayscale",
                                                    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
                                                        
                                                        validation_data_dir,
                                                        target_size=(img_width, img_height),
                                                        batch_size=64,
                                                        color_mode="grayscale",
                                                        class_mode='categorical')

model.fit_generator(
                    train_generator,
                    samples_per_epoch=nb_train_samples,
                    nb_epoch=nb_epoch,
Code example #36
def train_top_model():
    print("In train_top_model")
    datagen_top = ImageDataGenerator(rescale=1. / 255)
    generator_top = datagen_top.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    shuffle=False)

    nb_train_samples = len(generator_top.filenames)
    num_classes = len(generator_top.class_indices)

    # save the class indices to use later in predictions
    np.save(config.classIndex, generator_top.class_indices)

    # load the bottleneck features saved earlier
    train_data = np.load(config.trainFeature)

    # get the class labels for the training data, in the original order
    train_labels = generator_top.classes

    # https://github.com/fchollet/keras/issues/3467
    # convert the training labels to categorical vectors
    train_labels = to_categorical(train_labels, num_classes=num_classes)

    generator_top = datagen_top.flow_from_directory(validation_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode=None,
                                                    shuffle=False)

    nb_validation_samples = len(generator_top.filenames)

    validation_data = np.load(config.validationFeature)

    validation_labels = generator_top.classes
    validation_labels = to_categorical(validation_labels,
                                       num_classes=num_classes)

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='sigmoid'))

    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(train_data,
                        train_labels,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_data=(validation_data, validation_labels))

    model.save_weights(top_model_weights_path)

    (eval_loss, eval_accuracy) = model.evaluate(validation_data,
                                                validation_labels,
                                                batch_size=batch_size,
                                                verbose=1)

    print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
    print("[INFO] Loss: {}".format(eval_loss))

    plt.figure(1)

    # summarize history for accuracy

    plt.subplot(211)
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')

    # summarize history for loss

    plt.subplot(212)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

    ans = input("would you like to save the bottleneck features?(y/n)"
                )  ###############
    if ans == 'y':
        print("saving files")
        remove('backup/' + config.top_model_weights_path)
        copyfile(config.top_model_weights_path,
                 'backup/' + config.top_model_weights_path)

        remove('backup/' + config.trainFeature)
        copyfile(config.trainFeature, 'backup/' + config.trainFeature)

        remove('backup/' + config.validationFeature)
        copyfile(config.validationFeature,
                 'backup/' + config.validationFeature)

        remove('backup/' + config.classIndex)
        copyfile(config.classIndex, 'backup/' + config.classIndex)

        rmtree('backup/' + config.trainFile)
        copytree(config.trainFile, 'backup/' + config.trainFile)

        rmtree('backup/' + config.validationFile)
        copytree(config.validationFile, 'backup/' + config.validationFile)

        plot = input("Filename:- ")
        if path.exists(plot):
            remove(plot)
        plt.savefig('foo.png')
    else:
        print("recovering files")
        remove(config.top_model_weights_path)
        copyfile('backup/' + config.top_model_weights_path,
                 config.top_model_weights_path)

        remove(config.trainFeature)
        copyfile('backup/' + config.trainFeature, config.trainFeature)

        remove(config.validationFeature)
        copyfile('backup/' + config.validationFeature,
                 config.validationFeature)

        remove(config.classIndex)
        copyfile('backup/' + config.classIndex, config.classIndex)

        rmtree(config.trainFile)
        copytree('backup/' + config.trainFile, config.trainFile)

        rmtree(config.validationFile)
        copytree('backup/' + config.validationFile, config.validationFile)
Code example #37
  optimizer='adam',
  metrics=['accuracy']
)


from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)

test_datagen = ImageDataGenerator(rescale = 1./255)

training_set = train_datagen.flow_from_directory('Datasets/Train',
                                                 target_size = (224, 224),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')

test_set = test_datagen.flow_from_directory('Datasets/Test',
                                            target_size = (224, 224),
                                            batch_size = 32,
                                            class_mode = 'categorical')

'''r=model.fit_generator(training_set,
                         samples_per_epoch = 8000,
                         nb_epoch = 5,
                         validation_data = test_set,
                         nb_val_samples = 2000)'''

# fit the model
r = model.fit_generator(
Code example #38
mean_flag = True # if False, then the mean subtraction layer is not prepended

#code ported from https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

test_datagen = ImageDataGenerator()
                                  

train_generator = train_datagen.flow_from_directory(
       '/home/aleon/data/cats_dogs/train/',
        batch_size=batch_size,
        shuffle=True,
        target_size=input_size[1:],
        class_mode='categorical')  

validation_generator = test_datagen.flow_from_directory(
        '/home/aleon/data/cats_dogs/val/',  
        batch_size=batch_size,
        target_size=input_size[1:],
        shuffle=True,
        class_mode='categorical')


from keras.optimizers import SGD
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='mse',
 #             optimizer=sgd,
Code example #39
File: train.py  Project: vascokk/vball
        Convolution2D(64,(3,3), activation='relu'),
        MaxPooling2D(),
        Flatten(),
        Dense(64, activation='relu'),
        Dropout(0.1),
        Dense(2, activation='softmax')
      ])

    opt = SGD(lr=0.01)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model



input_shape = (size, size, 3)

EPOCHS = 50
cls_n = 2

model = createModel(input_shape, cls_n)

train_datagen = ImageDataGenerator(rescale = 1./255)

training_set = train_datagen.flow_from_directory("train", color_mode="rgb", target_size = (size, size), batch_size = 32, class_mode = 'categorical')
model.fit_generator(training_set, steps_per_epoch = 20, epochs = EPOCHS, validation_steps = 10)

model_json = model.to_json()
with open("./model.json","w") as json_file:
  json_file.write(model_json)

model.save_weights("./model.h5")
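
The saved architecture and weights can later be restored without rebuilding the model. A hedged sketch (it assumes `SGD` is importable as in the training script above):

from keras.models import model_from_json

with open("./model.json") as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights("./model.h5")
restored.compile(loss="categorical_crossentropy", optimizer=SGD(lr=0.01), metrics=["accuracy"])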
Code example #40
import sys
from keras.layers import Input
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras import applications
from keras.models import Sequential, Model, load_model

fp = sys.argv[1]

#train
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
training_set = train_datagen.flow_from_directory(fp,
                                                 target_size=(229, 229),
                                                 batch_size=32,
                                                 class_mode='categorical')

##test
#test_datagen = ImageDataGenerator(rescale=1./255)
#test_set = test_datagen.flow_from_directory('sub_imagenet/val',target_size=(229,229),batch_size=32,class_mode='categorical')

t_model = applications.inception_resnet_v2.InceptionResNetV2(
    weights='imagenet', include_top=False, input_shape=(229, 229, 3))

for layer in t_model.layers:
    layer.trainable = False

x = t_model.output
#x = GlobalAveragepooling2D()(x)
x = Dropout(0.3)(x)
Code example #41
model.add(Dense(64))
model.add(Activation("relu"))
model.add(Dropout(0.5))

model.add(Dense(4))
model.add(Activation("softmax"))

model.compile(loss="categorical_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])

train_image_data_generator = ImageDataGenerator(rescale=1. / 255)

train_image_data_flow = train_image_data_generator.flow_from_directory(
    "./Images/Training",
    target_size=(dimensions[0], dimensions[1]),
    batch_size=batch_size,
    class_mode="categorical")

validation_image_data_generator = ImageDataGenerator(rescale=1. / 255)

validation_image_data_flow = validation_image_data_generator.flow_from_directory(
    "./Images/Validation",
    target_size=(dimensions[0], dimensions[1]),
    batch_size=batch_size,
    class_mode="categorical")

sample_amount = 3399
validation_sample_amount = 1021
epochs = 50
epoch_steps = sample_amount // batch_size
Code example #42
def train():

    script_dir = os.path.abspath(os.path.dirname(__file__))
    dir = os.path.join(script_dir, "../../" + model_file_name)

    # Initialising the CNN
    exists = os.path.isfile(dir)

    if exists:
        print('Loading model from file')
        classifier = load_model(dir)
    else:

        classifier = Sequential()

        # Step 1 - Convolution
        classifier.add(
            Conv2D(32, (3, 3), input_shape=(128, 128, 3), activation='relu'))

        # Step 2 - Pooling
        classifier.add(MaxPooling2D(pool_size=(2, 2)))

        # Adding a second convolutional layer
        classifier.add(Conv2D(32, (3, 3), activation='relu'))
        classifier.add(MaxPooling2D(pool_size=(2, 2)))

        # Step 3 - Flattening
        classifier.add(Flatten())

        # Step 4 - Full connection
        classifier.add(Dense(units=128, activation='relu'))
        classifier.add(Dense(units=10, activation='sigmoid'))

        # Compiling the CNN
        classifier.compile(optimizer='adam',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

        # Part 2 - Fitting the CNN to the images

        from keras.preprocessing.image import ImageDataGenerator

        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        training_set = train_datagen.flow_from_directory(
            'digits_dataset/training_set',
            target_size=(128, 128),
            batch_size=32)

        test_set = test_datagen.flow_from_directory('digits_dataset/test_set',
                                                    target_size=(128, 128),
                                                    batch_size=32)

        classifier.fit_generator(training_set,
                                 steps_per_epoch=1000,
                                 epochs=3,
                                 validation_data=test_set,
                                 validation_steps=300)

        print('Saving model!')
        classifier.save(dir)
        print('Save success!')
Code example #43
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(150, 150),
                                                    batch_size=20,
                                                    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='categorical')

test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(150, 150),
                                                  batch_size=20,
                                                  class_mode='categorical',
                                                  shuffle=False)

model.compile(loss='categorical_crossentropy',
Code example #44
# The training dataset consists of 2000 images of dogs and cats, split
# in half.  In addition, the validation set consists of 1000 images,
# and the test set of 22000 images.

datapath = "/wrk/makoskel/dogs-vs-cats/train-2000"
(nimages_train, nimages_validation, nimages_test) = (2000, 1000, 22000)

input_image_size = (150, 150)

noopgen = ImageDataGenerator(rescale=1. / 255)

batch_size = 25

print('Test: ', end="")
test_generator = noopgen.flow_from_directory(datapath + '/test',
                                             target_size=input_image_size,
                                             batch_size=batch_size,
                                             class_mode='binary')

# ### Initialization

if len(sys.argv) < 2:
    print('ERROR: model file missing')
    sys.exit()

model = load_model(sys.argv[1])

print(model.summary())

# ### Inference

workers = 14
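
A hedged inference sketch for the loaded model, reusing the test generator and the `workers` value defined above (the step count is only an estimate based on the sizes given earlier):

predictions = model.predict_generator(test_generator,
                                      steps=nimages_test // batch_size,
                                      workers=workers,
                                      use_multiprocessing=True,
                                      verbose=1)
print(predictions.shape)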
Code example #45
# Fitting the neural network for the images

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(
                  'dataset/training_set',
                  target_size=(64, 64),
                  batch_size=32,
                  class_mode='binary')

test_set = test_datagen.flow_from_directory(
        'dataset/test_set',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')


###### EXECUTE THIS TO TRAIN MODEL ##############
#classifier.fit_generator( training_set,
 #                   steps_per_epoch=8000,
  #                  epochs=25,
   #                 validation_data=test_set,
Code example #46
def get_imagenet(train_path,
                 test_path,
                 save_path,
                 filename=None,
                 class_idx_path=None):
    """Load imagenet classification dataset.

    Values are normalized and saved as ``float32`` type. Class vectors are
    converted to binary class matrices.

    Three compressed files ``path/filename_x_norm.npz``,
    ``path/filename_x_test.npz``, and ``path/filename_y_test.npz`` are created.
    With data of the form (channels, num_rows, num_cols), ``x_norm`` and
    ``x_test`` have dimension (num_samples, channels, num_rows, num_cols).
    ``y_test`` has dimension (num_samples, num_classes).

    Parameters
    ----------

    train_path : str
        The path of the training data.
    test_path : str
        The path of the testing data (the validation split is used).
    save_path : str
        If a ``path`` is given, the loaded and modified dataset is saved to
        the ``path`` directory.
    filename : Optional[str]
        Basename of the files to create. Individual files will be appended
        ``_x_norm``, ``_x_test``, etc.
    class_idx_path : Optional[str]
        Path to a JSON file mapping class indices to (class id, class name)
        pairs; if given, it fixes the ordering of the classes.
    """

    if not os.path.isdir(train_path):
        raise ValueError("Training dataset not found!")
    if not os.path.isdir(test_path):
        raise ValueError("Testing dataset not found!")
    if not os.path.isdir(save_path):
        os.makedirs(save_path)

    target_size = (299, 299)
    num_norm_samples = 10
    num_test_samples = 5000

    datagen = ImageDataGenerator()
    # train_dataflow = datagen.flow_from_directory(train_path,
    #                                              target_size=target_size,
    #                                              batch_size=num_norm_samples)
    # x_train, y_train = train_dataflow.next()
    #
    # x_train /= 255.
    # x_train -= 0.5
    # x_train *= 2.

    if class_idx_path:
        import json
        class_idx = json.load(open(class_idx_path, "r"))
        classes = [class_idx[str(idx)][0] for idx in range(len(class_idx))]
    else:
        classes = None

    test_dataflow = datagen.flow_from_directory(test_path,
                                                target_size=target_size,
                                                classes=classes,
                                                batch_size=num_test_samples)
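    # NOTE: flow_from_directory yields batches indefinitely, so this loop needs
    # an explicit stopping condition (e.g. break once the whole test set has
    # been written out); otherwise it keeps saving numbered files forever.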
    for i, (x_test, y_test) in enumerate(test_dataflow):

        x_test = np.add(np.multiply(x_test, 2. / 255.), -1.)

        if filename is None:
            filename = ''
        filepath = os.path.join(save_path, filename)
        step = int(len(x_test) / num_norm_samples)
        np.savez_compressed(filepath + 'x_test' + str(i),
                            x_test.astype('float32'))
        np.savez_compressed(filepath + 'y_test' + str(i),
                            y_test.astype('float32'))
        if i == 0:
            np.savez_compressed(filepath + 'x_norm',
                                x_test[::step].astype('float32'))
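# Hedged usage sketch (the paths and file names below are placeholders, not
# taken from the original project):
#
# get_imagenet('/data/imagenet/train', '/data/imagenet/val', '/data/imagenet/npz',
#              filename='imagenet', class_idx_path='imagenet_class_index.json')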
Code Example #47
0
model = Model(inputs=base_model.input, outputs=preds)

for i, layer in enumerate(model.layers):
    print(i, layer.name)

for layer in model.layers[:197]:
    layer.trainable = False
for layer in model.layers[197:]:
    layer.trainable = True

train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

train_generator = train_datagen.flow_from_directory('/content/train_test/train/',
                                                    color_mode='rgb',
                                                    batch_size=17,
                                                    class_mode='categorical',
                                                    shuffle=True)

checkpoint_path = "/content/gdrive/My Drive/Syrus/weights_inception_full/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)


# Create checkpoint callback
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path, verbose=1, save_weights_only=True,
    # Save weights every 5 epochs.
    period=5)

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.load_weights('/content/gdrive/My Drive/Syrus/weights_inception_full/cp-0030.ckpt')
Code Example #48
0
    def __init__(
        self,
        violation_class,
        train_mode,
        pre_trained_model,
        nb_of_conv_layers_to_fine_tune,
        weights_to_file,
        first_phase_trained_weights,
        nb_of_epochs,
        modelCheckpoint_quantity,
        earlyStopping_quantity,
        CSVLogger_filename,
    ):

        # extra check for the case when fine-tuning is selected without providing the correct first_phase_trained_weights.
        if nb_of_conv_layers_to_fine_tune in {
                1, 2, 3
        } and first_phase_trained_weights is None:
            raise ValueError(
                'The `first_phase_trained_weights` argument can be set to None only when '
                '`nb_of_conv_layers_to_fine_tune` is None (feature extraction). '
                'When `nb_of_conv_layers_to_fine_tune` is 1, 2 or 3, '
                'the weights of an already trained feature extraction model must be saved prior to fine-tuning the model.'
            )

        # Base directory for saving the trained models
        self.trained_models_dir = '/home/gkallia/git/Human-Rights-Violations-Conceptron/trained_models'
        self.feature_extraction_dir = os.path.join(self.trained_models_dir,
                                                   'feature_extraction/')
        self.fine_tuning_dir = os.path.join(self.trained_models_dir,
                                            'fine_tuning/')
        self.logs_dir = os.path.join(self.trained_models_dir, 'logs/')

        if violation_class == 'cl':
            self.train_dir = os.path.join(
                '/home/gkallia/git/Human-Rights-Violations-Conceptron/datasets/Two-class-HRV/ChildLabour',
                'train')
            self.val_dir = os.path.join(
                '/home/gkallia/git/Human-Rights-Violations-Conceptron/datasets/Two-class-HRV/ChildLabour',
                'val')

        elif violation_class == 'dp':
            self.train_dir = os.path.join(
                '/home/gkallia/git/Human-Rights-Violations-Conceptron/datasets/Two-class-HRV/DisplacedPopulations',
                'train')
            self.val_dir = os.path.join(
                '/home/gkallia/git/Human-Rights-Violations-Conceptron/datasets/Two-class-HRV/DisplacedPopulations',
                'val')

        # Augmentation configuration with only rescaling.
        # Rescale is a value by which we will multiply the data before any other processing.
        # Our original images consist of RGB coefficients in the 0-255 range, but such values would
        # be too high for our models to process (given a typical learning rate),
        # so we target values between 0 and 1 instead by scaling with a 1/255. factor.
        datagen = ImageDataGenerator(rescale=1. / 255)

        img_width, img_height = 224, 224

        self.train_batch_size = 21
        self.val_batch_size = 10

        print('[INFO] Setting up image data generators...')

        self.train_generator = datagen.flow_from_directory(
            self.train_dir,
            target_size=(img_width, img_height),
            class_mode='categorical',
            shuffle=False,
            batch_size=self.train_batch_size)

        self.val_generator = datagen.flow_from_directory(
            self.val_dir,
            target_size=(img_width, img_height),
            class_mode='categorical',
            shuffle=False,
            batch_size=self.val_batch_size)

        num_classes = len(self.train_generator.class_indices)

        print('[INFO] Number of classes: ', num_classes)

        self.nb_train_samples = len(self.train_generator.filenames)
        # train_labels = self.train_generator.classes
        # self.train_labels = to_categorical(train_labels, num_classes=num_classes)
        # self.predict_size_train = int(math.ceil(self.nb_train_samples / self.train_batch_size))

        print('[INFO] Number of train samples: ', self.nb_train_samples)

        # print('[INFO] Predict size train: ', self.predict_size_train)

        # save the class indices to use later in predictions
        # np.save('class_indices.npy', self.train_generator.class_indices)

        self.nb_val_samples = len(self.val_generator.filenames)
        # val_labels = self.val_generator.classes
        # self.val_labels = to_categorical(val_labels, num_classes=num_classes)
        # self.predict_size_test = int(math.ceil(self.nb_val_samples / self.val_batch_size))

        print('[INFO] Number of test samples: ', self.nb_val_samples)
        # print('[INFO] Predict size test: ', self.predict_size_test)

        self.steps_per_epoch = self.nb_train_samples // self.train_batch_size
        self.val_steps = self.nb_val_samples // self.val_batch_size

        # -------------------------------------------------------------------------------- #
        #                                Usage of callbacks
        # -------------------------------------------------------------------------------- #

        self.train_mode = train_mode
        self.pre_trained_model = pre_trained_model
        self.nb_of_conv_layers_to_fine_tune = nb_of_conv_layers_to_fine_tune
        self.weights_to_file = weights_to_file
        self.first_phase_trained_weights = first_phase_trained_weights
        self.nb_of_epochs = nb_of_epochs
        # self.modelCheckpoint_quantity = modelCheckpoint_quantity
        # self.earlyStopping_quantity = earlyStopping_quantity
        # self.CSVLogger_filename = CSVLogger_filename

        # self.steps_per_epoch = self.nb_train_samples // self.train_batch_size
        #
        #
        # self.val_steps = self.nb_val_samples // self.val_batch_size

        # CSVLogger
        model_log = 'trained_models/logs/' + CSVLogger_filename
        csv_logger = CSVLogger(model_log, append=True, separator=',')

        # ModelCheckpoint
        checkpointer = ModelCheckpoint(filepath=weights_to_file,
                                       monitor=modelCheckpoint_quantity,
                                       verbose=1,
                                       save_best_only=True,
                                       mode='auto',
                                       period=1,
                                       save_weights_only=True)

        early_stop = EarlyStopping(monitor=earlyStopping_quantity,
                                   patience=5,
                                   mode='auto')

        self.callbacks_list = [checkpointer, early_stop, csv_logger]
Code Example #49
0
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        class_mode='categorical')
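# Note: samples_per_epoch, nb_epoch and nb_val_samples are Keras 1 argument
# names; the Keras 2 equivalents are steps_per_epoch (batches per epoch),
# epochs and validation_steps.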

history = model.fit_generator(
        train_generator,
        samples_per_epoch=train_samples,
        nb_epoch=epoch,
        validation_data=validation_generator,
        nb_val_samples=validation_samples)
Code Example #50
0
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
print(classifier.summary())

#%% Image Augmentation
train_data = ImageDataGenerator(rescale=1. / 255,
                                zca_whitening=False,
                                rotation_range=35,
                                shear_range=0.2,
                                zoom_range=0.2,
                                horizontal_flip=True,
                                vertical_flip=True)
test_data = ImageDataGenerator(rescale=1. / 255)
training_set = train_data.flow_from_directory('training_set',
                                              target_size=(IMAGE_SIZE,
                                                           IMAGE_SIZE),
                                              batch_size=64,
                                              class_mode='categorical')
test_set = test_data.flow_from_directory('test_set',
                                         target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                         batch_size=64,
                                         class_mode='categorical')

#%% Checkpoints
checkpoint = ModelCheckpoint(
    'checkpoints/best_model_improved_cp.h5',  # model filename
    monitor='val_acc',  # quantity to monitor
    verbose=0,  # verbosity - 0 or 1
    save_best_only=True,  # The latest best model will not be overwritten
    mode='auto')
Code Example #51
0
def save_bottlebeck_features():
    np.random.seed(2929)

    vgg_model = applications.VGG16(weights='imagenet',
                                   include_top=False,
                                   input_shape=(150, 150, 3))
    print('Model loaded.')

    #initialise top model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg_model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))

    model = Model(inputs=vgg_model.input, outputs=top_model(vgg_model.output))

    model.trainable = True

    model.summary()

    # The model has 20 layers in total; the classification block counts as one
    # layer, so there are 19 intermediate layers.
    # Freezing groups (0-5): group 0 freezes nothing, group 1 freezes layers [:4],
    # group 2 freezes [:7], group 3 freezes [:11], group 4 freezes [:15],
    # group 5 freezes [:19].
    # Group 0 -> everything trainable; group 5 -> everything frozen except the
    # classification layer (layer 20), which is always kept trainable.
    #layer_count = 1
    for layer in model.layers[:7]:
        layer.trainable = False
    #print("NO-Top: Layer is %d trainable" %layer_count)
    #layer_count = layer_count + 1

    model.summary()

    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    # NOTE: despite the original variable name, the optimizer used here is Adam
    # (the SGD alternative is kept commented out).
    opt = optimizers.Adam(
        lr=1e-6
    )  # optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=['accuracy'])

    #        model.compile(optimizer='rmsprop',
    #            loss='binary_crossentropy', metrics=['accuracy'])

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        verbose=1)

    history_dict = history.history

    # Plotting the training and validation loss
    loss_values = history_dict['loss']
    val_loss_values = history_dict['val_loss']
    epochs_0 = range(1, len(history_dict['acc']) + 1)
    plt.plot(epochs_0, loss_values, 'bo', label='Training loss')
    plt.plot(epochs_0, val_loss_values, 'b', label='Validation loss')
    plt.title(
        'ADvsNM_32_VGG16_Freeze_data3_group2 - Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    #plt.show()
    plt.savefig('ADvsNM_32_VGG16_Freeze_data3_group2_loss.png')
    plt.close()

    #Plotting the training and validation accuracy
    acc_values = history_dict['acc']
    val_acc_values = history_dict['val_acc']
    plt.plot(epochs_0, acc_values, 'bo', label='Training acc')
    plt.plot(epochs_0, val_acc_values, 'b', label='Validation acc')
    plt.title(
        'ADvsNM_32_VGG16_Freeze_data3_group2 - Training and validation accuracy'
    )
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    #plt.show()
    plt.savefig('ADvsNM_32_VGG16_Freeze_data3_group2_acc.png')
    plt.close()
Code Example #52
0
train_datagen = ImageDataGenerator(
      rescale=1./255,
      rotation_range=45,
      width_shift_range=0.3,
      height_shift_range=0.3,
      horizontal_flip=True,
      fill_mode='nearest')
 
validation_datagen = ImageDataGenerator(rescale=1./255)
 
# set our batch size (typically on most mid tier systems we'll use 16-32)
batch_size = 32
 
train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical')
 
validation_generator = validation_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical')


# ### Training our Model
# - Note we're using checkpointing and early stopping (see the hedged sketch below)
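# The training call itself is not shown in this excerpt; below is a hedged
# sketch (not from the source) of how checkpointing and early stopping could
# be wired up here. The `model` object, file name, epoch count and patience
# are assumptions; train_generator, validation_generator and batch_size are
# defined above.
from keras.callbacks import ModelCheckpoint, EarlyStopping

checkpoint = ModelCheckpoint('best_model.h5', monitor='val_loss',
                             save_best_only=True, verbose=1)
early_stop = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=25,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    callbacks=[checkpoint, early_stop])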

# In[ ]:
gen = ImageDataGenerator(
  rotation_range=20,
  width_shift_range=0.1,
  height_shift_range=0.1,
  shear_range=0.1,
  zoom_range=0.2,
  horizontal_flip=True,
  vertical_flip=True,
  preprocessing_function=preprocess_input
)


# test generator to see how it works and some other useful things

# get label mapping for confusion matrix plot later
test_gen = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE)
print(test_gen.class_indices)
labels = [None] * len(test_gen.class_indices)
for k, v in test_gen.class_indices.items():
  labels[v] = k

# should be a strangely colored image (due to VGG weights being BGR)
for x, y in test_gen:
  print("min:", x[0].min(), "max:", x[0].max())
  plt.title(labels[np.argmax(y[0])])
  plt.imshow(x[0])
  plt.show()
  break


# create generators
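# The excerpt stops before the generators are actually built; a hedged sketch
# follows. `train_path` and the batch size are assumptions (only `valid_path`,
# `gen` and `IMAGE_SIZE` appear above), and a separate, non-augmenting
# generator is often preferred for the validation split.
train_generator = gen.flow_from_directory(train_path,
                                          target_size=IMAGE_SIZE,
                                          batch_size=32,
                                          class_mode='categorical',
                                          shuffle=True)
valid_generator = gen.flow_from_directory(valid_path,
                                          target_size=IMAGE_SIZE,
                                          batch_size=32,
                                          class_mode='categorical',
                                          shuffle=False)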
Code Example #54
0
"""
Created on Tue Oct 29 09:54:11 2019

@author: Grupo 7
"""
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
newModel = tf.keras.models.load_model('Red_face6')
newModel.summary()

train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(
    'C:/Users/julia/Desktop/training_set_short',
    target_size=(128, 128), batch_size=32, class_mode='binary')
test_set = test_datagen.flow_from_directory(
    'C:/Users/julia/Desktop/test_set_short',
    target_size=(128, 128), batch_size=32, class_mode='binary')

newModel.fit_generator(training_set, steps_per_epoch=38, epochs=3,
                       validation_data=test_set, validation_steps=3)
newModel.save('Red_face6')

int(np.ceil(1196 / 32))
int(np.ceil(77 / 32))

Code Example #55
0
File: 5.3-vgg16-train.py Project: hunering/demo-code
cv_dir = os.path.join(small_base_dir, r"validation")
test_dir = os.path.join(small_base_dir, r"test")
batch_size = 20

train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2,
                                   height_shift_range=0.2, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu', input_dim=4*4*512))
model.add(layers.Dense(1, activation='sigmoid'))
conv_base.trainable = False
model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
              loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy])
len(model.trainable_weights)


train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(150, 150), batch_size=batch_size, class_mode='binary')
# use the non-augmenting test_datagen for the validation data
validation_generator = test_datagen.flow_from_directory(
    cv_dir, target_size=(150, 150), batch_size=batch_size, class_mode='binary')

history = model.fit_generator(train_generator, epochs=100, steps_per_epoch=100,
                              validation_data=validation_generator, validation_steps=50)
Code Example #56
0
from keras.preprocessing.image import ImageDataGenerator

# make augmentors to generate augmented dataset
train_augmentor = ImageDataGenerator(
    rescale=1. /
    255,  # rescale RGB coeff b/w 0 & 1 by multiplying with a factor 1/255
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_augmentor = ImageDataGenerator(rescale=1. / 255)

# generate augmented dataset
training_set = train_augmentor.flow_from_directory(
    directory='dataset/training_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
#save_to_dir = 'dataset/augmented_training_set')

test_set = test_augmentor.flow_from_directory(directory='dataset/test_set',
                                              target_size=(64, 64),
                                              batch_size=32,
                                              class_mode='binary')
#save_to_dir = 'dataset/augmented_test_set')

# fit CNN to generated data
classifier.fit_generator(  # Trains the model on data generated batch-by-batch
    training_set,
    samples_per_epoch=8000,  # see steps_per_epoch
    epochs=25,
Code Example #57
0
File: img_clf.py Project: jannson/Similar
def fine_tune():
    """recreates top model architecture/weights and fine tunes with image augmentation and optimizations"""
    
    # reconstruct vgg16 model
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # load vgg16 weights
    f = h5py.File(weights_path, 'r')
    
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
        
    f.close()

    # add the classification layers
    top_model = Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))

    top_model.load_weights(top_model_weights_path)

    # add the model on top of the convolutional base
    model.add(top_model)

    # set the first 25 layers (up to the last conv block)
    # to non-trainable (weights will not be updated)
    for layer in model.layers[:25]:
        layer.trainable = False

    # compile the model with a SGD/momentum optimizer
    # and a very slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])

    # prepare data augmentation configuration
    train_datagen = ImageDataGenerator(
            rescale=1./255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        class_mode='binary')

    # fine-tune the model
    model.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=nb_validation_samples,
        callbacks=[early_stopping])

    # save the model
    json_string = model.to_json()

    with open('final_model_architecture.json', 'w') as f:
        f.write(json_string)

    model.save_weights('final_weights.h5')
    
    # return the model for convenience when making predictions
    return model 
Code Example #58
0

'''
model.compile(loss='categorical_crossentropy',
             optimizer = opt,
             metrics = ['accuracy'])
'''

# Image generators
train_datagen = ImageDataGenerator(rescale= 1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    shuffle=True,
    batch_size=batch_size,
    class_mode='categorical'
    )

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(image_size, image_size),
    batch_size=batch_size,
    shuffle=True,
    class_mode='categorical'
    )

def generate_arrays_from_file(generator):
    for x,y in generator:
        # x = x.reshape(batch_size, image_size*image_size*3)
Code Example #59
0
File: img_clf.py Project: jannson/Similar
def save_bottlebeck_features():
    """builds the pretrained vgg16 model and runs it on our training and validation datasets"""
    datagen = ImageDataGenerator(rescale=1./255)

    # match the vgg16 architecture so we can load the pretrained weights into this model
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # load VGG16 weights
    f = h5py.File(weights_path, 'r')
    
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
        
    f.close()    
    print('Model loaded.')

    generator = datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_width, img_height),
            batch_size=32,
            class_mode=None,
            shuffle=False)
    bottleneck_features_train = model.predict_generator(generator, nb_train_samples)
    np.save(open('bottleneck_features_train.npy', 'wb'), bottleneck_features_train)

    generator = datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_width, img_height),
            batch_size=32,
            class_mode=None,
            shuffle=False)
    bottleneck_features_validation = model.predict_generator(generator, nb_validation_samples)
    np.save(open('bottleneck_features_validation.npy', 'wb'), bottleneck_features_validation)
Code Example #60
0
              optimizer=optimizers.RMSprop(lr=2e-5),    # RMSprop optimizer
              metrics=['acc'])                          # monitor accuracy

train_datagen = ImageDataGenerator(
    rescale=1./255,             # multiply pixel values by 1./255 to scale them to 0-1
    rotation_range=40,          # range (in degrees) for random rotations
    width_shift_range=0.2,      # range for random horizontal shifts
    height_shift_range=0.2,     # range for random vertical shifts
    shear_range=0.2,            # angle for random shear transformations
    zoom_range=0.2,             # range for random zoom
    horizontal_flip=True)       # randomly flip half of the images horizontally
validation_datagen = ImageDataGenerator(rescale=1./255)  # no augmentation for the validation set

train_generator = train_datagen.flow_from_directory(
    train_dir,                  # training set path
    target_size=(200, 200),     # resize all images to 200x200
    batch_size=64,              # batch size
    class_mode='binary')        # binary labels, since binary cross-entropy is used
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,             # validation set path
    target_size=(200, 200),
    batch_size=64,
    class_mode='binary')

model.fit_generator(              # start training
    train_generator,                        # training data generator
    steps_per_epoch=24,                     # batches drawn from the generator per epoch
    epochs=40,                              # number of epochs
    validation_data=validation_generator,   # validation data
    validation_steps=64)                    # batches drawn from the validation generator for evaluation