x = Flatten()(vgg.output)

prediction = Dense(len(folders), activation='softmax')(x)

model = Model(inputs=vgg.input, outputs=prediction)

model.summary()

model.compile(
  loss='categorical_crossentropy',
  optimizer='adam',
  metrics=['accuracy']
)

batch_size = 32
r = model.fit_generator(training_set,
                        epochs=10,
                        validation_data=test_set,
                        verbose=1,
                        steps_per_epoch=X_train.shape[0] // batch_size)

# plot the loss and the accuracy side by side
plt.figure(figsize=(10, 6))

plt.subplot(1, 2, 1)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()

# plot the accuracy
plt.subplot(1, 2, 2)
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('LossVal_loss.png')  # save after both subplots are drawn
Example #2
# (head of this call truncated in the source; `train_it`, the iterator used
# by fit_generator below, is assumed to come from a hypothetical
# ImageDataGenerator `datagen` over a hypothetical train_dir)
train_it = datagen.flow_from_directory(train_dir,
                                       class_mode='binary',
                                       batch_size=batch_size,
                                       target_size=img_shape)

base_model = ResNet50(include_top=False, input_shape=(298, 298, 3))
for layer in base_model.layers:
    layer.trainable = False
flat1 = Flatten()(base_model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)
model = Model(inputs=base_model.inputs, outputs=output)

opt = SGD(lr=lr, momentum=0.9)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

history = model.fit_generator(train_it,
                              steps_per_epoch=len(train_it),
                              epochs=epochs,
                              verbose=1)

if 'acc' in history.history.keys():
    for i in range(1, epochs + 1):
        log_metrics('accuracy', float(history.history['acc'][i - 1]), i, i)
        log_metrics('loss', float(history.history['loss'][i - 1]), i, i)
        print("accuracy=", float(history.history['acc'][i - 1]))
        print("loss=", float(history.history['loss'][i - 1]))
else:
    for i in range(1, epochs + 1):
        log_metrics('accuracy', float(history.history['accuracy'][i - 1]), i,
                    i)
        log_metrics('loss', float(history.history['loss'][i - 1]), i, i)
        print("accuracy=", float(history.history['accuracy'][i - 1]))
        print("loss=", float(history.history['loss'][i - 1]))
Example #3
# (truncated in the source: tail of a loop inverting a generator's
# class_indices mapping into an index -> label dict)
#     labels[v] = k

train_generator = gen.flow_from_directory(train_path,
                                          target_size=IMAGE_SIZE,
                                          shuffle=True,
                                          batch_size=batch_size)

valid_generator = gen.flow_from_directory(validation_path,
                                          target_size=IMAGE_SIZE,
                                          shuffle=True,
                                          batch_size=batch_size)

r = model.fit_generator(
    train_generator,
    #validation_data=valid_generator,
    epochs=15,
    steps_per_epoch=len(image_files) // batch_size,
    #validation_steps=len(validation_files)//batch_size
)


def get_confusion_matrix(data_path, N):
    print("Generating Confusion Matrix {}".format(N))
    predictions = []
    targets = []
    i = 0
    for x, y in gen.flow_from_directory(data_path,
                                        target_size=IMAGE_SIZE,
                                        shuffle=False,
                                        batch_size=batch_size * 2):
        i += 1
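        # (the rest of the loop body is truncated in the source; a
        # completion in the usual pattern, assuming sklearn's
        # confusion_matrix is imported)
        p = np.argmax(model.predict(x), axis=1)
        y = np.argmax(y, axis=1)
        predictions = np.concatenate((predictions, p))
        targets = np.concatenate((targets, y))
        if len(targets) >= N:
            break
    cm = confusion_matrix(targets, predictions)
    return cm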
Example #4
# (the def line was truncated in the source; the name is taken from the
# compile call below) A low-cost perceptual colour distance on RGB images
# scaled back to the 0-255 range:
def perceptual_distance(y_true, y_pred):
    y_true *= 255
    y_pred *= 255
    rmean = (y_true[:, :, :, 0] + y_pred[:, :, :, 0]) / 2
    r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]
    g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]
    b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]

    return K.mean(K.sqrt((((512 + rmean) * r * r) / 256) + 4 * g * g +
                         (((767 - rmean) * b * b) / 256)))

val_generator = image_generator(config.batch_size, train_dir)
in_sample_images, out_sample_images = next(val_generator)
class ImageLogger(Callback):
    def on_epoch_end(self, epoch, logs=None):
        preds = self.model.predict(in_sample_images)
        in_resized = []
        for arr in in_sample_images:
            # Simple upsampling
            in_resized.append(arr.repeat(8, axis=0).repeat(8, axis=1))
        wandb.log({
            "examples": [wandb.Image(np.concatenate([in_resized[i] * 255, o * 255, out_sample_images[i] * 255], axis=1)) for i, o in enumerate(preds)]
        }, commit=False)
    
opt = Adam(lr=0.0001, beta_1=0.9)
generator.compile(loss='mse', optimizer=opt, metrics=[perceptual_distance])

generator.fit_generator(image_generator(config.batch_size, train_dir),
                        steps_per_epoch=config.steps_per_epoch,
                        epochs=config.num_epochs,
                        callbacks=[ImageLogger(), WandbCallback()],
                        validation_steps=config.val_steps_per_epoch,
                        validation_data=val_generator)
Example #5
class IIC():
    def __init__(self, args, backbone):
        self.args = args
        self.backbone = backbone
        self._model = None
        self.train_gen = DataGenerator(args, siamese=True)
        self.n_labels = self.train_gen.n_labels
        self.build_model()
        self.load_eval_dataset()
        self.accuracy = 0

    # build the n_heads of the IIC model
    def build_model(self):
        inputs = Input(shape=self.train_gen.input_shape)
        x = self.backbone(inputs)
        x = Flatten()(x)
        outputs = []
        for i in range(self.args.heads):
            name = "head%d" % i
            outputs.append(
                Dense(self.n_labels, activation='softmax', name=name)(x))
        self._model = Model(inputs, outputs, name='encoder')
        optimizer = Adam(lr=1e-3)
        self._model.compile(optimizer=optimizer, loss=self.loss)
        self._model.summary()

    # MI loss
    def loss(self, y_true, y_pred):
        size = self.args.batch_size
        n_labels = y_pred.shape[-1]
        # lower half is Z
        Z = y_pred[0:size, :]
        Z = K.expand_dims(Z, axis=2)
        # upper half is Zbar
        Zbar = y_pred[size:y_pred.shape[0], :]
        Zbar = K.expand_dims(Zbar, axis=1)
        # compute joint distribution
        P = K.batch_dot(Z, Zbar)
        P = K.sum(P, axis=0)
        # enforce symmetric joint distribution
        P = (P + K.transpose(P)) / 2.0
        P = P / K.sum(P)
        # marginal distributions
        Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
        Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
        Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
        Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
        P = K.clip(P, K.epsilon(), np.finfo(float).max)
        Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
        Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
        # negative MI loss
        neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
        # each head contributes 1/n_heads to the total loss
        return neg_mi / self.args.heads

    # train the model
    def train(self):
        accuracy = AccuracyCallback(self)
        lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
        callbacks = [accuracy, lr_scheduler]
        self._model.fit_generator(generator=self.train_gen,
                                  use_multiprocessing=True,
                                  epochs=self.args.epochs,
                                  callbacks=callbacks,
                                  workers=4,
                                  shuffle=True)

    # pre-load test data for evaluation
    def load_eval_dataset(self):
        (_, _), (x_test, self.y_test) = self.args.dataset.load_data()
        image_size = x_test.shape[1]
        x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
        x_test = x_test.astype('float32') / 255
        x_eval = np.zeros([x_test.shape[0], *self.train_gen.input_shape])
        for i in range(x_eval.shape[0]):
            x_eval[i] = center_crop(x_test[i])

        self.x_test = x_eval

    # reload model weights for evaluation
    def load_weights(self):
        if self.args.restore_weights is None:
            raise ValueError("Must load model weights for evaluation")

        if self.args.restore_weights:
            folder = "weights"
            os.makedirs(folder, exist_ok=True)
            path = os.path.join(folder, self.args.restore_weights)
            print("Loading weights... ", path)
            self._model.load_weights(path)

    # evaluate the accuracy of the current model weights
    def eval(self):
        y_pred = self._model.predict(self.x_test)
        print("")
        # accuracy per head
        for head in range(self.args.heads):
            if self.args.heads == 1:
                y_head = y_pred
            else:
                y_head = y_pred[head]
            y_head = np.argmax(y_head, axis=1)

            accuracy = unsupervised_labels(list(self.y_test), list(y_head),
                                           self.n_labels, self.n_labels)
            info = "Head %d accuracy: %0.2f%%"
            if self.accuracy > 0:
                info += ", Old best accuracy: %0.2f%%"
                data = (head, accuracy, self.accuracy)
            else:
                data = (head, accuracy)
            print(info % data)
            # if accuracy improves during training,
            # save the model weights on a file
            if accuracy > self.accuracy \
                    and self.args.save_weights is not None:
                self.accuracy = accuracy
                folder = self.args.save_dir
                os.makedirs(folder, exist_ok=True)
                path = os.path.join(folder, self.args.save_weights)
                print("Saving weights... ", path)
                self._model.save_weights(path)

    @property
    def model(self):
        return self._model
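
# a minimal driver sketch (hypothetical: the original script builds `args`
# with argparse and passes a CNN backbone such as a VGG feature extractor):
#
#     iic = IIC(args, backbone)
#     iic.train()
#     iic.eval()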
Example #6
caption_model = Model(inputs=[inputs1, inputs2], outputs=outputs)

caption_model.layers[2].set_weights([embedding_matrix])
caption_model.layers[2].trainable = False
caption_model.compile(loss='categorical_crossentropy', optimizer='adam')

number_pics_per_batch = 3
steps = len(train_descriptions)//number_pics_per_batch


model_path = os.path.join(root_captioning, "data", 'caption-model-coco.hdf5')

if not os.path.exists(model_path):
  for i in tqdm(range(EPOCHS*2)):
      generator = data_generator(train_descriptions, encoding_train,
                                 wordtoidx, max_length, number_pics_per_batch)
      caption_model.fit_generator(generator, epochs=1,
                                  steps_per_epoch=steps, verbose=1)

  caption_model.optimizer.lr = 1e-4
  number_pics_per_batch = 6
  steps = len(train_descriptions)//number_pics_per_batch

  for i in range(EPOCHS):
      generator = data_generator(train_descriptions, encoding_train,
                                 wordtoidx, max_length, number_pics_per_batch)
      caption_model.fit_generator(generator, epochs=1,
                                  steps_per_epoch=steps, verbose=1)
  caption_model.save_weights(model_path)

  print(f"Training took: {hms_string(time()-start)}")
else:
  caption_model.load_weights(model_path)

def generateCaption(photo):
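    # (the body is truncated in the source; a minimal greedy-decoding
    # sketch, assuming START/STOP sentinel tokens, an idxtoword mapping
    # complementary to wordtoidx, and pad_sequences imported from
    # keras.preprocessing.sequence)
    in_text = START
    for i in range(max_length):
        sequence = [wordtoidx[w] for w in in_text.split() if w in wordtoidx]
        sequence = pad_sequences([sequence], maxlen=max_length)
        yhat = caption_model.predict([photo, sequence], verbose=0)
        yhat = np.argmax(yhat)
        word = idxtoword[yhat]
        in_text += ' ' + word
        if word == STOP:
            break
    return ' '.join(in_text.split()[1:-1])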
Example #7
def main():
    directory = 'img' # folder containing the images
    df_train = pd.read_csv('train.csv') # DataFrame describing the training data
    df_validation = pd.read_csv('val.csv') # DataFrame describing the validation data
    df_test = pd.read_csv('test.csv') # DataFrame describing the test data
    label_list = ['AMD', 'DR_DM', 'Gla', 'MH', 'Normal', 'RD', 'RP', 'RVO'] # label names
    image_size = (224, 224) # input image size
    classes = len(label_list) # number of classes
    batch_size = 32 # batch size
    epochs = 300 # number of epochs
    loss = 'categorical_crossentropy' # loss function
    optimizer = Adam(lr=0.00001, amsgrad=True) # optimizer
    metrics = 'accuracy' # evaluation metric
    # ImageDataGenerator augmentation parameters
    aug_params = {'rotation_range': 5,
                  'width_shift_range': 0.05,
                  'height_shift_range': 0.05,
                  'shear_range': 0.1,
                  'zoom_range': 0.05,
                  'horizontal_flip': True,
                  'vertical_flip': True}


    # save the model only when val_loss reaches a new minimum
    mc_cb = ModelCheckpoint('model_weights.h5',
                            monitor='val_loss', verbose=1,
                            save_best_only=True, mode='min')
    # when training plateaus, multiply the learning rate by 0.2
    rl_cb = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=3,
                              verbose=1, mode='auto',
                              min_delta=0.0001, cooldown=0, min_lr=0)
    # once training stops improving, end it early
    es_cb = EarlyStopping(monitor='loss', min_delta=0,
                          patience=5, verbose=1, mode='auto')


    # weight the loss according to the per-class sample counts
    weight_balanced = {}
    for i, label in enumerate(label_list):
        weight_balanced[i] = (df_train['label'] == label).sum()
    max_count = max(weight_balanced.values())
    for label in weight_balanced:
        weight_balanced[label] = max_count / weight_balanced[label]
    print(weight_balanced)


    # create the generators
    ## training data generator
    datagen = ImageDataGenerator(rescale=1./255, **aug_params)
    train_generator = datagen.flow_from_dataframe(
        dataframe=df_train, directory=directory,
        x_col='filename', y_col='label',
        target_size=image_size, class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_train = train_generator.n // train_generator.batch_size
    ## validation data generator
    datagen = ImageDataGenerator(rescale=1./255)
    validation_generator = datagen.flow_from_dataframe(
        dataframe=df_validation, directory=directory,
        x_col='filename', y_col='label',
        target_size=image_size, class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_validation = validation_generator.n // validation_generator.batch_size


    # build the network
    base_model = EfficientNetB0(include_top=False, weights='imagenet', pooling='avg',
                       input_shape=(image_size[0], image_size[1], 3),
                       backend=tf.keras.backend, layers=tf.keras.layers,
                       models=tf.keras.models, utils=tf.keras.utils)
    x = Dense(256, kernel_initializer='he_normal')(base_model.output)
    x = Dense(classes, kernel_initializer='he_normal')(x)
    outputs = Activation('softmax')(x)
    model = Model(inputs=base_model.inputs, outputs=outputs)

    model.summary()
    model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])


    # training
    history = model.fit_generator(
        train_generator, steps_per_epoch=step_size_train,
        epochs=epochs, verbose=1, callbacks=[mc_cb, rl_cb, es_cb],
        validation_data=validation_generator,
        validation_steps=step_size_validation,
        class_weight=weight_balanced,
        workers=3)

    # save the learning curves
    plot_history(history)


    # evaluate on the test data
    ## load the trained weights
    model.load_weights('model_weights.h5')

    ## inference
    X = df_test['filename'].values
    y_true = list(map(lambda x: label_list.index(x), df_test['label'].values))
    y_pred = []
    for file in tqdm(X, desc='pred'):
        # resize and convert the image to match the training-time conditions
        img = Image.open(f'{directory}/{file}')
        img = img.resize(image_size)
        img = np.array(img, dtype=np.float32)
        img *= 1./255
        img = np.expand_dims(img, axis=0)

        y_pred.append(np.argmax(model.predict(img)[0]))

    ## evaluation
    print(classification_report(y_true, y_pred, target_names=label_list))
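
# plot_history is assumed to be defined elsewhere in the original script; a
# minimal version consistent with how it is called above might be
# (matplotlib.pyplot as plt assumed imported):
def plot_history(history):
    """Save the loss and accuracy curves from a Keras History object."""
    plt.figure()
    for key in ('loss', 'val_loss', 'accuracy', 'val_accuracy'):
        if key in history.history:
            plt.plot(history.history[key], label=key)
    plt.xlabel('epoch')
    plt.legend()
    plt.savefig('history.png')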
Example #8
# (head of this call truncated in the source; the save name below suggests
# an EfficientNetB5 base, loaded without its classifier top)
base_model = EfficientNetB5(include_top=False,
                            weights='imagenet')

w = base_model.output
w = Flatten()(w)
w = Dense(256, activation="relu")(w)
w = Dense(128, activation="relu")(w)
output = Dense(17, activation="sigmoid")(w)

# Build the full model for training.
model = Model(inputs=[base_model.inputs[0]], outputs=[output])

model.layers[-6].trainable = True
model.layers[-7].trainable = True
model.layers[-8].trainable = True

# train the new head
model.compile(loss='binary_crossentropy',  # multi-label loss matching the 17-way sigmoid output
              optimizer='sgd',
              metrics=['accuracy'])

model.fit_generator(generator=train_generator,
                    steps_per_epoch=STEP_SIZE_TRAIN,
                    validation_data=val_generator,
                    validation_steps=STEP_SIZE_VALID,
                    epochs=EpochSize,
                    class_weight=weights)

model.save('EfficientNetB5_Comp.h5')
Example #9
# (truncated: the loop head was cut; it lists each layer's trainability)
for layer in model.layers:
    print(layer.name, layer.trainable)

# compile the model
print("----------Compiling model----------")
optim = Adam(lr=init_LR, decay=init_LR / Epochs)
model.compile(loss="binary_crossentropy",
              optimizer=optim,
              metrics=["accuracy"])

# train the head of the network
print("----------Training head----------")
trainHead = model.fit_generator(traindataAugm.flow(train_X,
                                                   train_Y,
                                                   batch_size=BS),
                                steps_per_epoch=len(train_X) // BS,
                                validation_data=(val_X, val_Y),
                                validation_steps=len(val_X) // BS,
                                epochs=Epochs)

# make predictions on the testing set
print("----------Evaluating test set----------")
testpred = model.predict(test_X, batch_size=BS)

# label 1 if the predicted class-1 probability exceeds 0.5
testpred_a = (testpred[:, 1] > 0.5) * 1

# Classification report
print(
    classification_report(test_Y.argmax(axis=1),
                          testpred_a))  # (call truncated in the source; the predicted-labels argument is assumed)
Example #10
# set which layers are frozen and which are fine-tuned
net_final = Model(inputs=net.input, outputs=output_layer)
for layer in net_final.layers[:FREEZE_LAYERS]:
    layer.trainable = False
for layer in net_final.layers[FREEZE_LAYERS:]:
    layer.trainable = True


# fine-tune with the Adam optimizer at a low learning rate
net_final.compile(optimizer=Adam(lr=1e-5),
                  loss='categorical_crossentropy', metrics=['accuracy'])
logdir = os.path.join(
    "logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
# print the full network architecture
print(net_final.summary())

# train the model
net_final.fit_generator(train_batches,
                        steps_per_epoch=train_batches.samples // BATCH_SIZE,
                        validation_data=valid_batches,
                        validation_steps=valid_batches.samples // BATCH_SIZE,
                        epochs=NUM_EPOCHS,
                        callbacks=[tensorboard_callback])

# save the trained model
net_final.save(WEIGHTS_FINAL)

# to view the TensorBoard plots, run the line below
# %tensorboard --logdir logs
# (truncated: this assignment was the else-branch of a model-selection
# conditional whose if-branch was cut)
file_name = "dense"
checkpointer = ModelCheckpoint(
    "../data/models/" + file_name + "_ms_transfer_alternative_init." +
    "{epoch:02d}-{val_categorical_accuracy:.3f}." + "hdf5",
    monitor='val_categorical_accuracy',
    verbose=1,
    save_best_only=True,
    mode='max')
earlystopper = EarlyStopping(monitor='val_categorical_accuracy',
                             patience=10,
                             mode='max',
                             restore_best_weights=True)
history = model.fit_generator(train_generator,
                              steps_per_epoch=1000,
                              epochs=10000,
                              callbacks=[checkpointer, earlystopper],
                              validation_data=validation_generator,
                              validation_steps=500)
initial_epoch = len(history.history['loss']) + 1

# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
names = []
for i, layer in enumerate(model.layers):
    names.append([i, layer.name, layer.trainable])
print(names)
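
# a sketch of the fine-tuning step the comments above describe, with a
# hypothetical freeze boundary N read off the printed layer list (SGD
# assumed imported from tensorflow.keras.optimizers):
N = 100
for i, layer in enumerate(model.layers):
    layer.trainable = i >= N
# recompile so the trainability change takes effect
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])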
Example #12
Batch_size = 128

train_generator = gen.flow_from_directory(train_path,
                                          shuffle=True,
                                          target_size=img_size,
                                          batch_size=Batch_size)

valid_generator = gen.flow_from_directory(validation_path,
                                          target_size=img_size,
                                          batch_size=Batch_size)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# adding early stopping
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=2)

# fit the model
results = model.fit_generator(train_generator,
                              validation_data=valid_generator,
                              epochs=25,
                              callbacks=[early_stop])

# accuracy: approx. 90%

# model.save('rock_paper_scissor.h5')
Example #13
    optimizer = Adam(0.001)
    combine.compile(loss='categorical_crossentropy', optimizer=optimizer,
        metrics=['categorical_accuracy'])

    train_loader = DataGenerator(way=train_way, query=train_query, shot=shot, num_batch=1000)
    val_loader = DataGenerator(data_type='val', way=val_way, shot=shot)
    test_loader = DataGenerator(data_type='test', way=val_way, shot=shot, num_batch=1000)

    save_conv = SaveConv()
    reduce_lr = cb.ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=2, min_lr=1e-8)
    lr_sched = cb.LearningRateScheduler(scheduler)
    tensorboard = cb.TensorBoard()


    combine.fit_generator(train_loader, epochs=50, validation_data=val_loader,
                          use_multiprocessing=True, workers=4, shuffle=False,
                          callbacks=[save_conv, lr_sched, tensorboard])
    combine.evaluate(test_loader)

    save_model(conv, "model/miniimage_conv_{epoch}_{shot}_{val_way}")


# images, labels = zip(*list(loader('python/images_background')))
# images = np.expand_dims(images, axis=-1)
# images = np.repeat(images, repeats=3, axis=-1)
# print(images.shape)
# main_labels, sub_labels= [x[0] for x in labels], [x[1] for x in labels]
# encoder = LabelBinarizer()
# enc_main_labels = encoder.fit_transform(main_labels)
# output_num = len(np.unique(main_labels))
    figPath = os.path.join(model_dir, 'checkpoints', 'progress', 'train.png')
    jsonPath = os.path.join(model_dir, 'checkpoints', 'progress', 'train.json')
    train_monitor = TrainingMonitor(figPath,
                                    jsonPath=jsonPath,
                                    startAt=init_epoch_train)
    # TypeError: Object of type float32 is not JSON serializable
    # train_monitor = TrainingMonitor(figPath)

    callbacks_list = [early_stop, reduce_lr, checkpoint, train_monitor]

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=train_steps,
                                  epochs=train_epochs,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps,
                                  class_weight=class_weights,
                                  initial_epoch=init_epoch_train,
                                  max_queue_size=15,
                                  workers=8,
                                  callbacks=callbacks_list)

    plot_history(history,
                 save_fig=True,
                 save_path=os.path.join('data', 'models', 'bottleneck.png'))

    print("\n")

    bn_end_time = datetime.now()
    print("[Info] Model Bottlenecking completed at: {}".format(bn_end_time))

    bn_duration = bn_end_time - bn_start_time
Example #15
class MINE:
    def __init__(self, args, backbone):
        """Contains the encoder, SimpleMINE, and linear 
            classifier models, the loss function,
            loading of datasets, train and evaluation routines
            to implement MINE unsupervised clustering via mutual
            information maximization

        Arguments:
            args : Command line arguments to indicate choice
                of batch size, folder to save
                weights file, weights file name, etc
            backbone (Model): MINE Encoder backbone (eg VGG)
        """
        self.args = args
        self.latent_dim = args.latent_dim
        self.backbone = backbone
        self._model = None
        self._encoder = None
        self.train_gen = DataGenerator(args, siamese=True, mine=True)
        self.n_labels = self.train_gen.n_labels
        self.build_model()
        self.accuracy = 0

    def build_model(self):
        """Build the MINE model unsupervised classifier
        """
        inputs = Input(shape=self.train_gen.input_shape, name="x")
        x = self.backbone(inputs)
        x = Flatten()(x)
        y = Dense(self.latent_dim, activation='linear', name="encoded_x")(x)
        # encoder is based on backbone (eg VGG)
        # feature extractor
        self._encoder = Model(inputs, y, name="encoder")
        # the SimpleMINE in bivariate Gaussian is used
        # as T(x,y) function in MINE (Algorithm 13.7.1)
        self._mine = SimpleMINE(self.args,
                                input_dim=self.latent_dim,
                                hidden_units=1024,
                                output_dim=1)
        inputs1 = Input(shape=self.train_gen.input_shape, name="x")
        inputs2 = Input(shape=self.train_gen.input_shape, name="y")
        x1 = self._encoder(inputs1)
        x2 = self._encoder(inputs2)
        outputs = self._mine.model([x1, x2])
        # the model computes the MI between
        # inputs1 and 2 (x and y)
        self._model = Model([inputs1, inputs2], outputs, name='encoder')
        optimizer = Adam(lr=1e-3)
        self._model.compile(optimizer=optimizer, loss=self.mi_loss)
        self._model.summary()
        self.load_eval_dataset()
        self._classifier = LinearClassifier(\
                            latent_dim=self.latent_dim)

    def mi_loss(self, y_true, y_pred):
        """ MINE loss function

        Arguments:
            y_true (tensor): Not used since this is
                unsupervised learning
            y_pred (tensor): stack of predictions for
                joint T(x,y) and marginal T(x,y)
        """
        size = self.args.batch_size
        # lower half is pred for joint dist
        pred_xy = y_pred[0:size, :]

        # upper half is pred for marginal dist
        pred_x_y = y_pred[size:y_pred.shape[0], :]
        loss = K.mean(K.exp(pred_x_y))
        loss = K.clip(loss, K.epsilon(), np.finfo(float).max)
        loss = K.mean(pred_xy) - K.log(loss)
        return -loss

    def train(self):
        """Train MINE to estimate MI between 
            X and Y (eg MNIST image and its transformed
            version)
        """
        accuracy = AccuracyCallback(self)
        lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
        callbacks = [accuracy, lr_scheduler]
        self._model.fit_generator(generator=self.train_gen,
                                  use_multiprocessing=True,
                                  epochs=self.args.epochs,
                                  callbacks=callbacks,
                                  workers=4,
                                  shuffle=True)

    def load_eval_dataset(self):
        """Pre-load test data for evaluation
        """
        (_, _), (x_test, self.y_test) = \
                self.args.dataset.load_data()
        image_size = x_test.shape[1]
        x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
        x_test = x_test.astype('float32') / 255
        x_eval = np.zeros([x_test.shape[0], *self.train_gen.input_shape])
        for i in range(x_eval.shape[0]):
            x_eval[i] = center_crop(x_test[i])

        self.y_test = to_categorical(self.y_test)
        self.x_test = x_eval

    def load_weights(self):
        """Reload model weights for evaluation
        """
        if self.args.restore_weights is None:
            error_msg = "Must load model weights for evaluation"
            raise ValueError(error_msg)

        if self.args.restore_weights:
            folder = "weights"
            os.makedirs(folder, exist_ok=True)
            path = os.path.join(folder, self.args.restore_weights)
            print("Loading weights... ", path)
            self._model.load_weights(path)

    def eval(self):
        """Evaluate the accuracy of the current model weights
        """
        # generate clustering predictions for the test data
        y_pred = self._encoder.predict(self.x_test)
        # train a linear classifier
        # input: clustered data
        # output: ground truth labels
        self._classifier.train(y_pred, self.y_test)
        accuracy = self._classifier.eval(y_pred, self.y_test)

        info = "Accuracy: %0.2f%%"
        if self.accuracy > 0:
            info += ", Old best accuracy: %0.2f%%"
            data = (accuracy, self.accuracy)
        else:
            data = (accuracy,)
        print(info % data)
        # if accuracy improves during training,
        # save the model weights on a file
        if accuracy > self.accuracy \
            and self.args.save_weights is not None:
            folder = self.args.save_dir
            os.makedirs(folder, exist_ok=True)
            args = (self.latent_dim, self.args.save_weights)
            filename = "%d-dim-%s" % args
            path = os.path.join(folder, filename)
            print("Saving weights... ", path)
            self._model.save_weights(path)

        if accuracy > self.accuracy:
            self.accuracy = accuracy

    @property
    def model(self):
        return self._model

    @property
    def encoder(self):
        return self._encoder

    @property
    def classifier(self):
        return self._classifier
# (head of this call truncated in the source; it is assumed to build the
# `validation_images` iterator used by fit_generator below, via a
# hypothetical ImageDataGenerator `datagen` with validation_split)
validation_images = datagen.flow_from_directory(data_dir,
                                                target_size=(256, 256),
                                                class_mode='binary',
                                                subset='validation')

import tensorflow.keras.layers as layers
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Reshape, Concatenate, LeakyReLU
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.applications import MobileNetV2

x = Input(shape=(256, 256, 3))

# ImageNet weights
model_mn = MobileNetV2(input_shape=(256, 256, 3), alpha=1.3, include_top=False)

# run the pretrained base on the input
output_mn = model_mn(x)
# add the fully-connected head
y = Flatten()(output_mn)
y = Dense(1000, activation='relu')(y)
y = Dense(1000, activation='relu')(y)
y = Dropout(0.5)(y)
y = Dense(1, activation='sigmoid')(y)

model = Model(inputs=x, outputs=y)
model.summary()

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit_generator(training_images,
                    validation_data=validation_images,
                    epochs=10)
# (head of this call truncated in the source; it is assumed to define the
# `checkpoint` callback used below — the file name is hypothetical)
checkpoint = ModelCheckpoint('model_best.h5',
                             monitor='val_loss',
                             save_best_only=True,
                             verbose=1,
                             mode='auto')

es = EarlyStopping(mode='min', monitor='val_loss', min_delta=0.001, verbose=1)

adam = Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

print("Traning Model...")
history = model.fit_generator(train_data_gen,
                              epochs=epochs,
                              verbose=1,
                              validation_data=valid_data_gen,
                              callbacks=[checkpoint, es],
                              steps_per_epoch=len(train_data_gen),
                              validation_steps=len(valid_data_gen))

# test
y_pred = model.predict(test_data_gen)
y_pred_labels = np.argmax(y_pred, axis=1)

print('VGG16,acc on test:\t',
      accuracy_score(test_data_gen.classes, y_pred_labels))
confusion_matrix = metrics.confusion_matrix(y_true=test_data_gen.classes,
                                            y_pred=y_pred_labels)

print(confusion_matrix)
Example #18
# (head of this call truncated in the source; it is assumed to define the
# `checkpoint` callback on the val_top_3_accuracy metric, saving to the
# model.h5 file reloaded below)
checkpoint = ModelCheckpoint('model.h5',
                             monitor='val_top_3_accuracy',
                             verbose=1,
                             save_best_only=True,
                             mode='max')

reduce_lr = ReduceLROnPlateau(monitor='val_top_3_accuracy',
                              factor=0.5,
                              patience=2,
                              verbose=1,
                              mode='max',
                              min_lr=0.00001)

callbacks_list = [checkpoint, reduce_lr]

history = model.fit_generator(train_batches,
                              steps_per_epoch=train_steps,
                              class_weight=class_weights,
                              validation_data=valid_batches,
                              validation_steps=val_steps,
                              epochs=30,
                              verbose=0,
                              callbacks=callbacks_list)

# Now we need to convert the model.h5 model to a tflite model so that it is
# compatible with the TensorFlow Android app.

new_model = tf.keras.models.load_model(filepath="model.h5",
                                       custom_objects={
                                           'top_2_accuracy': top_2_accuracy,
                                           'top_3_accuracy': top_3_accuracy
                                       })

tflite_converter = tf.lite.TFLiteConverter.from_keras_model(new_model)
tflite_model = tflite_converter.convert()
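
# the converted model still has to be written to disk for the Android app;
# a minimal sketch:
with open("model.tflite", "wb") as f:
    f.write(tflite_model)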
Example #19
# Plot the accuracy
plt.plot(r.history['accuracy'], label='accuracy')
plt.plot(r.history['val_accuracy'], label='val_accuracy')
plt.legend()
plt.show()

# Fit the model with data augmentation.
# Remember: if you run this after the first fit, training continues where it left off.
batch_size = 32
data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
    width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
train_generator = data_generator.flow(x_train, y_train, batch_size)
steps_per_epochs = x_train.shape[0] // batch_size
r = model.fit_generator(train_generator,
                        validation_data=(x_test, y_test),
                        steps_per_epoch=steps_per_epochs,
                        epochs=7)

# Plot the loss
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()

# Plot the accuracy
plt.plot(r.history['accuracy'], label='accuracy')
plt.plot(r.history['val_accuracy'], label='val_accuracy')
plt.legend()
plt.show()

Example #20
es = EarlyStopping(monitor='val_loss', patience=15, mode='min')
file_path = 'c:/data/modelcheckpoint/lotte_last5.hdf5'
mc = ModelCheckpoint(file_path,
                     monitor='val_loss',
                     save_best_only=True,
                     mode='min',
                     verbose=1)
rl = ReduceLROnPlateau(monitor='val_loss',
                       factor=0.5,
                       patience=5,
                       verbose=1,
                       mode='min')

model.fit_generator(xy_train,
                    steps_per_epoch=xy_train.samples // xy_train.batch_size,
                    epochs=1000,
                    validation_data=xy_val,
                    validation_steps=xy_val.samples // xy_val.batch_size,
                    callbacks=[es, mc, rl])

from tensorflow.keras.models import load_model
model = load_model('c:/data/modelcheckpoint/lotte_last5.hdf5')

for i in range(72000):
    image = Image.open(f'../data/lotte/test/test/{i}.jpg')
    image.save('../data/lotte/test_new/test_new/{0:05}.jpg'.format(i))

test_data = test_datagen.flow_from_directory('../data/lotte/test_new',
                                             target_size=(img_size, img_size),
                                             batch_size=batch,
                                             class_mode=None,
                                             shuffle=False)
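
# a minimal inference sketch for the unlabeled, unshuffled test generator
# above (class_mode=None yields images only, in filename order):
pred = model.predict(test_data, verbose=1)
pred_labels = np.argmax(pred, axis=1)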
    # distribute training across all available GPUs
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        base_model = ResNet50(weights='imagenet', include_top=False)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)

        predictions = Dense(n_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)

        for layer in base_model.layers:
            layer.trainable = False

        # define optimizers
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    model.summary()

    # training
    model.fit_generator(generator=train_generator,
                        validation_data=test_generator,
                        use_multiprocessing=True,
                        workers=Config['num_workers'],
                        )

# (head of this call truncated in the source; it is assumed to define the
# Adam optimizer `optim` used in the compile call below — the learning
# rate shown is hypothetical)
optim = Adam(lr=1e-4,
             beta_1=0.9,
             beta_2=0.999,
             epsilon=None,
             decay=0.0,
             amsgrad=False)
model_final.compile(loss='binary_crossentropy',
                    optimizer=optim,
                    metrics=['accuracy', keras_metrics.fbeta_score])

#%%
history = model_final.fit_generator(
    image_generator(image_list,
                    labels,
                    batch_size=16,
                    selection_indices=train_indices),
    steps_per_epoch=len(train_indices) // 16,
    epochs=10,
    validation_data=image_generator(image_list,
                                    labels,
                                    batch_size=16,
                                    selection_indices=val_indices),
    validation_steps=len(val_indices) // 16)

#%%
# model_final.summary()
print(len(model_final.layers[-3:]))

# for layer in model_final.layers[:-3]:
#     layer.trainable = False
#%%

#%%
Example #23
# (head of this call truncated in the source; the CTC loss is computed
# inside the graph, so the Keras-level loss just passes y_pred through)
complete_model.compile(loss={
    'ctc_loss': lambda y_true, y_pred: y_pred
},
                       optimizer="adam")

complete_model.summary()


def decode_examples(images, labels):
    """Transform our output for example logging"""
    return complete_model.predict(format_batch_ctc(images, labels)[0])[1]


# Initialize generators and train
train = Generator(dataset.x_train,
                  dataset.y_train,
                  batch_size=32,
                  format_fn=format_batch_ctc)
test = Generator(dataset.x_test,
                 dataset.y_test,
                 batch_size=32,
                 format_fn=format_batch_ctc)
complete_model.fit_generator(
    train,
    epochs=75,
    callbacks=[
        ExampleLogger(dataset, decode_examples),
        ModelCheckpoint("best-ctc.h5", save_best_only=True)
    ],
    validation_data=test,
)
Example #24
# (truncated in the source: tail of the usual confusion-matrix plotting
# helper, inside its cell-annotation loop over (i, j) of `cm`)
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(
        accuracy, misclass))
    plt.show()


save_models = tf.keras.models.load_model('saved_model.h5', compile=False)
save_models.compile(Adam(lr=0.0001),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
trained_model = save_models.fit_generator(train_batch,  # train the reloaded model evaluated below
                                          validation_data=test_batch,
                                          epochs=30)
loss, accuracy = save_models.evaluate(test_batch)
print(f'loss: {loss} and accuracy: {accuracy*100}')

# new_model = model.save("saved_model.h5")
test_labels = test_batch.classes  # the generator's ground-truth class labels
print(test_labels)
print(test_batch.class_indices)  # class-to-index mapping, e.g. 0 for car, 1 for cat, 2 for man
predictions = save_models.predict_generator(test_batch)
cm = confusion_matrix(test_labels, predictions.argmax(axis=1))
print(f'model input : {model.input}, model input_names : {model.input_names} ')
print(
    f'model output : {model.output}, model output_names : {model.output_names} '
)
Example #25
# use generator
training_generator = DataProcessor(train_list)
validation_generator = DataProcessor(valid_list)

# dataIt = iter(training_generator)
# for i in range(2):
#     x, y = next(dataIt)
#     print(x[0])
''' training '''
early = EarlyStopping(monitor='val_loss',
                      mode='min',
                      patience=3,
                      min_delta=0.01)
model.fit_generator(training_generator,
                    validation_data=validation_generator,
                    epochs=20,
                    callbacks=[early],
                    use_multiprocessing=True,
                    workers=2)

# save weights
if not os.path.exists('./weights'):
    os.makedirs('./weights')

model.save_weights('./weights/mnist.h5')
''' test (eval) '''
test_files = []
for root, _, files in os.walk('./data/testing'):
    for fname in files:
        filepath = os.path.join(root, fname)
        test_files.append(filepath)
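
# a sketch of the evaluation this file walk prepares for, assuming the
# 28x28 grayscale inputs used in training (PIL.Image assumed imported):
for filepath in test_files:
    img = Image.open(filepath).convert('L').resize((28, 28))
    x = np.asarray(img, dtype='float32')[None, :, :, None] / 255.0
    print(filepath, '->', np.argmax(model.predict(x), axis=1)[0])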
Example #26
# (head of this call truncated in the source; a ResNet base loaded without
# its top, matching the `resnet` variable used below)
resnet = ResNet50(weights='imagenet',
                  include_top=False,
                  input_shape=(64, 64, 3))
resnet.trainable = False
x = resnet.output
x = GlobalAveragePooling2D()(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.2)(x)
predictions = Dense(n_classes,
                    kernel_regularizer=regularizers.l2(0.005),
                    activation='softmax')(x)

model = Model(inputs=resnet.input, outputs=predictions)
model.compile(optimizer='SGD',
              loss='categorical_crossentropy',  # matches the multi-class softmax output
              metrics=['accuracy'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=150,
    epochs=350,
    verbose=1,
)
model.save('papaya.hdf5')

plt.title('epochs vs loss')
plt.plot(history.history['loss'])

plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss'], loc='best')  # no validation data was passed, so only train loss exists
plt.show()
    center_loss = get_center_loss(0.5, num_classes, feature_dim)

    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    custom_model.compile(loss={
        'prediction': 'sparse_categorical_crossentropy',
        'features': center_loss
    },
                         optimizer=adam,
                         metrics={'prediction': 'accuracy'})
    # pdb.set_trace()  # debugging breakpoint; disable for unattended runs
    ##############################################################################
    history = custom_model.fit_generator(
        train_generator,
        steps_per_epoch=(num_train_samples) // batch_size,
        validation_data=validation_generator,
        validation_steps=(num_validation_samples) // batch_size,
        epochs=25)

    print(history.history.keys())
    # summarize history for accuracy
    plt.plot(history.history['prediction_accuracy'])
    plt.plot(history.history['val_prediction_accuracy'])
    plt.title('Model accuracy of prediction')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    plt.plot(history.history['features_loss'])
    plt.plot(history.history['val_features_loss'])
    plt.title('model loss of features')
Example #28
# (head of this callbacks list truncated in the source; it is assumed to
# open with a ReduceLROnPlateau on the same val_top_3_accuracy metric —
# the factor and patience shown are hypothetical)
callbacks = [
    ReduceLROnPlateau(monitor='val_top_3_accuracy',
                      factor=0.75,
                      patience=3,
                      min_delta=0.00008,
                      mode='max',
                      cooldown=3,
                      verbose=1),
    ModelCheckpoint("./models/lstm_xception_full1.h5",
                    monitor='val_top_3_accuracy',
                    mode='max',
                    save_best_only=True,
                    verbose=1)
]

hists = []
hist = model.fit_generator(train_datagen,
                           steps_per_epoch=STEPS,
                           epochs=EPOCHS,
                           verbose=1,
                           validation_data=val_datagen,
                           validation_steps=500,
                           callbacks=callbacks)
hists.append(hist)

# Validation
print("Running validation")

df = pd.read_csv(os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(NCSVS - 1)),
                 nrows=34000)
for i in range(10):
    valid_df = df.loc[i * 3400:(i + 1) * 3400, :].copy()
    x_valid, x2 = df_to_image_array_xd(valid_df, SIZE)
    y_valid = keras.utils.to_categorical(valid_df.y, num_classes=NCATS)
    print(x_valid.shape, y_valid.shape)
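    # (the loop body is truncated in the source; presumably each validation
    # chunk was then scored, e.g.)
    print(model.evaluate(x_valid, y_valid, verbose=0))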
Example #29
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(3, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False

# In[ ]:

# compile our model
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the head of the network
print("[INFO] training head...")
H = model.fit_generator(trainAug.flow(trainX, trainY, batch_size=BS),
                        steps_per_epoch=len(trainX) // BS,
                        validation_data=(testX, testY),
                        validation_steps=len(testX) // BS,
                        epochs=EPOCHS)

# In[ ]:
Example #30
              loss="categorical_crossentropy",
              metrics=['accuracy'],
              )

# train the model
# define callback function
early_stopping = EarlyStopping(monitor='val_loss',
                               patience=10,
                               )
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=5,
                              )

model.fit_generator(train_generator,
                    steps_per_epoch=train_image_numbers//batch_size,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=validation_steps,
                    callbacks=[early_stopping, reduce_lr],
                    )

# test the model
test_metrics = model.evaluate_generator(test_generator, steps=1)
for name, value in zip(model.metrics_names, test_metrics):
    print(name, value)

# save the model
model.save(model_filepath)