def trainOnFold(self,
                    fold: int,
                    model: keras.Model,
                    callbacks=[],
                    numEpochs: int = 100,
                    negatives="all",
                    subsample=1.0,
                    validation_negatives=None):
        train_indexes = self.sampledIndexes(fold, True, negatives)
        if validation_negatives is None:
            validation_negatives = negatives
        test_indexes = self.sampledIndexes(fold, False, validation_negatives)

        tl, tg, train_g = self.generator_from_indexes(train_indexes)
        vl, vg, test_g = self.generator_from_indexes(test_indexes,
                                                     isTrain=False)
        try:
            model.fit_generator(train_g(),
                                len(train_indexes) //
                                (round(subsample * self.batchSize)),
                                epochs=numEpochs,
                                validation_data=test_g(),
                                callbacks=callbacks,
                                validation_steps=len(test_indexes) //
                                (round(subsample * self.batchSize)))
        finally:
            tl.terminate()
            tg.terminate()
            vl.terminate()
            vg.terminate()
def compare_optimizers(
    meta_dataset: MetaLearnerDataset,
    optimizer_factories: List[Callable[[np.ndarray, np.ndarray], Optimizer]],
    n_learner_batches: int,
    learner_batch_size: int,
    learner: Model,
    trainings_per_dataset: int,
    initial_learner_weights: Optional[List[np.ndarray]] = None
) -> List[List[float]]:
    """
    Compares the performance of two or more optimizers on the meta-valid set
    :param meta_dataset: MetaLearnerDataset to get data from
    :param optimizer_factories: list of functions that generate Optimizers to compare
    :param n_learner_batches: number of training batches for a single Learner
    :param learner_batch_size: batch size of Learner
    :param learner: model for Learner
    :param trainings_per_dataset: number of training runs per single dataset (for each optimizer)
    :param initial_learner_weights: initial weights for training Learner
    :return: list of lists of validation losses, one inner list per optimizer, collected on the meta-valid tasks
    """
    losses = [[] for _ in optimizer_factories]

    prg_bar = tqdm(total=len(meta_dataset.meta_test_set) *
                         trainings_per_dataset * len(optimizer_factories),
                   desc='Evaluating optimizers...')

    for learner_dataset in meta_dataset.meta_test_set:
        valid_batch_x, valid_batch_y = learner_dataset.test_set.x, learner_dataset.test_set.y
        train_generator = learner_dataset.train_set.batch_generator(
            batch_size=learner_batch_size, randomize=True)

        for _ in range(trainings_per_dataset):
            training_batches = list(islice(train_generator, n_learner_batches))
            if initial_learner_weights is None:
                reset_weights(learner)
                current_initial_learner_weights = learner.get_weights()
            for i, optimizer_factory in enumerate(optimizer_factories):
                # use same batches and initial weights for all optimizers
                learner.optimizer = optimizer_factory(
                    learner_dataset.train_set.x, learner_dataset.train_set.y)
                if initial_learner_weights is None:
                    # noinspection PyUnboundLocalVariable
                    learner.set_weights(current_initial_learner_weights)
                else:
                    learner.set_weights(initial_learner_weights)
                learner.fit_generator(generator=(b for b in training_batches),
                                      steps_per_epoch=n_learner_batches,
                                      epochs=1,
                                      verbose=0)
                evaluation = learner.evaluate(valid_batch_x,
                                              valid_batch_y,
                                              verbose=0)
                if isinstance(evaluation, list):
                    evaluation = evaluation[0]
                losses[i].append(evaluation)
                prg_bar.update(1)

    prg_bar.close()

    return losses
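A minimal usage sketch (hedged; `meta_dataset` and `learner` are assumed to come from the surrounding project, and the two factories below are purely illustrative) comparing Keras's SGD and Adam on the meta-valid tasks:

from keras.optimizers import SGD, Adam

# Each factory receives the task's training arrays and returns a fresh Optimizer;
# these two ignore the data, but a factory could e.g. derive a learning rate from it.
factories = [
    lambda x, y: SGD(lr=0.01, momentum=0.9),
    lambda x, y: Adam(lr=0.001),
]
losses = compare_optimizers(meta_dataset=meta_dataset,   # assumed MetaLearnerDataset instance
                            optimizer_factories=factories,
                            n_learner_batches=20,
                            learner_batch_size=32,
                            learner=learner,              # assumed compiled Learner model
                            trainings_per_dataset=3)
print([float(np.mean(l)) for l in losses])  # mean validation loss per optimizer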
Example #3
def transfer_learn():
    base_model = applications.InceptionV3(include_top=False, weights='imagenet',
        input_tensor=Input(shape=(TEST_IMAGE_WIDTH, TEST_IMAGE_HEIGHT, 3)))

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(5, activation='softmax')(x)

    train_generator = get_train_generator()
    validation_generator = get_test_generator()

    model = Model(inputs=base_model.input, outputs=predictions)

    for layer in base_model.layers:
        layer.trainable = False
    model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())
    model.fit_generator(
        train_generator,
        steps_per_epoch=150,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=44)
    
    train_eval = model.evaluate_generator(train_generator)
    print(train_eval)

    test_eval = model.evaluate_generator(validation_generator)
    print(test_eval)
    model.save(FINETUNE_FULL_MODEL)
    model.save_weights(FINETUNE_WEIGHTS)
Example #4
def inception_model(train_data, train_label, test_data, test_label):
    input_tensor = Input(shape=(299,299,3))
    base_model = inception_v3.InceptionV3(input_tensor=input_tensor,include_top=False, weights='imagenet')
    x = base_model.output
    x = AveragePooling2D(pool_size=(8,8))(x)
    x = Flatten()(x)
    #x = Dense(10, activation='relu')(x)
    predictions = Dense(5,activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in model.layers[0:-1]:
        layer.trainable = False
    model.compile(optimizer=Adam(lr=0.0001),loss='categorical_crossentropy',metrics=['acc'])
    #model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='categorical_crossentropy',metrics=['acc'])
    
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(train_data, train_label, batch_size=batch_size,
                  epochs=epochs, validation_data=(test_data, test_label),
                  shuffle=True, verbose=2)
    else:
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            zca_epsilon=1e-06,  # epsilon for ZCA whitening
            rotation_range=45,  # randomly rotate images in the range (degrees, 0 to 180)
            # randomly shift images horizontally (fraction of total width)
            width_shift_range=0.1,
            # randomly shift images vertically (fraction of total height)
            height_shift_range=0.1,
            shear_range=0.,  # set range for random shear
            zoom_range=[0.8, 1.2],  # set range for random zoom
            channel_shift_range=0.,  # set range for random channel shifts
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            cval=0.,  # value used for fill_mode = "constant"
            horizontal_flip=True,  # randomly flip images
            vertical_flip=True,  # randomly flip images
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format=None,
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0
        )

        datagen.fit(train_data)

        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(train_data, train_label,
                                         batch_size=batch_size),
                            epochs=epochs,
                            validation_data=(test_data, test_label),steps_per_epoch=50,verbose=2)

    model.save("5.h5")
Example #5
    def trainOnFold(self, fold: int, model: keras.Model, callbacks=[], numEpochs: int = 100, negatives="all",
                    subsample=1.0, validation_negatives=None, verbose=1, initial_epoch=0):
        train_indexes = self.sampledIndexes(fold, True, negatives)
        if validation_negatives is None:
            validation_negatives = negatives
        test_indexes = self.sampledIndexes(fold, False, validation_negatives)

        tl, tg, train_g = self.generator_from_indexes(train_indexes)
        vl, vg, test_g = self.generator_from_indexes(test_indexes, isTrain=False)
        try:
            v_steps = len(test_indexes) // round(subsample * self.batchSize)

            if v_steps < 1:
                v_steps = 1

            iterations = len(train_indexes) // round(subsample * self.batchSize)
            if self.maxEpochSize is not None:
                iterations = min(iterations, self.maxEpochSize)
            model.fit_generator(train_g(), iterations,
                                epochs=numEpochs,
                                validation_data=test_g(),
                                callbacks=callbacks,
                                verbose=verbose,
                                validation_steps=v_steps,
                                initial_epoch=initial_epoch)
        finally:
            tl.terminate()
            tg.terminate()
            vl.terminate()
            vg.terminate()
Example #6
    def get_accs_times(self,
                       A,
                       X,
                       y,
                       num_graph_classes,
                       splits=None,
                       batch_size=50):

        # GCN-style preprocessing of each adjacency matrix: densify, add self-loops,
        # symmetrically normalise, then convert back to sparse
        A = map(csr_matrix.todense, A)
        A = map(self._add_self_loops, A)
        A = map(self._sym_normalise_A, A)
        A = list(map(csr_matrix, A))

        accuracies = []
        times = []
        for train_idx, val_idx in iter(splits):
            A_test, A_train, X_test, X_train, y_test, y_train \
                = self.split_test_train(A, X, y, train_idx, val_idx)

            A_in = Input((A[0].shape[0], A[0].shape[1]), name='A_in')
            X_in = Input(X[0].shape, name='X_in')

            x1 = MyGCN(100, activation='relu')([A_in, X_in])
            x2 = MyGCN(64, activation='relu')([A_in, x1])
            x3 = Lambda(lambda x: K.mean(x, axis=1))(
                x2) if self.with_mean else Flatten()(x2)
            x4 = Dense(num_graph_classes, activation='softmax')(x3)

            model = Model(inputs=[A_in, X_in], outputs=x4)

            # print(model.summary())

            model.compile(Adam(),
                          loss='categorical_crossentropy',
                          metrics=['acc'])
            generator = PiNet().batch_generator([A_train, X_train], y_train,
                                                batch_size)
            start = time.time()
            model.fit_generator(generator,
                                ceil(y_train.shape[0] / batch_size),
                                200,
                                verbose=0)
            train_time = time.time() - start

            stats = model.evaluate_generator(
                PiNet().batch_generator([A_test, X_test], y_test, batch_size),
                ceil(y_test.shape[0] / batch_size))

            for metric, val in zip(model.metrics_names, stats):
                print(metric + ": ", val)

            accuracies.append(stats[1])
            times.append(train_time)

        # print("mean acc:", np.mean(accuracies))
        # print("std:", np.std(accuracies))
        return accuracies, times
Example #7
def train(model: keras.Model, epochs: int):
    steps_per_epoch = np.ceil(len(pd.read_csv(TRAIN_CSV)) / BATCH_SIZE)
    model.fit_generator(train_generator(),
                        steps_per_epoch=steps_per_epoch,
                        epochs=epochs,
                        validation_data=get_validation_data())
    model.save('leon_net.h5', include_optimizer=False)
    print('model saved.')
    return model
Example #8
def Train():
    #------------- Prepare the data --------------------------
    #The dataset directory should look like train/LabelA/1.jpg, train/LabelB/1.jpg
    gen = ImageDataGenerator(rescale=1. / 255)
    train_generator = gen.flow_from_directory(DATA_TRAIN_PATH, (224, 224),
                                              shuffle=False,
                                              batch_size=BATCH_SIZE,
                                              class_mode='categorical')

    #------------- Load the VGG model and add our own layers ----------------------
    #The layers added here need repeated hyperparameter tuning to get good results; to change the number of output classes, just change the softmax layer

    #Parameter notes: include_top: whether to keep the topmost Dense layers; input_shape: the input image size (width, height, channel)
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=(224, 224, 3))
    x = base_model.output
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(5, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    #----------- Choose how many layers to fine-tune; layers that are not fine-tuned are frozen
    for layer in base_model.layers:
        layer.trainable = False

    #---------- Compile: set the optimizer, loss function and metrics
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()  # show the model
    #---------- Set up TensorBoard to watch the acc and loss curves ---------------
    tbCallBack = TensorBoard(
        log_dir='./logs/' + TIMESTAMP,  # log directory
        histogram_freq=0,  # how often (in epochs) to compute histograms; 0 disables them
        batch_size=16,  # how much data to use when computing histograms
        write_graph=True,  # whether to store the network graph
        write_grads=True,  # whether to visualize gradient histograms
        write_images=True,  # whether to visualize the weights as images
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)

    #--------- Set up automatic checkpointing: saves whenever acc improves, overwriting the previous file ---------------
    checkpoint = ModelCheckpoint(filepath='HatNewModel.h5',
                                 monitor='acc',
                                 mode='auto',
                                 save_best_only=True)

    #---------- Start training ---------------------------------------------
    model.fit_generator(generator=train_generator,
                        epochs=EPOCHS,
                        callbacks=[tbCallBack, checkpoint],
                        verbose=2)
Example #9
def mobilenetv2(classes,epochs, steps_per_epoch, validation_steps,input_shape):
    # load the data
    train_batches, valid_batches = load_data(input_shape)

    input_shape += (3,)
    base_model = keras.applications.mobilenet_v2.MobileNetV2(include_top=False,input_shape=input_shape)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)  # GlobalAveragePooling2D turns an MxNxC tensor into a 1xC tensor, where C is the number of channels
    x = Dropout(0.5)(x)

    if classes == 1:
        print("Binary classification")
        outputs = Dense(classes, activation='sigmoid')(x)
        model = Model(base_model.inputs, outputs)

        opt = Adam(lr=0.0001)
        model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    else:
        print("Multi-class classification")
        outputs = Dense(classes, activation='softmax')(x)
        model = Model(base_model.inputs, outputs)

        opt = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    # model_cx.summary()

    print('the number of layers in this model_cx:' + str(len(model.layers)))

    # save the model weights
    out_dir = "weights/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    filepath = "weights/mobilenetv2_{epoch:04d}.h5"
    # if performance improves during training, save the file; one save per improvement
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True,
                                 mode='max',period=2)
    # learning-rate reduction
    lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1,
                                  min_lr=0.000005, mode="min")
    # early stopping
    earlystopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
    # log the training history
    log_dir = "logs/"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logfile = "logs/mobilenetv2.csv"
    log = keras.callbacks.CSVLogger(logfile, separator=',', append=False)
    loggraph = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)

    callbacks_list = [checkpoint, lr_reduce, log]

    # train
    model.fit_generator(train_batches, steps_per_epoch=steps_per_epoch, validation_data=valid_batches,
                        validation_steps=validation_steps, epochs=epochs, verbose=2,
                        callbacks=callbacks_list,workers=16,max_queue_size=20)
Example #10
def main():
    encoder, _, vae = create_models()
    encoder.load_weights('encoder-trained.h5')
    encoder.trainable = False

    seg = create_model()

    x = Input(shape=(64, 64, 3), name='input_image')
    mask = seg(x)

    y = concatenate([x, mask])

    z_mean, z_log_var = encoder(y)

    # KL divergence between the encoder's latent distribution and a unit Gaussian;
    # with the encoder frozen, this is the only loss used to train `seg`
    kl_loss = K.mean(
        -0.5 *
        K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1))

    model = Model(x, [z_mean, z_log_var])
    model.add_loss(kl_loss)

    model.compile('nadam')
    # model.load_weights('segnet.010.h5')

    model.summary()

    ck = ModelCheckpoint('segnet.{epoch:02d}.h5', save_weights_only=True)

    data = loader(False)
    model.fit_generator(data, 1000, 100, callbacks=[ck])  # 1000 steps per epoch, 100 epochs

    # Test code

    samples = 5

    data = loader(True, samples)

    import matplotlib.pyplot as plt

    d, gt = next(data)
    d = d[:, :, :, :3]
    q = seg.predict_on_batch(d)

    for i in range(len(d)):
        p = i * 3
        plt.subplot(samples, 3, p + 1)
        plt.imshow(d[i].squeeze(), cmap='gray')

        plt.subplot(samples, 3, p + 2)
        plt.imshow(gt[i].squeeze(), cmap='gray')

        plt.subplot(samples, 3, p + 3)
        plt.imshow(q[i].squeeze(), cmap='gray')

    plt.show()
Example #11
    def fit_generator(self, data, batch_size=32, size=64):
        assert need_sequence(data)
        tr_seq = AutoEncoderSequence(data, batch_size, size, self.flatten, self.normalize)
        inp, embd, out = self.model_fn([1, size, size, 3], force=True)
        model = Model(inp, out)
        model.compile('adam', 'mse')
        try:
            model.fit_generator(tr_seq, len(tr_seq) // 2, epochs=50, use_multiprocessing=True, workers=4, shuffle=True)
        except KeyboardInterrupt:
            pass
        model = Model(inp, embd)
        return model.predict_generator(tr_seq, use_multiprocessing=True, workers=4)
Example #12
def MappingModel():

    classes = {
        "type": 6,
        "color": 8,
        "has": 4,
        "for": 6,
        # "is":6
    }

    basemodel = VGG16(include_top=False,
                      weights="imagenet",
                      input_shape=(64, 64, 3))
    y = Flatten()(basemodel.output)
    y = Dense(512, activation='relu')(y)
    y = Dropout(0.5)(y)
    predense = [
        Dense(v, activation="softmax", name=k)(y) for k, v in classes.items()
    ]

    my_model = Model(inputs=basemodel.input, outputs=predense)
    trainable = False
    for lay in my_model.layers:
        if (lay.name == "block5_conv1"):
            trainable = True
        lay.trainable = trainable

    my_model.summary()
    keras.utils.plot_model(my_model,
                           to_file='output/mapping_model.png',
                           show_shapes=True)

    my_model.compile(loss='categorical_crossentropy',
                     optimizer="adadelta",
                     metrics=['accuracy'])

    tr_flow = pipe.DataPiple(
        target=r"D:\TianChi\201809ZSL\DatasetA_train_20180813\train.txt",
        size=64,
        impro=True).create_inputs(size=64)  # use data augmentation
    checkpoint = ModelCheckpoint(filepath="output/mapping_model.h5",
                                 monitor='loss',
                                 mode='auto',
                                 save_best_only=True)
    # es=keras.callbacks.EarlyStopping(monitor='loss', min_delta=0, patience=50, verbose=2, mode='min')
    tensorboard = TensorBoard(log_dir='output/mapping_vggmodel')
    my_model.fit_generator(tr_flow,
                           steps_per_epoch=32,
                           epochs=2000,
                           verbose=2,
                           callbacks=[tensorboard, checkpoint])
Example #13
    def fit(self, data: DataManager, **kwargs):
        if self.base_model is None:
            raise AttributeError("Base model is not defined!")

        if self.optimizer == 'SGD':
            optimizer = SGD(self.sgd_lr,
                            self.sgd_momentum,
                            self.sgd_decay,
                            nesterov=True)
        elif self.optimizer == 'Adam':
            optimizer = Adam(self.adam_lr, decay=self.adam_decay)
        else:
            raise ValueError('No optimizer named %s defined' %
                             str(self.optimizer))

        timestr = time.strftime('%Y-%m-%d-%H:%M:%S',
                                time.localtime(time.time()))

        # build model
        if self.classnum == 1:
            final_activation = 'sigmoid'
            loss = 'binary_crossentropy'
        else:
            final_activation = 'softmax'
            loss = 'categorical_crossentropy'

        y = self.base_model.output
        y = layers.Dropout(1 - self.keep_prob)(y)
        y = layers.Dense(self.classnum,
                         activation=final_activation,
                         name='Dense_final')(y)
        model = Model(inputs=self.base_model.input, outputs=y)
        # TODO: save models after training
        save_dir = 'dl_models'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        modelpath = os.path.join(save_dir, 'model_%s.hdf5' % timestr)
        checkpoint = ModelCheckpoint(filepath=modelpath,
                                     monitor=self.monitor,
                                     save_best_only=True,
                                     period=1)
        earlystop = EarlyStopping(monitor=self.monitor, patience=12)
        model.compile(optimizer=optimizer, loss=loss, metrics=[self.metricstr])
        model.fit_generator(generator=self.train_gen,
                            epochs=200,
                            validation_data=self.valid_gen,
                            callbacks=[checkpoint, earlystop])
        self.estimator = model
        self.best_result = checkpoint.best
        return self, modelpath
def train_feature_generator(sFeatureDir: str,
                            sModelDir: str,
                            sLogPath: str,
                            keModel: keras.Model,
                            oClasses: VideoClasses,
                            nBatchSize: int = 16,
                            nEpoch: int = 100,
                            fLearn: float = 1e-4):

    # Load training data
    genFeaturesVal = FeaturesGenerator(sFeatureDir + "/val", nBatchSize,
                                       keModel.input_shape[1:],
                                       oClasses.liClasses)
    genFeaturesTrain = FeaturesGenerator(sFeatureDir + "/train", nBatchSize,
                                         keModel.input_shape[1:],
                                         oClasses.liClasses)

    # Helper: Save results
    csv_logger = keras.callbacks.CSVLogger(sLogPath.split(".")[0] + "-acc.csv")

    # Helper: Save the model
    os.makedirs(sModelDir, exist_ok=True)
    checkpointLast = keras.callbacks.ModelCheckpoint(
        filepath=sModelDir + "/" + (sLogPath.split("/")[-1]).split(".")[0] +
        "-last.h5",
        verbose=0)
    checkpointBest = keras.callbacks.ModelCheckpoint(
        filepath=sModelDir + "/" + (sLogPath.split("/")[-1]).split(".")[0] +
        "-best.h5",
        verbose=1,
        save_best_only=True)

    optimizer = keras.optimizers.Adam(lr=fLearn)
    keModel.compile(loss='categorical_crossentropy',
                    optimizer=optimizer,
                    metrics=['accuracy'])

    # Fit!
    print("Fit with generator, learning rate %f ..." % fLearn)
    keModel.fit_generator(
        generator=genFeaturesTrain,
        validation_data=genFeaturesVal,
        epochs=nEpoch,
        workers=1,  #4,                 
        use_multiprocessing=False,  #True,
        verbose=1,
        callbacks=[csv_logger, checkpointLast, checkpointBest])

    return
Example #15
def inceptionv3(classes,epochs,steps_per_epoch,validation_steps,input_shape):
    # load the data
    train_batches, valid_batches = load_data(input_shape)

    input_shape += (3,)

    temp_model = keras.applications.inception_v3.InceptionV3(include_top=False, pooling='avg', input_shape=input_shape)
    if classes == 1:
        print("Binary classification")
        outputs = Dense(classes, activation='sigmoid')(temp_model.output)
        model = Model(temp_model.inputs, outputs)

        model.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy', metrics=['accuracy'])

    else:
        print("多分类")
        outputs = Dense(classes, activation='softmax')(temp_model.output)
        model = Model(temp_model.inputs, outputs)

        model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    print('the number of layers in this model_cx:' + str(len(model.layers)))

    # save the model weights
    out_dir = "../weights/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    filepath ="../weights/inceptionv3_{epoch:04d}.h5"
    # 中途训练效果提升, 则将文件保存, 每提升一次, 保存一次
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=False,
                                 mode='max')
    # learning-rate reduction
    lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1,
                                  min_lr=0.000005, mode="min")
    # early stopping
    earlystopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
    # log the training history
    log_dir = "../logs/"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logfile="../logs/inceptionv3.csv"
    log=keras.callbacks.CSVLogger(logfile, separator=',', append=False)
    loggraph=keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)

    callbacks_list = [checkpoint, lr_reduce, log]
    # train
    model.fit_generator(train_batches, steps_per_epoch=steps_per_epoch, validation_data=valid_batches,
                        validation_steps=validation_steps, epochs=epochs, verbose=2,
                        callbacks=callbacks_list,workers=16,max_queue_size=20)
Example #16
File: main.py  Project: micferr/cocokeras
def train_model(params, data, kfold_cross_iteration):
    """
    :type params: train_utils.TrainParams
    """
    # Create the model
    input = Input(shape=(params.image_size, params.image_size, 3))
    for i in range(params.conv_layers):
        x = Conv2D(
            params.conv_num_filters,
            params.conv_filter_size,
            strides=params.conv_stride,
            activation='relu',
            padding='same'
        )(x if i != 0 else input)
        x = MaxPooling2D(pool_size=params.conv_pooling_size)(x)
    x = Flatten()(x)
    out = Dense(NUM_CATEGORIES if (not SINGLE_CATEGORIES) else 1, activation='sigmoid')(x)
    model = Model(inputs=input, outputs=out)

    model.compile(
        optimizer=RMSprop(),
        metrics=['accuracy'],
        loss=weighted_loss
    )
    print(model.summary())
    plot_model(model, params.base_dir + 'graph' + str(params.nn_id) + '.png', show_shapes=True)

    train_generator = CocoBatchGenerator(data[0], dataDir, params, imgids_to_cats)
    val_generator = CocoBatchGenerator(data[1], dataDir, params, imgids_to_cats)
    callbacks = [TensorBoard(log_dir='./tb')]
    if params.early_stop:
        callbacks += [EarlyStopping('val_loss', patience=2)]

    history = model.fit_generator(
        train_generator,
        epochs=params.epochs,
        callbacks=callbacks,
        validation_data=val_generator
    )

    if SAVE_MODEL:
        save_model(model, params.base_dir + "model" + str(params.nn_id) + '_' + str(kfold_cross_iteration) + ".h5")
    with open(params.base_dir + "history" + str(params.nn_id) + '_' + str(kfold_cross_iteration) + ".txt", "w+") as f:
        f.write('epoch,val_acc,val_loss\n')
        for i in range(len(history.history['val_loss'])):
            f.write("{},{},{}\n".format(i + 1, history.history['val_acc'][i], history.history['val_loss'][i]))

    plot_x = list(range(1, len(history.history['val_loss']) + 1))
    plot_y = history.history['val_loss']

    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.xlim(0.0, params.epochs)
    plt.ylim(0.0, 1.0)
    plt.plot(plot_x, plot_y, color='blue', linestyle='-')
    plt.savefig(params.base_dir + 'loss' + str(params.nn_id) + '_' + str(kfold_cross_iteration) + '.png', dpi=300)

    return history
Example #17
def fit(
        model: Model,
        out_model_path,
        out_logs_path,
        out_tmp_weights_path,  # TODO
        train_path,
        test_path,
        input_size: int,
        epochs=10,
        batch_size=2,
        resume=False,
        last_epoch=-1):
    patience = 20

    train_generator = DatasetSequence(train_path, batch_size, input_size)
    test_generator = DatasetSequence(test_path, batch_size, input_size)

    steps_per_epoch = len(train_generator)

    callbacks = [
        ReduceLROnPlateau(monitor='loss',
                          factor=0.5,
                          patience=5,
                          min_lr=1e-9,
                          epsilon=0.00001,
                          verbose=1,
                          mode='min'),
        EarlyStopping(monitor='loss', patience=patience, verbose=1),
        ModelCheckpoint('%s/temp{epoch:02d}-{loss:.2f}.h5' %
                        out_tmp_weights_path,
                        monitor='loss',
                        save_best_only=True,
                        verbose=1),
        CSVLogger(out_logs_path, append=resume),
        TensorBoard(log_dir='./logs',
                    histogram_freq=0,
                    batch_size=batch_size,
                    write_graph=True,
                    write_grads=False,
                    write_images=True,
                    embeddings_freq=0,
                    embeddings_layer_names=None,
                    embeddings_metadata=None)
    ]

    print('Start training...')
    history = model.fit_generator(generator=train_generator,
                                  validation_data=test_generator,
                                  epochs=epochs,
                                  steps_per_epoch=steps_per_epoch,
                                  initial_epoch=last_epoch +
                                  1 if resume else 0,
                                  verbose=1,
                                  callbacks=callbacks)

    model.save_weights(out_model_path)
    pd.DataFrame(history.history).to_csv('out/zf_unet_224_train.csv',
                                         index=False)
    print('Training is finished...')
Example #18
def main():
    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'simpleCNN_' + now + '.h5'
    batch_size = 256
    num_epochs = 30
    lr = .001

    num_train_samples = len(os.listdir('./data/train/cancer')) + len(os.listdir('./data/train/healthy'))
    num_valid_samples = len(os.listdir('./data/validation/cancer')) + len(os.listdir('./data/validation/healthy'))

    # Build our model
    input_tensor = Input(shape=(96, 96, 3))
    x = layers.Conv2D(32, (3, 3))(input_tensor)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3))(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(128, (3, 3))(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(128, (3, 3))(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(.5)(x)
    x = layers.Dense(512, activation='relu')(x)
    output_tensor = layers.Dense(1, activation='sigmoid')(x)

    model = Model(input_tensor, output_tensor)
    model.summary()

    # Get things ready to train: should adjust learning rate, etc.
    model.compile(optimizer=Adam(lr), loss='binary_crossentropy', metrics=['acc'])

    train_generator = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    steps_per_epoch = num_train_samples / batch_size
    validation_steps = num_valid_samples / batch_size

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath='./models/' + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         patience=3)
    csv_logger = callbacks.CSVLogger('./logs/' + model_name.split('.')[0] + '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    model.save('./models/' + model_name)

    make_plots(history, model_name)
Example #19
File: unet.py  Project: anssar/salt
def train_stage_2(x_train, y_train, x_valid, y_valid):
    model = load_model(OUTPUT_DIR +
                       "/{}/models/{}_fold_{}_stage1.model".format(
                           BASE_NAME, BASE_NAME, CUR_FOLD_INDEX),
                       custom_objects={
                           'my_iou_metric': my_iou_metric,
                           'bce_dice_jaccard_loss': bce_dice_jaccard_loss
                       })
    opt = optimizers.adam(lr=0.001)
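    # take the tensor feeding the final layer (i.e. drop the last activation) so that
    # stage 2 can train on logits with the Lovasz loss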
    input_x = model.layers[0].input
    output_layer = model.layers[-1].input
    model = Model(input_x, output_layer)
    model.compile(loss=lovasz_loss, optimizer=opt, metrics=[my_iou_metric_2])
    model_checkpoint = ModelCheckpoint(
        OUTPUT_DIR + "/{}/models/{}_fold_{}_stage2.model".format(
            BASE_NAME, BASE_NAME, CUR_FOLD_INDEX),
        monitor='val_my_iou_metric_2',
        mode='max',
        save_best_only=True,
        verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric_2',
                                  mode='max',
                                  factor=0.5,
                                  patience=6,
                                  min_lr=0.00001,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_my_iou_metric_2',
                                   mode='max',
                                   patience=20,
                                   verbose=1)
    logger = CSVLogger(OUTPUT_DIR + '/{}/logs/{}_fold_{}_stage2.log'.format(
        BASE_NAME, BASE_NAME, CUR_FOLD_INDEX))
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / np.ceil(BATCH_SIZE / (len(AUGS) + 1)))),
        epochs=EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[early_stopping, model_checkpoint, reduce_lr, logger],
        shuffle=True)
Example #20
    def trainOnIndexes(self,
                       fold: int,
                       model: keras.Model,
                       callbacks,
                       negatives,
                       indexes,
                       doValidation=False):
        train_indexes = self.sampledIndexes(fold, True, negatives)
        vl = len(train_indexes)
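        # keep only the `indexes`-th tenth of the sampled training indexes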
        train_indexes = train_indexes[indexes * vl // 10:(indexes + 1) * vl //
                                      10]

        test_indexes = self.sampledIndexes(fold, False, negatives)

        tl, tg, train_g = self.generator_from_indexes(train_indexes)
        vl, vg, test_g = self.generator_from_indexes(test_indexes,
                                                     isTrain=False)
        try:
            v_steps = len(test_indexes) // self.batchSize

            if v_steps < 1: v_steps = 1

            iterations = len(train_indexes) // (self.batchSize)
            if self.maxEpochSize is not None:
                iterations = min(iterations, self.maxEpochSize)
            if doValidation:
                model.fit_generator(train_g(),
                                    iterations,
                                    epochs=1,
                                    validation_data=test_g(),
                                    callbacks=callbacks,
                                    verbose=1,
                                    validation_steps=v_steps,
                                    initial_epoch=0)
            else:
                model.fit_generator(train_g(),
                                    iterations,
                                    epochs=1,
                                    callbacks=callbacks,
                                    verbose=1,
                                    initial_epoch=0)
        finally:
            tl.terminate()
            tg.terminate()
            vl.terminate()
            vg.terminate()
Example #21
def main():
    """ Main function. """

    checkpoint_path = "./saved_models/"
    model_final = None
    #makes checkpoint folder if it doesn't exist
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    
    #Loading model if user requested it
    if ARGS.load_checkpoint is not None:
        if ARGS.load_checkpoint.endswith('.h5') and os.path.isfile(ARGS.load_checkpoint):
            print("Found an existing model! Loading it...")
            model_final = tf.keras.models.load_model(ARGS.load_checkpoint)
            model_final.summary()
        else:
            print("Error: Pass in h5 file of the model!!")
            return 
    else:
        ### Load the data
        datasets = Datasets(ARGS.data)

        vggmodel = VGG16(weights='imagenet', include_top=True)
        vggmodel.summary()   

        ### Freeze the first 15 layers of vggmodel
        for layers in (vggmodel.layers)[:15]:
            print(layers)
            layers.trainable = False

        X = vggmodel.layers[-2].output

        # A fully connected layer is added for predictions
        predictions = Dense(hp.num_classes, activation="softmax")(X)
        model_final = Model(inputs=vggmodel.input, outputs=predictions)
        opt = Adam(lr=hp.learning_rate)

        model_final.compile(loss=keras.losses.categorical_crossentropy, optimizer=opt, metrics=["accuracy"])
        model_final.summary()
        
        #Training configurations are set and training is performed through fit_generator.
        checkpoint = ModelCheckpoint(checkpoint_path + "rcnn_vgg16_1.h5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
        early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')

        model_final.fit_generator(generator=datasets.train_data, steps_per_epoch=10, epochs=1000, validation_data=datasets.test_data, validation_steps=2, callbacks=[checkpoint, early_stop])
Example #22
class SequenceToSequenceTrainer(BaseSeq2seq):
    def __init__(self, char_table, encoding_size=128, input_channels=2):
        super().__init__(encoding_size,
                         input_channels=input_channels,
                         output_channels=len(char_table))
        self._char_table = char_table

        self._encoder = self.encoder_model()
        self._decoder = self.decoder_model()

        encoder_inputs = self._encoder.input
        decoder_inputs = self._decoder.input[0]

        state_vector = self._encoder(encoder_inputs)
        output, _ = self._decoder([decoder_inputs, state_vector])

        self._model = Model([encoder_inputs, decoder_inputs], output)

    def feature_extractor(self, x):
        return x

    def fit_generator(self, lr, train_gen, val_gen, *args, **kwargs):
        estimator = self.get_performance_estimator(8)

        class MyCallback(Callback):
            def on_epoch_end(self, epoch, logs=None):
                if epoch % 5 == 0:
                    estimator.estimate(train_gen)
                    print()
                    estimator.estimate(val_gen)

        self._model.compile(optimizer=RMSprop(lr=lr),
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])

        self._model.fit_generator(callbacks=[MyCallback()], *args, **kwargs)

    def get_inference_model(self):
        return SequenceToSequencePredictor(self._encoder, self._decoder,
                                           self._char_table,
                                           self._input_channels)

    def get_performance_estimator(self, num_trials):
        return Seq2seqMetric(self.get_inference_model(), num_trials)
Example #23
def main():
    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'pretrain_NASNet_' + now + '.h5'
    batch_size = 32
    num_epochs = 30
    lr = .0001

    num_train_samples = len(os.listdir('./data/train/cancer')) + len(os.listdir('./data/train/healthy'))
    num_valid_samples = len(os.listdir('./data/validation/cancer')) + len(os.listdir('./data/validation/healthy'))

    # Build our model
    input_tensor = Input(shape=(96, 96, 3))
    NASNet = NASNetMobile(include_top=False, input_shape=(96, 96, 3))
    x = NASNet(input_tensor)
    x1 = layers.GlobalMaxPooling2D()(x)
    x2 = layers.GlobalAveragePooling2D()(x)
    x3 = layers.Flatten()(x)
    z = layers.Concatenate(axis=-1)([x1, x2, x3])
    z = layers.Dropout(.5)(z)
    output_tensor = layers.Dense(1, activation='sigmoid')(z)

    model = Model(input_tensor, output_tensor)
    model.summary()

    # Get things ready to train: tweak learning rate, etc.
    model.compile(optimizer=Adam(lr), loss='binary_crossentropy', metrics=['acc'])

    train_generator = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    steps_per_epoch = num_train_samples / batch_size
    validation_steps = num_valid_samples / batch_size

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath='./models/' + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         patience=4)
    csv_logger = callbacks.CSVLogger('./logs/' + model_name.split('.')[0] + '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    model.save('./models/' + model_name)

    make_plots(history, model_name)
Example #24
def run():
    # now we run the training
    from keras import Model
    from keras.layers import GRU, Dense, Input, Masking, Embedding

    in_node = Input(shape=(MAX_LENGTH, ))
    # we'll use an embedding layer to represent each person as a
    # vector
    embedding = Embedding(MAX_EMBED + 1 * 2, 1, mask_zero=True)(in_node)
    # a gru can translate this variable number of people into a fixed
    # size representation
    gru = GRU(5)(embedding)
    dense = Dense(5)(gru)
    # note that we don't want to use any activation functions for the
    # final output given that this is a regression problem
    out = Dense(1)(dense)

    model = Model(inputs=[in_node], outputs=[out])
    model.compile(loss='mse', optimizer='adam')

    # this function runs the name generator infinitely to produce
    # training data
    def make_arrays(generator, max_length=MAX_LENGTH, n_samples=400):
        ar = np.zeros((n_samples, max_length), dtype=int)
        targ = np.zeros((n_samples, 1), dtype=float)
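        # fill a fixed-size buffer one sample at a time; each time the write index
        # reaches the end of the buffer, yield the whole (inputs, targets) pair as a batch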
        for n, samp in zip(cycle(range(n_samples)), generator()):
            emb = index_participents(samp)
            ar[n, :] = emb
            targ[n, 0] = len(samp)
            if n + 1 == n_samples:
                yield ar, targ

    model.fit_generator(make_arrays(name_generator),
                        steps_per_epoch=100,
                        epochs=EPOCHS)

    outdir = Path('models')
    outdir.mkdir(exist_ok=True)

    model.save(outdir / 'deep.h5')
    def fit_classifier(self, d, fold: int, model: keras.Model, batchSize=None, stage=0):
        if batchSize is None:
            batchSize = self.batch
        fld = self.kfold(d, indeces=None, batch=batchSize)
        indeces = fld.indexes(fold, True)
        vindeces = fld.indexes(fold, False)

        r, v, rs = fld.classification_generator_from_indexes(indeces)
        r1, v1, rs1 = fld.classification_generator_from_indexes(vindeces)
        try:
            ec = ExecutionConfig(fold=fold, stage=stage, dr=os.path.dirname(self.path))
            cb = [] + self.callbacks
            cb.append(keras.callbacks.CSVLogger(ec.classifier_metricsPath()))
            cb.append(keras.callbacks.ModelCheckpoint(ec.classifier_weightsPath(), save_best_only=True,
                                                      monitor="val_binary_accuracy", verbose=1))

            model.fit_generator(rs(), len(indeces) // batchSize, 20, validation_data=rs1(),
                                validation_steps=len(vindeces) // batchSize, callbacks=cb)
        finally:
            r.terminate()
            v.terminate()
            r1.terminate()
            v1.terminate()
Example #26
def ImageModel():
    basemodel = VGG16(include_top=False,
                      weights="imagenet",
                      input_shape=(64, 64, 3))
    y = Flatten()(basemodel.output)
    # y = Dense(1024, activation='relu')(y)
    y = Dense(190, activation='softmax')(y)

    my_model = Model(basemodel.input, y)
    trainable = False
    for lay in my_model.layers:
        if (lay.name == "block5_conv1"):
            trainable = True
        lay.trainable = trainable

    my_model.summary()
    keras.utils.plot_model(my_model,
                           to_file='output/vgg_model.png',
                           show_shapes=True)

    my_model.compile(loss='categorical_crossentropy',
                     optimizer="adadelta",
                     metrics=['accuracy'])

    tr_flow = pipe2.DataPiple(
        target=r"D:\TianChi\201809ZSL\DatasetA_train_20180813\train.txt",
        size=64,
        impro=True).create_inputs(size=64)  # use data augmentation
    checkpoint = ModelCheckpoint(filepath="output/vgg_model.h5",
                                 monitor='acc',
                                 mode='auto',
                                 save_best_only=True)
    # es = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0, patience=100, verbose=2, mode='min')  # early stopping
    tensorboard = TensorBoard(log_dir='output/log_vggmodel')
    my_model.fit_generator(tr_flow,
                           steps_per_epoch=32,
                           epochs=1000,
                           verbose=2,
                           callbacks=[checkpoint, tensorboard])
Example #27
def optA():
    mycoco.setmode('train')
    ids = mycoco.query(args.categories, exclusive=False)
    if args.maxinstances:
        x = args.maxinstances
    else:
        x = len(min(ids, key=len))
    list1 = []
    for i in range(len(ids)):
        list1.append(ids[i][:x])
    print("Maximum number of instances are :" , str(x))
    imgiter = mycoco.iter_images(list1, [0,1], batch=100)
    input_img = Input(shape=(200,200,3))
    # Encoder Layers
    x = Conv2D(8, (3, 3), activation='relu')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(8, (3, 3), activation='relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)

    # Decoder Layers
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(1, (3, 3), activation='relu')(x)
    x = Flatten()(x)
    x = Dense(10)(x)
    decode = Dense(1, activation="sigmoid")(x)
    
    model = Model(input_img, decode)
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    filepath="/scratch/gusmohyo/checkfile.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    model.fit_generator(imgiter, steps_per_epoch=10, epochs=30, callbacks=callbacks_list, verbose=0)
    model.save(args.modelfile)
    print("Option A is implemented!")
Example #28
def get_max_lr(model: keras.Model,
               generator: Union[keras.utils.Sequence, Generator],
               lr_base: float = 1.0e-10,
               lr_max: float = 1.0,
               steps: int = 10000,
               smooth: int = 1000,
               plot: bool = True) -> float:
    """Осуществляет тестирование модели на максимальный learning rate и при необходимости рисует график

    :param model:
        Скомпилированная Keras модель.
    :param generator:
        Генератор обучающих примеров.
    :param lr_base:
        Начальная скорость обучения.
    :param lr_max:
        Максимальная скорость обучения.
    :param steps:
        Количество промежуточных шагов.
    :param smooth:
        Количество шагов для сглаживания.
    :param plot:
        Нужно ли рисовать график.
    :return:
        Максимальная скорость обучения
    """
    test = MaxLRTest(lr_base, lr_max, steps)
    model.fit_generator(generator,
                        steps_per_epoch=steps,
                        epochs=1,
                        callbacks=[test])
    history = pd.DataFrame(
        test.history).set_index("lr").loss.rolling(smooth).mean()
    lr = history.idxmin()
    print(f"Max speed learning rate  - {lr:.1e}")
    if plot:
        history.plot(logx=True, figsize=(16, 8))
        plt.show()
    return lr
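A hedged usage sketch for the learning rate range test above (illustrative names only: `model` is assumed to be an already compiled Keras model and `batch_gen` a generator yielding (x, y) training batches):

from keras import backend as K

max_lr = get_max_lr(model, batch_gen, lr_base=1e-7, lr_max=1.0,
                    steps=2000, smooth=200, plot=False)
# common heuristic: continue training well below the found maximum rate
K.set_value(model.optimizer.lr, max_lr / 10)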
Example #29
def train(params):
    size = params['size']
    gen_train = batch_generator(get_generator(frame_train, params), batch_size,
                                len(frame_train), params)
    gen_valid = batch_generator(get_generator(frame_valid, params), batch_size,
                                len(frame_valid), params)
    gen_test = batch_generator(get_generator(frame_test, params), batch_size,
                               len(frame_test), params)

    model = VGG16(include_top=False,
                  weights=None,
                  input_shape=(params['size'], params['size'], 1))
    x = Flatten(name='flatten')(model.output)
    drop = params['dropout']
    if drop > 0:
        x = Dropout(drop)(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)
    model = Model(model.input, outputs=x)

    lr = pow(10, -params['lr_exp'])
    decay = pow(10, -params['decay_exp'])

    opt = SGD(lr=lr, momentum=0.9, decay=decay)

    model.compile(optimizer=opt, loss='mean_absolute_error')

    steps_train = int(ceil(len(frame_train) / batch_size))
    steps_valid = int(ceil(len(frame_valid) / batch_size))
    steps_test = int(ceil(len(frame_test) / batch_size))

    tensorboard = TensorBoard()

    loss = model.fit_generator(
        gen_train,
        steps_per_epoch=steps_train,
        epochs=5,
        validation_data=gen_valid,
        validation_steps=steps_valid,
        callbacks=[tensorboard],
    )

    return loss
Example #30
    def train(self, train_batches, test_batches, model, num_train_steps,
              num_test_steps):
        classes = ["attack", "real"]

        model.layers.pop()

        for layer in model.layers:
            layer.trainable = False

        last = model.layers[-1].output

        classification_layer = Dense(len(classes), activation="softmax")(last)

        ft_model = Model(model.input, classification_layer)
        ft_model = multi_gpu_model(ft_model, gpus=2)
        ft_model.compile(optimizer=Adam(lr=0.00001),
                         loss='binary_crossentropy',
                         metrics=['accuracy'])

        time_callback = TimeHistory()

        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.2,
                                      patience=5,
                                      min_lr=0.000001)

        tensorboard = TensorBoard(
            log_dir=
            '/codes/bresan/remote/spoopy/spoopy/refactored/classification/finetuning',
            histogram_freq=0,
            write_graph=True,
            write_images=False)

        history = ft_model.fit_generator(train_batches,
                                         steps_per_epoch=num_train_steps,
                                         epochs=50,
                                         callbacks=[time_callback, reduce_lr],
                                         validation_data=test_batches,
                                         validation_steps=num_test_steps)

        return ft_model, history, time_callback