Example #1
 def get_optimizer(self):
     if self.opt_name == 'sgd':
         opt = SGD(lr=self.lr, momentum=0.9) # lr=1e-4
     elif self.opt_name == 'adam':
         opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) # lr=1e-3
     else:
         opt = SGD(lr=self.lr, momentum=0.9) # for clr # lr=1e-4
     return opt
Example #2
File: base.py Project: tdefa/big-fish
def get_optimizer(optimizer_name="adam", **kwargs):
    """Instantiate the optimizer.

    Parameters
    ----------
    optimizer_name : str
        Name of the optimizer to use.

    Returns
    -------
    optimizer : tf.keras.optimizers.Optimizer
        Optimizer instance used in the model.

    """
    # TODO use tensorflow optimizer
    if optimizer_name == "adam":
        optimizer = Adam(**kwargs)
    elif optimizer_name == "adadelta":
        optimizer = Adadelta(**kwargs)
    elif optimizer_name == "adagrad":
        optimizer = Adagrad(**kwargs)
    elif optimizer_name == "adamax":
        optimizer = Adamax(**kwargs)
    elif optimizer_name == "sgd":
        optimizer = SGD(**kwargs)
    else:
        raise ValueError(
            "Instead of {0}, optimizer must be chosen among "
            "['adam', 'adadelta', 'adagrad', 'adamax', 'sgd'].".format(
                optimizer_name))

    return optimizer
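A minimal alternative sketch (my own illustration, not code from the project above): the same name-to-class dispatch can be written with a dict, which keeps the optimizer registry and the error message from drifting apart. It assumes the same `from keras.optimizers import ...` imports as the example.

from keras.optimizers import Adam, Adadelta, Adagrad, Adamax, SGD

# Registry mapping names to optimizer classes; a single source of truth
# for both dispatch and the error message.
_OPTIMIZERS = {
    "adam": Adam,
    "adadelta": Adadelta,
    "adagrad": Adagrad,
    "adamax": Adamax,
    "sgd": SGD,
}

def get_optimizer(optimizer_name="adam", **kwargs):
    """Instantiate an optimizer by name; raise ValueError for unknown names."""
    try:
        return _OPTIMIZERS[optimizer_name](**kwargs)
    except KeyError:
        raise ValueError("Instead of {0}, optimizer must be chosen among {1}.".format(
            optimizer_name, sorted(_OPTIMIZERS)))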
Example #3
def create_model():
    model = Sequential()
    model.add(Dense(1, input_shape=(3, ), activation='sigmoid'))
    #   model.add(Dense(NUMBER_OF_ACTIONS, activation='sigmoid'))
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss="mse", optimizer=sgd, metrics=["accuracy"])
    return model
Example #4
def train_top_model():
    # Load the bottleneck features and labels
    train_features = np.load(
        open(output_dir + 'bottleneck_features_train.npy', 'rb'))
    train_labels = np.load(
        open(output_dir + 'bottleneck_labels_train.npy', 'rb'))
    validation_features = np.load(
        open(output_dir + 'bottleneck_features_validation.npy', 'rb'))
    validation_labels = np.load(
        open(output_dir + 'bottleneck_labels_validation.npy', 'rb'))

    # Create the top model for the inception V3 network, a single Dense layer
    # with softmax activation.
    top_input = Input(shape=train_features.shape[1:])
    top_output = Dense(5, activation='softmax')(top_input)
    model = Model(top_input, top_output)

    # Train the model using the bottleneck features and save the weights.
    model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    csv_logger = CSVLogger(output_dir + 'top_model_training.csv')
    model.fit(train_features,
              train_labels,
              epochs=top_epochs,
              batch_size=batch_size,
              validation_data=(validation_features, validation_labels),
              callbacks=[csv_logger])
    model.save_weights(top_model_weights_path)
Example #5
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    dataset_DIR = os.path.join(DATA_DIR, options.dataset)

    data_dict = dd.io.load(os.path.join(dataset_DIR, 'data.h5'))

    x_train, y_train, x_test, y_test = data_dict['x_train'], data_dict[
        'y_train'], data_dict['x_test'], data_dict['y_test']

    validation_data = (x_test, y_test)

    kwargs = {
        'MAX_SEQUENCE_LENGTH': x_train.shape[1],
        'num_classes': y_train.shape[1],
        'num_words': data_dict['num_words'],
        'dropout_rate': options.dropout_rate,
        'flag': options.mode
    }

    if 'rand' in options.mode:
        kwargs['embedding_weights'] = None

    else:
        fname_wordvec = 'glove_'

        if options.wordvectors:
            fname_wordvec = 'word2vec_'

        kwargs['embedding_weights'] = np.load(
            os.path.join(dataset_DIR, fname_wordvec + 'embedding.npy'))

    text_model = TextCNN(**kwargs)

    model = model_placement(text_model, num_gpus=options.num_gpus)
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=SGD(lr=options.lr))

    num_examples = range(x_train.shape[0])
    history = History()
    callbacks = [history]
    num_epochs = options.num_epochs

    if options.debug:
        validation_data = None
        num_examples = range(1000)
        callbacks = None
        num_epochs = 5

    train_data = (x_train[num_examples], y_train[num_examples])

    model.fit(x=x_train[num_examples],
              y=y_train[num_examples],
              batch_size=options.batch_size,
              callbacks=callbacks,
              epochs=num_epochs,
              validation_data=validation_data)
Example #6
def train(model_file,train_path,validation_path,num_hidden=200,num_classes=5,steps=32,num_epochs=20,save_period=1):
	if os.path.exists(model_file):
		print("\n***Existing model found at {}. Loading.***\n\n".format(model_file))
		model=load_existing(model_file)
	else:
		print("\n***Creating new model ***\n\n")
		model=create_model(num_hidden,num_classes)
	
	model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])

	checkpoint=ModelCheckpoint(model_file,period=save_period)

	train_datagen=ImageDataGenerator(rescale=1./255,shear_range=0.2,zoom_range=0.2,horizontal_flip=True)

	test_datagen=ImageDataGenerator(rescale=1./255)

	train_generator=train_datagen.flow_from_directory(train_path,target_size=(249,249),batch_size=32,class_mode="categorical")

	validation_generator=test_datagen.flow_from_directory(validation_path,target_size=(249,249),batch_size=32,class_mode='categorical')

	model.fit_generator(train_generator,steps_per_epoch=steps,epochs=num_epochs,callbacks=[checkpoint],validation_data=validation_generator,validation_steps=50)

	for layer in model.layers[:249]:
		layer.trainable=False
	
	for layer in model.layers[249:]:
		layer.trainable=True
	
	model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='categorical_crossentropy',metrics=['accuracy'])

	model.fit_generator(train_generator,steps_per_epoch=steps,epochs=num_epochs,callbacks=[checkpoint],validation_data=validation_generator,validation_steps=50)
Example #7
    def fit(self, X, y, X_val, y_val):
        ## scaler
        #        self.scaler = StandardScaler()
        #        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1], )))
        ## hidden layers
        first = True
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))

        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        logger.info(self.model.summary())

        ## callback
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=1e-2,
                                       patience=10,
                                       verbose=0,
                                       mode='auto')

        cb_my = LossHistory()

        ## fit
        self.model.fit(X,
                       y,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_data=[X_val, y_val],
                       callbacks=[early_stopping, cb_my],
                       verbose=1)
        return self
Example #8
def main():
    direc_data = '/data/npz_data/cells/unspecified_nuclear_data/nuclear_movie/'
    dataset = 'nuclear_movie_same'

    training_data = np.load('{}{}.npz'.format(direc_data, dataset))

    optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    lr_sched = rate_scheduler(lr=0.01, decay=0.99)
    in_shape = (14, 14, 1)
    model = the_model(input_shape=in_shape)  #, n_features=1, reg=1e-5)

    train_model_siamese(
        model=model,
        dataset='nuclear_movie_same',
        optimizer=optimizer,
        expt='',
        it=0,
        batch_size=1,
        n_epoch=100,
        direc_save='/data/models/cells/unspecified_nuclear_data/nuclear_movie',
        direc_data=
        '/data/npz_data/cells/unspecified_nuclear_data/nuclear_movie/',
        lr_sched=lr_sched,
        rotation_range=0,
        flip=True,
        shear=0,
        class_weight=None)
Example #9
def load_cnn():
    def get_lr_metric(optimizer):
        def lr(y_true, y_pred):
            return optimizer.lr

        return lr

    def recall(y_true, y_pred):
        y_true = K.ones_like(y_true)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        all_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (all_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        y_true = K.ones_like(y_true)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    cnn_name = 'DeepFont_5'
    cnn = load_model(f'E:\\FontRecognition\\App\\Model\\{cnn_name}.h5',
                     custom_objects={
                         'lr': get_lr_metric(SGD()),
                         'precision': precision,
                         'recall': recall
                     })
    return cnn
Example #10
def TrainModel(path, train_dir, val_dir, batch_size,
               epochs, out_nums, nb_train_samples,
               nb_val_samples, img_width=256, img_height=256, freeze=13):
    # Build the training and validation data generators
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=40,
        # width_shift_range=0.2,
        # height_shift_range=0.2,
        # shear_range=0.2,
        # zoom_range=0.2,
        horizontal_flip=True, )  # training-data preprocessor with random horizontal flips
    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)  # test-data preprocessor
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(img_width, img_height),
                                                        batch_size=batch_size,
                                                        # class_mode='binary'
                                                        )  # training-data generator
    validation_generator = test_datagen.flow_from_directory(val_dir,
                                                            target_size=(img_width, img_height),
                                                            batch_size=batch_size,
                                                            # class_mode='binary',
                                                            shuffle=True)  # validation-data generator

    base_model = VGG16(weights=path, include_top=False,     # load the pretrained model for transfer learning
                        input_shape=(img_width, img_height, 3))

    for ix, layers in enumerate(base_model.layers):
        if ix < freeze:    # freeze the specified layers
            layers.trainable = False
        # layers.trainable = False   # freeze every layer instead

    # Add new layers for training
    model = Flatten()(base_model.output)
    model = Dense(256, activation='relu', name='fc1')(model)
    model = Dropout(0.5, name='dropout1')(model)
    #================= optional extra fully connected layer =================
    # model = Dense(64, activation='relu', name='fc2')(model)
    # model = Dropout(0.5, name='dropout2')(model)
    #==============================================================
    model = Dense(out_nums, activation='softmax')(model)
    # model = Dense(out_nums, activation='sigmoid')(model)
    model_final = Model(inputs=base_model.input, outputs=model)
    model_final.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.0001, momentum=0.9),
                  metrics=['accuracy'])
    # model_final.compile(loss='binary_crossentropy',
    #                     optimizer=SGD(lr=0.0001, momentum=0.9),
    #                     metrics=['accuracy'])
    print(model_final.summary())
    callbacks = [
        EarlyStopping(patience=2, verbose=1),
        ModelCheckpoint('savemodel_1fc256.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')
        # ModelCheckpoint('savemodel_1fc256_3conv_binary.h5', verbose=1, save_best_only=False, mode='max')
    ]

    # Train & evaluate
    model_final.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs,
                        validation_data=validation_generator, validation_steps=nb_val_samples // batch_size,
                        callbacks=callbacks, initial_epoch=0)  # one result line per epoch
Example #11
def train(model_file,
          train_path,
          validation_path,
          target_size=(256, 256),
          num_classes=5,
          steps=32,
          num_epochs=28):
    if os.path.exists(model_file):
        print('\n*** existing model found at {}. Loading. ***\n\n'.format(
            model_file))
        model = load_existing(model_file)
    else:
        print("\n*** Creating new model ***\n\n")
        model = create_model(num_classes=num_classes)

    check_point = ModelCheckpoint(model_file, period=1)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.3,
        zoom_range=0.3,
        # horizontal_flip=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        brightness_range=(0.8, 1.2))
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        train_path,
        target_size=target_size,
        batch_size=32,
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        validation_path,
        target_size=target_size,
        batch_size=32,
        class_mode='categorical')
    model.fit_generator(train_generator,
                        steps_per_epoch=steps,
                        epochs=num_epochs,
                        callbacks=[
                            check_point,
                        ],
                        validation_data=validation_generator,
                        validation_steps=50)
    for layer in model.layers[:249]:
        layer.trainable = False

    for layer in model.layers[249:]:
        layer.trainable = True

    model.compile(optimizer=SGD(lr=0.00001, momentum=0.9),
                  loss='categorical_crossentropy')
    model.fit_generator(train_generator,
                        steps_per_epoch=steps,
                        epochs=num_epochs,
                        callbacks=[check_point],
                        validation_data=validation_generator,
                        validation_steps=50)
Example #12
def get_optimizer(optimizer='sgd', learning_rate=0.1, momentum=0.9, log=True):
    """Create an optimizer and wrap it for Horovod distributed training. Default is SGD."""
    if log:
        print('Creating optimizer on rank ' + str(hvd.rank()))
    opt = None
    if optimizer == 'sgd+nesterov':
        opt = SGD(lr=learning_rate, momentum=momentum, nesterov=True)
    elif optimizer == 'rmsprop':
        opt = RMSprop(lr=learning_rate, rho=0.9)
    elif optimizer == 'adam':
        opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)
    elif optimizer == 'adadelta':
        opt = Adadelta(lr=learning_rate, rho=0.95)
    else:
        opt = SGD(lr=learning_rate, momentum=momentum, nesterov=False)
    # Wrap optimizer for data distributed training
    return hvd.DistributedOptimizer(opt)
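A hedged usage sketch for the helper above (the model builder and training data are hypothetical placeholders): the usual Horovod recipe initializes Horovod, scales the learning rate by the worker count, and broadcasts initial variables from rank 0 so all workers start from the same state.

import horovod.keras as hvd

hvd.init()
# Scale the base learning rate by the number of workers (a common Horovod convention).
opt = get_optimizer('sgd+nesterov', learning_rate=0.1 * hvd.size())
model = build_model()  # hypothetical model builder
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

# Broadcast initial variables from rank 0 so every worker starts identically.
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
model.fit(x_train, y_train, epochs=10, callbacks=callbacks,
          verbose=1 if hvd.rank() == 0 else 0)  # log only on rank 0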
Example #13
def baseline_model(grid_size, num_actions, hidden_size):
    # setting up the model with Keras
    model = Sequential()
    model.add(
        Dense(hidden_size, input_shape=(grid_size**2, ), activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(SGD(lr=.1), "mse")
    return model
Example #14
def get_top_model(base_model, n_labels):
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    top_model.add(Dense(1024, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(n_labels, activation='sigmoid'))
    top_model.compile(loss='binary_crossentropy',
                      optimizer=SGD(lr=1e-4, momentum=0.9),
                      metrics=['accuracy'])
    return top_model
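A hedged usage sketch for get_top_model (the base network, input shape, and n_labels=17 are assumptions for illustration): the top model is stacked on a frozen convolutional base so only the new dense layers are trained.

from keras.applications import VGG16
from keras.models import Model
from keras.optimizers import SGD

base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
for layer in base_model.layers:
    layer.trainable = False  # train only the new top layers

top_model = get_top_model(base_model, n_labels=17)  # 17 labels is an arbitrary example
full_model = Model(inputs=base_model.input,
                   outputs=top_model(base_model.output))
full_model.compile(loss='binary_crossentropy',
                   optimizer=SGD(lr=1e-4, momentum=0.9),
                   metrics=['accuracy'])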
Example #15
def _get_model(model_weight_file):
    loss = { 'reconstruction': losses.mean_squared_error, 'predictions': losses.categorical_crossentropy }
    optimizer = SGD(lr=0.005, momentum=0.9, decay=1e-6, nesterov=True)
    metrics = { 'predictions': [categorical_accuracy] }
    class_weight = { 'reconstruction': 0.05, 'predictions': 1. }

    model = get_model()
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    model.load_weights(model_weight_file)

    return model
Example #16
    def _set_fine_tune(self):
        layers_to_freeze = int(len(self.__model.layers) * 0.9)

        for layer in self.__model.layers[:layers_to_freeze]:
            layer.trainable = False
        for layer in self.__model.layers[layers_to_freeze:]:
            layer.trainable = True

        self.__model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                             loss='categorical_crossentropy',
                             metrics=['accuracy'])
Example #17
def train_model_on_training_data():
    direc_save = os.path.join(MODEL_DIR, PREFIX)
    direc_data = os.path.join(NPZ_DIR, PREFIX)
    training_data = np.load(os.path.join(direc_data, DATA_FILE + '.npz'))

    class_weights = training_data['class_weights']
    X, y = training_data['X'], training_data['y']
    print('X.shape: {}\ny.shape: {}'.format(X.shape, y.shape))

    n_epoch = 100
    batch_size = 32 if DATA_OUTPUT_MODE == 'sample' else 1
    optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    lr_sched = rate_scheduler(lr=0.01, decay=0.99)

    model_args = {'norm_method': 'median', 'reg': 1e-5, 'n_features': 3}

    data_format = K.image_data_format()
    row_axis = 2 if data_format == 'channels_first' else 1
    col_axis = 3 if data_format == 'channels_first' else 2
    channel_axis = 1 if data_format == 'channels_first' else 3

    if DATA_OUTPUT_MODE == 'sample':
        train_model = train_model_sample
        the_model = bn_feature_net_61x61
        model_args['n_channels'] = 1

    elif DATA_OUTPUT_MODE == 'conv' or DATA_OUTPUT_MODE == 'disc':
        train_model = train_model_conv
        the_model = bn_dense_feature_net
        model_args['location'] = False

        size = (RESHAPE_SIZE,
                RESHAPE_SIZE) if RESIZE else X.shape[row_axis:col_axis + 1]
        if data_format == 'channels_first':
            model_args['input_shape'] = (X.shape[channel_axis], size[0],
                                         size[1])
        else:
            model_args['input_shape'] = (size[0], size[1],
                                         X.shape[channel_axis])

    model = the_model(**model_args)

    train_model(model=model,
                dataset=DATA_FILE,
                optimizer=optimizer,
                batch_size=batch_size,
                n_epoch=n_epoch,
                direc_save=direc_save,
                direc_data=direc_data,
                lr_sched=lr_sched,
                class_weight=class_weights,
                rotation_range=180,
                flip=True,
                shear=True)
Example #18
File: stagenet.py Project: Mingbaitu/hat
    def args(self):
        self.CONV_KI = 'he_normal'
        self.CONV_KR = l2(0.001)

        self.D = 0.25
        self.DX = 0.5

        # train args
        self.EPOCHS = 384
        self.BATCH_SIZE = 128
        self.OPT = SGD(lr=1e-2, decay=1e-6)
        self.OPT_EXIST = True
Example #19
def create_model(epochs=25):
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               input_shape=(3, 32, 32),
               padding='same',
               activation='relu',
               kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(32, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))
    lrate = 0.01
    decay = lrate / epochs
    sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
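For reference, the `decay` argument used above follows the legacy Keras schedule lr_t = lr0 / (1 + decay * t), where t counts parameter updates (batches). A small sketch of that formula (my own illustration, not project code):

def effective_lr(lr0, decay, t):
    # Legacy Keras per-update schedule for SGD(lr=lr0, decay=decay).
    return lr0 / (1.0 + decay * t)

# With lrate=0.01 and decay=lrate/epochs=0.0004 (epochs=25), the rate only
# halves after 2500 updates, so the decay chosen above is gentle.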
Example #20
def calculate_eval_loss_models(models):
    """
    Calculate the evaluation loss of all the models
    :param models: The models
    :return: The losses, as a dict mapping each model to its loss
    """
    predict_x_batches, predict_y_batches = seq2seq.generate_validation_data()
    losses = {}

    for model in models:
        if "attention" in model.name:
            from tensorflow.python.keras.optimizers import SGD
            model.model.compile(SGD(1), metrics.root_mean_squared_error)
        else:
            from keras.optimizers import SGD
            model.model.compile(SGD(1), metrics.root_mean_squared_error)
        model_loss = model.model.evaluate(predict_x_batches, predict_y_batches, batch_size=len(predict_y_batches))
        print("model_loss", model_loss)
        losses[model] = model_loss

    return losses
Example #21
def ablation(args, model, x_test, y_test_onehot, distances, perturbed, m_train):

    attacks = list(distances.keys())
    loss = np.zeros((len(attacks), 9))
    acc = np.zeros_like(loss)

    dict_loss = {}
    dict_acc = {}

    base_model_name = args.model_name
    if args.extension is not None:
        base_model_name = re.sub('_' + args.extension, '', base_model_name)
    base_model_name += '_multi'

    print(base_model_name)

    multi_model = select_model(x_test.shape[1:], base_model_name, optimizer=SGD(0.001, momentum=0.9, nesterov=True),
                               weight_decay=args.weight_decay)
    multi_model.set_weights(model.get_weights())

    for a, i in zip(attacks, range(len(attacks))):

        ind = ind_perturbed(distances[a])

        x_adv = np.array(x_test, copy=True)
        if ind.shape[0] > 0:
            x_adv[ind] = perturbed[a][ind] - m_train

        loss[i, 0], acc[i, 0] = model.evaluate(x_test, y_test_onehot, batch_size=args.batch_size, verbose=0)
        loss[i, 1], acc[i, 1] = multi_model.evaluate([x_test, x_test, x_test], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)

        loss[i, 2], acc[i, 2] = multi_model.evaluate([x_adv, x_test, x_test], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)
        loss[i, 3], acc[i, 3] = multi_model.evaluate([x_test, x_adv, x_test], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)
        loss[i, 4], acc[i, 4] = multi_model.evaluate([x_test, x_test, x_adv], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)

        loss[i, 5], acc[i, 5] = multi_model.evaluate([x_adv, x_adv, x_test], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)
        loss[i, 6], acc[i, 6] = multi_model.evaluate([x_adv, x_test, x_adv], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)
        loss[i, 7], acc[i, 7] = multi_model.evaluate([x_test, x_adv, x_adv], y_test_onehot,
                                                     batch_size=args.batch_size, verbose=0)
        loss[i, 8], acc[i, 8] = multi_model.evaluate([x_adv, x_adv, x_adv], y_test_onehot,
                                                     batch_size=args.batch_size,
                                                     verbose=0)

        dict_loss[a] = loss[i]
        dict_acc[a] = acc[i]

    return dict_loss, dict_acc
Example #22
 def train_discriminator(self, epochs, train):
     # if not hasattr(self, "callback"):
     #     self.callback = self.get_callback()
     self.discriminator.model.trainable = True
     self.discriminator.model.compile(optimizer=SGD(1e-4, 0.9), loss='binary_crossentropy', metrics=['accuracy'])
     self.discriminator.model.fit_generator(
         generator=train,
         steps_per_epoch=len(train),
         epochs=epochs,
         # workers=8,
         # use_multiprocessing=True,
         # callbacks=self.callback
     )
Example #23
File: EX_1.py Project: tszdanger/NUS_ALL
def train(model, x_train, y_train, steps, epochs, x_test, y_test, modelname):
    model.compile(optimizer=SGD(lr=0.7, momentum=0.3),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    save_callback = ModelCheckpoint(filepath=modelname)
    early_stop = EarlyStopping(monitor='loss', min_delta=0.01, patience=20)

    model.fit(x=x_train,
              y=y_train,
              steps_per_epoch=steps,
              epochs=epochs,
              shuffle=True,
              callbacks=[save_callback, early_stop])
    print(model.evaluate(x=x_test, y=y_test))
Example #24
def train(model_file,train_path,validation_path,num_hidden=200,num_classes=4,steps=32,num_epochs=20,save_period=1):
    if os.path.exists(model_file):
        print ("\n*******existing model found at {}".format(model_file))
        model = load_existing(model_file)
    else:
        print ("\n***creating a new model****\n")
        model = create_model(num_hidden,num_classes)

    model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])

    checkpoint = ModelCheckpoint(model_file,period=save_period)

    # Generate batches of tensor image data with real-time augmentation; the data loops indefinitely (in batches).
    train_datagen=ImageDataGenerator(
        rescale = 1./255,                                               # rescaling factor; defaults to None. If None or 0, no rescaling is applied, otherwise the data is multiplied by this value
        shear_range=0.2,                                                # float; shear intensity (counter-clockwise shear angle in radians)
        zoom_range=0.2,                                                 # float or [lower, upper]; random zoom range. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range]
        horizontal_flip = True)                                         # boolean; randomly flip inputs horizontally

    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        train_path,                                                     # path to the target directory; one subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside the subdirectory tree will be included in the generator
        target_size = (249,249),                                        # tuple of ints (height, width), default (256, 256); all images will be resized to this size
        batch_size = 32,                                                # size of each data batch (default 32)
        class_mode="categorical")                                       # determines the type of label arrays returned

    validation_generator = test_datagen.flow_from_directory(
            validation_path,
            target_size=(249,249),
            batch_size=32,
            class_mode='categorical')

# Train the model on data yielded batch-by-batch by a Python generator (or a Sequence instance).
# The generator runs in parallel with the model for efficiency; for example, it lets you do real-time data augmentation on images on the CPU while training the model on the GPU.
    model.fit_generator(
        train_generator,
        steps_per_epoch = steps,                                        # total number of steps (batches of samples) to yield from the generator before declaring one epoch finished and starting the next; typically the sample count divided by the batch size
        epochs= num_epochs,
        callbacks = [checkpoint],
        validation_data = validation_generator,
        validation_steps = 50)                                          # only relevant when validation_data is a generator; total number of steps (batches of samples) to yield before stopping

    for layer in model.layers[:249]:
        layer.trainable = False
    for layer in model.layers[249:]:
        layer.trainable = True

    model.compile(optimizer=SGD(lr=0.001,momentum=0.9),loss='categorical_crossentropy',metrics=['accuracy'])
Example #25
 def __build_model__(self):
     self.model = Sequential()
     self.model.add(
         GRU(units=self.hidden_layer,
             return_sequences=True,
             input_shape=(self.input, self.feat_max)))
     self.model.add(Flatten())
     self.model.add(Dense(1, activation='sigmoid'))
     if self.optim == 'Adam':
         optimasi = Adam(lr=self.lr)
     else:
         optimasi = SGD(lr=self.lr)
     self.model.compile(loss='binary_crossentropy',
                        optimizer=optimasi,
                        metrics=['accuracy'])
Example #26
def setup_to_finetune(model):
    """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.

  note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch

  Args:
    model: keras model
  """
    for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
        layer.trainable = False
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
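A hedged usage sketch for setup_to_finetune, assuming an InceptionV3 base; NB_IV3_LAYERS_TO_FREEZE = 172 (keeping the top two inception blocks trainable) is an assumed value for illustration, not taken from the source.

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

NB_IV3_LAYERS_TO_FREEZE = 172  # assumed cutoff: top two inception blocks stay trainable

base = InceptionV3(weights='imagenet', include_top=False)
x = GlobalAveragePooling2D()(base.output)
predictions = Dense(5, activation='softmax')(x)  # 5 classes, as an example
model = Model(inputs=base.input, outputs=predictions)

setup_to_finetune(model)  # freezes layers [:172] and recompiles with SGD(lr=0.0001, momentum=0.9)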
Example #27
def train_stl10(y_train, y_test, x_train, x_test, num_classes, epochs, batch_size):
    model = create_bb_model(num_hidden_units=20, num_classes=num_classes)

    opt = SGD(lr=1e-3, momentum=0.9, decay=0, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc', 'mse'])
    earlystopping = EarlyStopping(monitor='val_acc', patience=50, restore_best_weights=True)
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_split=0.2,
              shuffle=True,
              callbacks=[earlystopping])
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    return model
Example #28
def train():
    model = create_model()
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd)

    checkpointer = ModelCheckpoint(filepath="/tmp/weights.hdf5",
                                   verbose=1,
                                   save_best_only=True)
    model.fit(X_train,
              y_train,
              nb_epoch=20,
              batch_size=16,
              show_accuracy=True,
              validation_split=0.2,
              verbose=2,
              callbacks=[checkpointer])
Example #29
    def __init__(self,
                 CI_recommend_model,
                 CI_model,
                 NI_recommend_model,
                 NI_model,
                 top_MLP_recommend_model=None,
                 top_MLP_model=None,
                 model_mode='ft',
                 ft_mode='',
                 lr=0.0001):
        """
        # 传入一个训练好的CI,NI,top_MLP_model模型
        :param CI_model:
        :param NI_model:
        :param top_MLP_model: fine_tune时利用其参数初始化
        :param model_mode : 是基于已有模型再finetune还是参数完全随机化,'co_train'  'ft'
        :param ft_mode:topMLP;3MLP(也会更新特征交互时的attention); whole
        """
        self.model = None
        self.model_mode = model_mode
        self.CI_recommend_model = CI_recommend_model
        self.NI_recommend_model = NI_recommend_model
        self.top_MLP_recommend_model = top_MLP_recommend_model
        self.CI_model = CI_model
        self.NI_model = NI_model
        self.top_MLP_model = top_MLP_model  # must be non-None when fine-tuning

        self.lr = lr  # initially 0.0001 or 0.0003; the learning rate is very small
        self.optimizer = SGD(lr=self.lr)
        self.predict_fc_unit_nums = new_Para.param.predict_fc_unit_nums

        self.ft_mode = ft_mode  # by default only the top MLP is trained; optionally train all three MLPs together with the attention, or together with the feature extractors
        if self.model_mode == 'co_train':
            self.simple_name = 'co_train_CI_{}_NI_{}_{}'.format(
                new_Para.param.simple_CI_mode, new_Para.param.NI_OL_mode,
                self.lr)
        elif self.model_mode == 'ft':
            self.simple_name = 'ft_{}_{}_{}-2'.format(
                ft_mode, self.top_MLP_recommend_model.simple_name, self.lr)
        self.model_name = self.simple_name  # to be revised!!!
        # the path sits under NI; the order is CI/NI/top_MLP
        # self.model_dir = os.path.join(top_MLP_recommend_model.model_dir,self.simple_name)
        self.model_dir = os.path.join(CI_recommend_model.model_dir,
                                      self.simple_name)  # under the CI directory
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        self.model_name_path = os.path.join(self.model_dir, 'model_name.dat')
Example #30
 def test_pretrained_weights(self):
   keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
   keras_model.compile(
       loss='categorical_crossentropy',
       optimizer=rmsprop.RMSPropOptimizer(1e-3),
       metrics=['mse', keras.metrics.CategoricalAccuracy()])
   keras_model.train_on_batch(
       np.random.random((10,) + _INPUT_SIZE),
       np.random.random((10, _NUM_CLASS)))
   weights = keras_model.get_weights()
   keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
   keras_model.set_weights(weights)
   keras_model.compile(
       loss='categorical_crossentropy',
       optimizer=SGD(lr=0.0001, momentum=0.9),
       metrics=['mse', keras.metrics.CategoricalAccuracy()])
   keras_lib.model_to_estimator(keras_model=keras_model, config=self._config)