Example #1
    def __init__(self, args):
        self.args = args
        self.model = FaceNet(args).model
        self.train_datasets, self.nrof_train = create_datasets_from_tfrecord(
            tfrcd_dir=args.datasets, batch_size=args.batch_size, phase='train')

        self.val_datasets, self.nrof_val = create_datasets_from_tfrecord(
            tfrcd_dir=args.datasets, batch_size=args.batch_size, phase='val')
        self.lr_schedule = schedules.ExponentialDecay(args.learning_rate,
                                                      decay_steps=10000,
                                                      decay_rate=0.96,
                                                      staircase=True)

        self.optimizer = Adam(learning_rate=self.lr_schedule,
                              beta_1=0.9,
                              beta_2=0.999,
                              epsilon=0.1)
        self.checkpoint = tf.train.Checkpoint(
            epoch=tf.Variable(0, dtype=tf.int64),
            n_iter=tf.Variable(0, dtype=tf.int64),
            best_pred=tf.Variable(0.0, dtype=tf.float32),
            optimizer=self.optimizer,
            model=self.model)
        self.manager = tf.train.CheckpointManager(self.checkpoint,
                                                  args.checkpoint_dir,
                                                  max_to_keep=3)
        check_folder(args.log_dir)
        self.train_summary_writer = tf.summary.create_file_writer(args.log_dir)
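
What ExponentialDecay actually computes (per the TF docs; only the illustrative numbers below are mine): with staircase=True the rate is initial_learning_rate * decay_rate ** floor(step / decay_steps), so it drops in discrete jumps every decay_steps optimizer updates. A minimal sketch:

import tensorflow as tf
from tensorflow.keras.optimizers import schedules

lr_schedule = schedules.ExponentialDecay(0.1, decay_steps=10000,
                                         decay_rate=0.96, staircase=True)
print(float(lr_schedule(0)))      # 0.1
print(float(lr_schedule(10000)))  # 0.1 * 0.96 = 0.096 (one staircase drop)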
Example #2
    def get_learning_rate(self):
        """
        Returns keras schedule for learning rate based on lr_schedule specified in the config
        :return: keras.optimizers.schedules
        """

        if self.config.lr_schedule == 'ExponentialDecay':
            lr = schedules.ExponentialDecay(
                self.config.learning_rate,
                decay_steps=self.config.decay_steps,
                decay_rate=self.lr_decay,
                staircase=True)
        elif self.config.lr_schedule == 'PolynomialDecay':
            lr = schedules.PolynomialDecay(self.config.learning_rate,
                                           self.config.decay_steps,
                                           self.config.end_learning_rate,
                                           power=self.config.decay_power)
        elif self.config.lr_schedule == 'PiecewiseConstantDecay':
            lr = schedules.PiecewiseConstantDecay(
                self.config.piecewise_lr_boundaries,
                self.config.piecewise_lr_values)
        else:
            raise ValueError("Invalid learning rate scheduler specified")

        return lr
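
For the PiecewiseConstantDecay branch above, the config has to provide one more value than boundary (one rate per interval). A minimal sketch with illustrative numbers, not values taken from the original config:

from tensorflow.keras.optimizers import schedules

# len(values) == len(boundaries) + 1: one rate before the first boundary,
# one between each pair of boundaries, and one after the last boundary
boundaries = [10000, 20000]
values = [1e-3, 5e-4, 1e-4]
lr = schedules.PiecewiseConstantDecay(boundaries, values)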
Example #3
def TrainKerasModel(path,model,opt,x,y,w,custom_callbacks=None):
    import sys
    import numpy as np
    from tensorflow.keras.optimizers import SGD,Adam,RMSprop,schedules
    from tensorflow.keras.models import Sequential

    getopt = opt.split(',')
    optlist={}
    for o in getopt:
        osplit=o.split(';')
        if len(osplit) == 2:
            optlist[osplit[0].upper()]=osplit[1]
        else:
            print ('Your options have to be given in the following form:')
            print ('opt1;val1,...,optn;valn')
            sys.exit(1)
    
    lrate=float(optlist['LRATE'])
    momentum=float(optlist['MOMENTUM'])
    nepoch=int(optlist['EPOCH'])
    batch=int(optlist['BATCH'])
    optimizer=optlist['OPTIMIZER']
    loss=optlist['LOSS']

    lrate_dec=0
    lrate_step=0
    if 'LRATE_DEC' in optlist: lrate_dec=float(optlist['LRATE_DEC'])
    if 'LRATE_STEP' in optlist: lrate_step=int(optlist['LRATE_STEP'])

    #Metrics
    mets=[]
    #if loss == 'Exp': mets.append(ExponentialLoss_Metric)

    #Callbacks
    cbacks=[]
    if custom_callbacks is not None: cbacks.append(custom_callbacks)

    if loss == 'Exp':
        loss = ExponentialLoss()

    if lrate_dec>0 and lrate_step>0:
        lrate = schedules.ExponentialDecay(initial_learning_rate=float(optlist['LRATE']),
                                 decay_steps=lrate_step,
                                 decay_rate=lrate_dec)
        print("Learning rate scheduled with initial rate "+str(float(optlist['LRATE']))+", decay rate "+str(lrate_dec)+", applied every "+str(lrate_step)+" steps.")

    optim=None
    if optimizer.upper() == 'SGD':
        optim=SGD(learning_rate=lrate, momentum=momentum)
    elif optimizer.upper() == 'ADAM':
        optim=Adam(learning_rate=lrate)
    elif optimizer.upper() == 'RMSPROP':
        optim=RMSprop(learning_rate=lrate, momentum=momentum)
    else:
        print ('Available optimizers: SGD,ADAM,RMSPROP')
        print ('You choose none, SGD will be run..')
        optim=SGD(learning_rate=lrate, momentum=momentum)
    
    model.compile(loss=loss, optimizer=optim, metrics=mets)
    model.summary()
    model.fit(np.array(x).astype(np.float32), np.array(y).astype(np.float32), sample_weight = np.array(w).astype(np.float32), epochs=nepoch, batch_size=batch, callbacks=cbacks)
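
The opt argument parsed above is a comma-separated list of key;value pairs. A hedged usage sketch (the option values are illustrative, and model, x, y and w are assumed to be prepared elsewhere):

opt = 'LRATE;0.001,MOMENTUM;0.9,EPOCH;20,BATCH;64,OPTIMIZER;ADAM,LOSS;mse,LRATE_DEC;0.96,LRATE_STEP;1000'
TrainKerasModel('output/', model, opt, x, y, w)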
Example #4
 def __init__(self,
              initial_value: float,
              decay_steps: int,
              decay_rate: float,
              min_value=1e-4):
     super().__init__(schedule=schedules.ExponentialDecay(initial_value,
                                                          decay_steps,
                                                          decay_rate,
                                                          staircase=True),
                      min_value=min_value)
Example #5
 def __init__(self,
              initial_value: float,
              decay_steps: int,
              decay_rate: float,
              staircase=False,
              min_value=0.0):
     super().__init__(schedule=schedules.ExponentialDecay(
         initial_learning_rate=initial_value,
         decay_steps=decay_steps,
         decay_rate=decay_rate,
         staircase=staircase),
                      min_value=min_value)
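
Examples #4 and #5 both hand the ExponentialDecay schedule to a parent class together with a min_value, which suggests a wrapper that floors the decayed rate. That base class is not shown here; a minimal sketch of what such a wrapper might look like (the class name and details are assumptions, not the original implementation):

import tensorflow as tf
from tensorflow.keras.optimizers import schedules


class FlooredSchedule(schedules.LearningRateSchedule):
    """Hypothetical wrapper: never lets the wrapped schedule drop below min_value."""

    def __init__(self, schedule, min_value=0.0):
        self._schedule = schedule
        self._min_value = min_value

    def __call__(self, step):
        return tf.maximum(self._schedule(step), self._min_value)

    def get_config(self):
        return {"schedule": self._schedule, "min_value": self._min_value}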
Example #6
        def setLRScheduler(self):
            """ Set Learning rate scheduler for training process.
        
                Returns
                -------
                learning rate.
        
            """

            lr_schedule = schedules.ExponentialDecay(self.lr_init_rate,
                                                     decay_steps=100000,
                                                     decay_rate=0.96,
                                                     staircase=True)
            return lr_schedule
Example #7
def styleTransfer(sourcepath, stylepath):
    path = 'static/uploads'
    base_image_path = os.path.join(path, "source.png")
    style_reference_image_path = os.path.join(path, "style.png")
    result_prefix = "static/results/result"
    global content_weight
    global total_variation_weight
    global style_weight
    total_variation_weight = 1e-6
    style_weight = 3e-6
    content_weight = 5e-7
    width, height = image.load_img(base_image_path).size
    global img_nrows
    global img_ncols
    img_nrows = 400
    img_ncols = int(width * img_nrows / height)
    content_img = preprocess_image(base_image_path)
    shape = content_img.shape[1:]
    model = VGG19(weights="imagenet", include_top=False, input_shape=shape)
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
    global feature_extractor
    feature_extractor = Model(inputs=model.inputs, outputs=outputs_dict)
    global style_layer_names
    style_layer_names = [
        "block1_conv1",
        "block2_conv1",
        "block3_conv1",
        "block4_conv1",
        "block5_conv1",
    ]
    global content_layer_name
    content_layer_name = "block5_conv2"
    optimizer = SGD(
        schedules.ExponentialDecay(initial_learning_rate=100.0,
                                   decay_steps=100,
                                   decay_rate=0.96))
    base_image = preprocess_image(base_image_path)
    style_reference_image = preprocess_image(style_reference_image_path)
    combination_image = tf.Variable(preprocess_image(base_image_path))
    iterations = 100
    for i in range(1, iterations + 1):
        loss, grads = compute_loss_and_grads(combination_image, base_image,
                                             style_reference_image)
        optimizer.apply_gradients([(grads, combination_image)])
        print("Iteration %d: loss=%.2f" % (i, (loss)))
    print(combination_image)
    img = deprocess_image(combination_image.numpy())
    fname = result_prefix + ".png"
    image.save_img(fname, img)
Example #8
 def __init__(self,
              action_count,
              state_count,
              alpha=0.1,
              lr_decay_rate=0.9,
              steps=1000):
     self._state_count = state_count
     self._action_count = action_count
     self._model = self._build_model()
     # self._model.build()
     self._lr = schedules.ExponentialDecay(initial_learning_rate=alpha,
                                           decay_rate=lr_decay_rate,
                                           decay_steps=steps,
                                           staircase=True)
     self._optimizer = Adam(learning_rate=self._lr)
     self._model.compile(optimizer=self._optimizer, loss='mse')
Example #9
def FCC_model():

    inputs = Input(shape=(16, ))
    t = Dense(256)(inputs)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(256)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(256)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(128)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(128)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(128)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(128)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    t = Dense(64)(t)
    t = ReLU()(t)
    t = BatchNormalization()(t)
    outputs = Dense(11)(t)

    lr_schedule = schedules.ExponentialDecay(initial_learning_rate=5e-3,
                                             decay_steps=20000,
                                             decay_rate=0.9)

    model = Model(inputs, outputs)
    model.compile(optimizer=Adam(learning_rate=lr_schedule), loss=custom_loss)

    return model
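
The repeated Dense → ReLU → BatchNormalization blocks above can also be written as a loop; an equivalent sketch with the same layer sizes, assuming the same imports and the same custom_loss as the original:

def FCC_model_compact():
    inputs = Input(shape=(16, ))
    t = inputs
    for units in (256, 256, 256, 128, 128, 128, 128, 64):
        t = Dense(units)(t)
        t = ReLU()(t)
        t = BatchNormalization()(t)
    outputs = Dense(11)(t)

    lr_schedule = schedules.ExponentialDecay(initial_learning_rate=5e-3,
                                             decay_steps=20000,
                                             decay_rate=0.9)
    model = Model(inputs, outputs)
    model.compile(optimizer=Adam(learning_rate=lr_schedule), loss=custom_loss)
    return model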
Example #10
    def fit(self,
            model,
            metric,
            epochs,
            batch_size=256,
            learning_rate=0.1,
            callbacks=None,
            save_best=False):
        super().prepare_data(batch_size)
        self.epochs = epochs
        # run native fit
        lr_schedule = schedules.ExponentialDecay(
            initial_learning_rate=learning_rate,
            decay_steps=10000,
            decay_rate=0.95)
        if not callbacks:
            es_patience = 7 if self.searching_mode else 15
            red_patience = 3 if self.searching_mode else 6

            callbacks = [
                tf.keras.callbacks.EarlyStopping(patience=es_patience,
                                                 min_delta=1e-2),
                tf.keras.callbacks.ReduceLROnPlateau(patience=red_patience),
                tf.keras.callbacks.LearningRateScheduler(schedule=lr_schedule),
                tf.keras.callbacks.TerminateOnNaN(),
            ]
        if save_best:
            callbacks.append(
                tf.keras.callbacks.ModelCheckpoint(filepath='saved_models/s',
                                                   save_weights_only=True,
                                                   monitor='val_loss',
                                                   mode='auto',
                                                   verbose=0,
                                                   save_best_only=True))
        if self.save_tf_logs:
            log_dir = "logs/fit/"
            tensorboard_callback = tf.keras.callbacks.TensorBoard(
                log_dir=log_dir)
            callbacks += [tensorboard_callback]

        # Pause after roughly the first third of the total epochs to check whether this model
        # is performing poorly enough to justify an early abort.
        history = None
        for epochs in [round(epochs * .33), round(epochs * .66)]:
            history = model.fit(self.train_ds,
                                validation_data=self.val_ds,
                                epochs=epochs,
                                batch_size=batch_size,
                                verbose=1,
                                callbacks=callbacks)
            score = history.history['val_mean_absolute_percentage_error'][-1]
            self.early_performance += [score]
            if len(self.early_performance) > 10:
                benchmark = median(self.early_performance)  # the median ignores extreme outliers
                if score > benchmark:
                    add_to_log("Performing poorly, discontinuing")
                    break
        # return metrics
        self.model = model
        self.history = history
        return self.history.history[metric][-1]
Example #11
    #model.add(Dense(2*numfeatures, kernel_initializer=initializer1, activation='relu'))
    model.add(Dense(2*numfeatures, kernel_initializer=initializer1, activation='relu'))
    #model.add(Dense(4*numfeatures, kernel_initializer=initializer1, activation='relu'))
    #model.add(Dense(4*numfeatures, kernel_initializer=initializer1, activation='relu'))
    model.add(Dense(4*numfeatures, kernel_initializer=initializer1, activation='relu'))
    model.add(Dense(4*numfeatures, kernel_initializer=initializer1, activation='relu'))
    #model.add(Dense(2*numfeatures, kernel_initializer=initializer1, activation='relu'))
    model.add(Dense(2*numfeatures, kernel_initializer=initializer1, activation='relu'))
    model.add(Dense(numfeatures, kernel_initializer=initializer1, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(10, kernel_initializer=initializer1, activation='relu'))
    model.add(Dense(len(df.EleType.unique()), kernel_initializer='glorot_uniform', activation='softmax'))

    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
    
    lr_schedule = schedules.ExponentialDecay(initial_learning_rate=1e-3,decay_steps=10000,decay_rate=0.9)
    
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=lr_schedule), metrics=['accuracy',])


# In[14]:


history = model.fit(X_train,y_train,validation_data=(X_test,y_test,Wt_test),epochs=epochs,batch_size=batch_size,shuffle=True,sample_weight=Wt_train,callbacks=[es])


# In[15]:


model.save(plot_dir+'my_model.h5')
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
Example #12
    def __init__(
        self,
        environment_spec: specs.EnvironmentSpec,
        network: snt.RNNCore,
        counter: counting.Counter = None,
        logger: loggers.Logger = None,
        snapshot_dir: Optional[str] = None,
        n_step_horizon: int = 16,
        minibatch_size: int = 80,
        learning_rate: float = 2e-3,
        discount: float = 0.99,
        gae_lambda: float = 0.99,
        decay: float = 0.99,
        epsilon: float = 1e-5,
        entropy_cost: float = 0.,
        baseline_cost: float = 1.,
        max_abs_reward: Optional[float] = None,
        max_gradient_norm: Optional[float] = None,
        max_queue_size: int = 100000,
        verbose_level: Optional[int] = 0,
    ):
        # Internalize spec and replay buffer.
        self._environment_spec = environment_spec
        self._verbose_level = verbose_level
        self._minibatch_size = minibatch_size
        self._queue = queue.ReplayBuffer(max_queue_size=max_queue_size,
                                         batch_size=n_step_horizon)

        # Internalize network.
        self._network = network

        # Setup optimizer and learning rate scheduler.
        self._learning_rate = tf.Variable(learning_rate, trainable=False)
        self._lr_scheduler = schedules.ExponentialDecay(
            initial_learning_rate=learning_rate,
            decay_steps=8000,  # TODO make a flag
            decay_rate=0.96  # TODO make a flag
        )
        self._optimizer = snt.optimizers.RMSProp(
            learning_rate=self._learning_rate, decay=decay, epsilon=epsilon)
        #self._optimizer = snt.optimizers.Adam(
        #    learning_rate=self._learning_rate,
        #)

        # Hyperparameters.
        self._discount = discount
        self._gae_lambda = gae_lambda
        self._entropy_cost = entropy_cost
        self._baseline_cost = baseline_cost

        # Set up reward/gradient clipping.
        if max_abs_reward is None:
            max_abs_reward = np.inf
        if max_gradient_norm is None:
            max_gradient_norm = 1e10  # A very large number. Infinity results in NaNs.
        self._max_abs_reward = tf.convert_to_tensor(max_abs_reward)
        self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)

        if snapshot_dir is not None:
            self._snapshotter = tf2_savers.Snapshotter(
                objects_to_save={'network': self._network},
                directory=snapshot_dir,
                time_delta_minutes=60.)

        # Logger.
        self._counter = counter or counting.Counter()
        self._logger = logger

        # Do not record timestamps until after the first learning step is done.
        # This is to avoid including the time it takes for actors to come online and
        # fill the replay buffer.
        self._timestamp = None
        self._pi_old = None
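
Note that in Example #12 the RMSProp optimizer reads its rate from the tf.Variable self._learning_rate rather than from self._lr_scheduler directly, so the schedule presumably gets pushed into that variable somewhere in the learning step (not shown above). A self-contained sketch of that pattern, with an assumed step counter:

import tensorflow as tf
from tensorflow.keras.optimizers import schedules

lr_var = tf.Variable(2e-3, trainable=False)
lr_scheduler = schedules.ExponentialDecay(initial_learning_rate=2e-3,
                                          decay_steps=8000,
                                          decay_rate=0.96)

# Hypothetical learning loop: push the scheduled value into the variable
# that the optimizer actually reads from.
for step in range(3):
    lr_var.assign(lr_scheduler(step))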
Example #13
    checkpoint_path = "Regression_Model/" + trainid + "-{epoch:04d}.ckpt"
    # Create a callback that saves the model's weights
    cp_callback = ModelCheckpoint(filepath=checkpoint_path,
                                  save_weights_only=True,
                                  verbose=1,
                                  period=10)

    model = Regression_CNN(length)

    log_dir = "TensorBoard/Regression/" + trainid + "_" + datetime.now(
    ).strftime("%Y%m%d-%H%M%S")
    tensorboard_cbk = TensorBoard(log_dir=log_dir)

    lr_schedule = schedules.ExponentialDecay(learning_rate,
                                             decay_steps=5000,
                                             decay_rate=0.96)
    model.compile(loss='mean_squared_error',
                  optimizer=SGD(momentum=0.98, learning_rate=lr_schedule),
                  metrics=['mse'])
    #model.compile(loss='mean_absolute_error', optimizer= SGD(momentum = 0.98, learning_rate = lr_schedule), metrics=['mse'])

    #plot_model(model, to_file=combination+'model_plot.png', show_shapes=True, show_layer_names=True)
    #print(model.summary())

    model.fit(training_generator,
              epochs=epochs,
              validation_data=validation_generator,
              callbacks=[tensorboard_cbk, cp_callback])

    evaluate(train_x, train_y, train_pasid, trainid, 'train', label_mean,
Example #14
def train_top(model_name, load_weights_path=None):
    backbone, model = model_inceptionv3()

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in backbone.layers:
        layer.trainable = False
    # compile the model (should be done *after* setting layers to non-trainable)
    lr_schedule = schedules.ExponentialDecay(
        initial_learning_rate=Config.LEARNING_RATE,
        decay_steps=Config.DECAY_STEPS,
        decay_rate=Config.DECAY_RATE)
    optimizer = Adam(learning_rate=lr_schedule)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    plot_model(model, show_shapes=True, to_file='inceptionv3_architecture.png')

    if load_weights_path is not None:
        print(f"Loaded weights {load_weights_path}")
        model.load_weights(load_weights_path)

    model_path = os.path.join("models",
                              model_name + "-{epoch:02d}-{val_loss:.3f}.hdf5")

    df = pd.read_csv(Config.TRAINING_CSV_PATH)
    df["label"] = df["label"].apply(
        lambda x: str(np.argmax(np.array(x[1:-1].split(","), dtype="int"))))
    num_samples = np.bincount(df["label"].astype(int))
    class_weight = {}
    for i, num in enumerate(num_samples):
        class_weight[i] = 1. / num
    datagen = ImageDataGenerator(
        preprocessing_function=inception_v3.preprocess_input,
        validation_split=Config.VALIDATION_RATIO)
    train_generator = datagen.flow_from_dataframe(
        subset="training",
        dataframe=df,
        directory=Config.TRAINING_IMAGES_PATH,
        x_col="image",
        y_col="label",
        class_mode="categorical",
        batch_size=Config.BATCH_SIZE,
        shuffle=True)
    validation_generator = datagen.flow_from_dataframe(
        subset="validation",
        dataframe=df,
        directory=Config.TRAINING_IMAGES_PATH,
        x_col="image",
        y_col="label",
        class_mode="categorical",
        batch_size=Config.BATCH_SIZE,
        shuffle=False)

    checkpoint = ModelCheckpoint(filepath=model_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False)
    tensorboard = TensorBoard(
        log_dir=f"./logs/{model_name}-{int(time.time())}", update_freq=1)
    earlystopping = EarlyStopping(monitor='val_loss',
                                  patience=20,
                                  min_delta=0.01)
    model.fit(train_generator,
              initial_epoch=Config.INITIAL_EPOCH,
              steps_per_epoch=train_generator.samples // Config.BATCH_SIZE,
              validation_data=validation_generator,
              validation_steps=validation_generator.samples //
              Config.BATCH_SIZE,
              epochs=Config.TRAIN_EPOCH,
              callbacks=[checkpoint, tensorboard, earlystopping])