Code example #1
    def __init__(self, args):
        super().__init__()
        self.args = args
        obs_dim, act_dim = self.args.obs_dim, self.args.act_dim
        n_hiddens, n_units, hidden_activation = self.args.num_hidden_layers, self.args.num_hidden_units, self.args.hidden_activation
        value_model_cls, policy_model_cls = NAME2MODELCLS[self.args.value_model_cls], \
                                            NAME2MODELCLS[self.args.policy_model_cls]
        self.policy = policy_model_cls(
            obs_dim,
            n_hiddens,
            n_units,
            hidden_activation,
            act_dim * 2,
            name='policy',
            output_activation=self.args.policy_out_activation)
        policy_lr_schedule = PolynomialDecay(*self.args.policy_lr_schedule)
        self.policy_optimizer = self.tf.keras.optimizers.Adam(
            policy_lr_schedule, name='adam_opt')

        self.vs = value_model_cls(obs_dim,
                                  n_hiddens,
                                  n_units,
                                  hidden_activation,
                                  2,
                                  name='vs')
        value_lr_schedule = PolynomialDecay(*self.args.value_lr_schedule)
        self.value_optimizer = self.tf.keras.optimizers.Adam(value_lr_schedule,
                                                             name='adam_opt')

        self.models = (
            self.vs,
            self.policy,
        )
        self.optimizers = (self.value_optimizer, self.policy_optimizer)
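The *self.args.policy_lr_schedule tuples above are unpacked positionally into PolynomialDecay. A minimal sketch of what such a tuple might hold (the real values live in self.args and are not shown here; these numbers are purely illustrative):

from tensorflow.keras.optimizers.schedules import PolynomialDecay

# Hypothetical values; positional order is
# (initial_learning_rate, decay_steps, end_learning_rate).
policy_lr_schedule = (3e-4, 100000, 1e-5)
lr = PolynomialDecay(*policy_lr_schedule)
print(float(lr(0)), float(lr(100000)))  # 3e-4 at step 0, 1e-5 from decay_steps onward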
Code example #2
def get_lr_scheduler(learning_rate, decay_type, decay_steps):
    if decay_type:
        decay_type = decay_type.lower()

    if decay_type is None:
        lr_scheduler = learning_rate
    elif decay_type == 'cosine':
        lr_scheduler = CosineDecay(
            initial_learning_rate=learning_rate,
            decay_steps=decay_steps,
            alpha=0.2)  # use 0.2*learning_rate as final minimum learning rate
    elif decay_type == 'exponential':
        lr_scheduler = ExponentialDecay(initial_learning_rate=learning_rate,
                                        decay_steps=decay_steps,
                                        decay_rate=0.9)
    elif decay_type == 'polynomial':
        lr_scheduler = PolynomialDecay(initial_learning_rate=learning_rate,
                                       decay_steps=decay_steps,
                                       end_learning_rate=learning_rate / 100)
    elif decay_type == 'piecewise_constant':
        # apply a piecewise-constant lr schedule, including a warmup stage
        boundaries = [500, int(decay_steps * 0.9), decay_steps]
        values = [
            0.001, learning_rate, learning_rate / 10., learning_rate / 100.
        ]
        lr_scheduler = PiecewiseConstantDecay(boundaries=boundaries,
                                              values=values)
    else:
        raise ValueError('Unsupported lr decay type')

    return lr_scheduler
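Every branch above except the constant case returns a tf.keras LearningRateSchedule, which is simply a callable from a step count to a learning rate. A quick sanity check, assuming the standard schedule imports this file already uses:

sched = get_lr_scheduler(0.01, 'cosine', 1000)
print(float(sched(0)))     # 0.01 at step 0
print(float(sched(1000)))  # alpha * 0.01 = 0.002 at the final step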
Code example #3
def make_scheduler(name, init_lr, steps_per_epoch):
    if name == 'constant':
        lr_fn = init_lr
    elif name == 'piecewise_decay':
        lr_fn = PiecewiseConstantDecay(
                boundaries=[20*steps_per_epoch,
                            50*steps_per_epoch,
                            100*steps_per_epoch,
                            150*steps_per_epoch,
                            200*steps_per_epoch,
                            250*steps_per_epoch],
                values=[init_lr,
                        init_lr * 0.8, #50
                        init_lr * 0.6, #50
                        init_lr * 0.4, #100
                        init_lr * 0.3, #150
                        init_lr * 0.2, #200
                        init_lr * 0.1]) #250
    elif name == 'linear_decay':
        decay_steps = steps_per_epoch * 300  # by default, the learning rate decays over 300 epochs
        lr_fn = PolynomialDecay(initial_learning_rate=init_lr, decay_steps=decay_steps, end_learning_rate=init_lr*0.1, power=1.0)
    elif name == 'cosine_decay_restart':
        first_decay_steps = 1000
        lr_fn = CosineDecayRestarts(initial_learning_rate=init_lr, first_decay_steps=first_decay_steps, t_mul=2.0)
    else:
        # otherwise an unrecognized name would leave lr_fn undefined
        raise ValueError('Unsupported scheduler name: {}'.format(name))
    print(name, lr_fn)
    return lr_fn
Code example #4
def getScheduler(sched, lr=1e-3):
    if sched == 'poly':
        # note: the third positional argument is end_learning_rate, not power
        return PolynomialDecay(lr, 3000, 0.99)
    elif sched == 'exp':
        # decay_rate is a required argument; 0.9 here is an illustrative choice
        return ExponentialDecay(lr, 1000, decay_rate=0.9)
    else:
        return lr
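With the decay_rate of 0.9 added above (ExponentialDecay has no default for it), the exponential branch computes lr * 0.9 ** (step / 1000). A short check of the first few values:

sched = getScheduler('exp', lr=1e-3)
for step in (0, 1000, 2000):
    print(step, float(sched(step)))  # 1e-3, 9e-4, 8.1e-4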
        
Code example #5
def build_model(transformer,
                max_len: int = 512,
                multi_class: bool = True,
                lr_decay: bool = False):  # noqa: D205
    """
    Build an end-to-end Transformer model. Requires a transformer of type TFAutoBert.
    https://www.kaggle.com/xhlulu/jigsaw-tpu-distilbert-with-huggingface-and-keras

    :param transformer: Transformer model
    :param max_len: maximum length of encoded sequence
    :param multi_class: if True, final layer is multiclass so softmax is used. If False, final layer
        is sigmoid and binary crossentropy is evaluated.
    :param lr_decay: if True, use a learning rate decay schedule. If False, use a constant learning rate.
    :return: Constructed Transformer model
    """
    input_word_ids = Input(shape=(max_len, ),
                           dtype=tf.int32,
                           name="input_word_ids")
    sequence_output = transformer(input_word_ids)[0]
    cls_token = sequence_output[:, 0, :]
    if multi_class:
        out = Dense(3, activation='softmax', name='softmax')(cls_token)
    else:
        out = Dense(1, activation='sigmoid', name='sigmoid')(cls_token)

    model = Model(inputs=input_word_ids, outputs=out)

    if lr_decay:
        # There are various options, starting with a linear decay for now
        # TODO: Tune for the best decay schedule
        lr = PolynomialDecay(initial_learning_rate=2e-5,
                             decay_steps=10000,
                             end_learning_rate=1e-6,
                             power=1)
    else:
        lr = 1e-6

    if multi_class:
        model.compile(Adam(learning_rate=lr),
                      loss='categorical_crossentropy',
                      metrics=[
                          tf.keras.metrics.Recall(),
                          tf.keras.metrics.Precision(),
                          tf.keras.metrics.CategoricalAccuracy()
                      ])
    else:
        model.compile(Adam(learning_rate=lr),
                      loss='binary_crossentropy',
                      metrics=[
                          tf.keras.metrics.Recall(),
                          tf.keras.metrics.Precision(), 'accuracy'
                      ])

    return model
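With power=1, this PolynomialDecay is a straight linear ramp from 2e-5 down to 1e-6 over 10000 steps. The closed form it evaluates (for cycle=False) is easy to verify by hand:

def poly_lr(step, init=2e-5, end=1e-6, decay_steps=10000, power=1.0):
    # Same formula tf.keras PolynomialDecay uses when cycle=False.
    step = min(step, decay_steps)
    return (init - end) * (1 - step / decay_steps) ** power + end

print(poly_lr(0))      # 2e-05
print(poly_lr(5000))   # 1.05e-05, halfway down the ramp
print(poly_lr(10000))  # 1e-06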
Code example #6
def get_lr_scheduler(learning_rate, decay_type, decay_steps):
    if decay_type:
        decay_type = decay_type.lower()

    if decay_type is None:
        lr_scheduler = learning_rate
    elif decay_type == 'cosine':
        lr_scheduler = CosineDecay(initial_learning_rate=learning_rate, decay_steps=decay_steps)
    elif decay_type == 'exponential':
        lr_scheduler = ExponentialDecay(initial_learning_rate=learning_rate, decay_steps=decay_steps, decay_rate=0.9)
    elif decay_type == 'polynomial':
        lr_scheduler = PolynomialDecay(initial_learning_rate=learning_rate, decay_steps=decay_steps, end_learning_rate=learning_rate/100)
    else:
        raise ValueError('Unsupported lr decay type')

    return lr_scheduler
Code example #7
def main():
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    # Load the data
    reader = DataLoader(cfg.annotation_path, cfg.input_shape, cfg.batch_size)
    train, valid = reader.read_data_and_split_data()
    train_datasets = reader.make_datasets(train, "train")
    valid_datasets = reader.make_datasets(valid, "valid")
    train_steps = len(train) // cfg.batch_size
    valid_steps = len(valid) // cfg.batch_size

    if os.path.exists(cfg.log_dir):
        # Clear anything already in the summary directory
        for f in os.listdir(cfg.log_dir):
            file = os.path.join(cfg.log_dir, f)
            shutil.rmtree(file)

    # Create the model save directory
    if not os.path.exists(os.path.split(cfg.model_path)[0]):
        os.mkdir(os.path.split(cfg.model_path)[0])

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(len(train), len(valid), cfg.batch_size))
    if cfg.train_mode == "eager":
        # Define the optimizer and the learning-rate decay.
        # PolynomialDecay arguments: cfg.learn_rating decays to cfg.learn_rating / 10 over cfg.epochs steps.
        # 1. lr_fn acts like a function: it is called whenever the current learning rate is needed.
        # 2. It has an internal counter that increments by 1 on every apply_gradients call,
        #    so decay_steps here is measured in optimizer steps, not epochs.
        yolo_loss = [YoloLoss(cfg.anchors[mask]) for mask in cfg.anchor_masks]
        lr_fn = PolynomialDecay(cfg.learn_rating, cfg.epochs, cfg.learn_rating / 10, 2)
        optimizer = Adam(learning_rate=lr_fn)
        low_level_train(optimizer, yolo_loss, train_datasets, valid_datasets, train_steps, valid_steps)
    else:
        # Create the summary writer
        writer = tf.summary.create_file_writer(logdir=cfg.log_dir + '/loss')

        optimizer = Adam(learning_rate=cfg.learn_rating)
        yolo_loss = [YoloLoss(cfg.anchors[mask],
                              summary_writer=writer,
                              optimizer=optimizer) for mask in cfg.anchor_masks]

        high_level_train(optimizer, yolo_loss, train_datasets, valid_datasets, train_steps, valid_steps)
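The comments on lr_fn are worth making concrete: the counter lives in the optimizer, not in the schedule. optimizer.iterations increments once per apply_gradients, and the optimizer evaluates lr_fn at that step to get the current rate. A minimal standalone sketch (illustrative values, not the project's config):

import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import PolynomialDecay

lr_fn = PolynomialDecay(1e-3, 100, 1e-4, 2)
optimizer = Adam(learning_rate=lr_fn)

w = tf.Variable(1.0)
with tf.GradientTape() as tape:
    loss = w * w
grads = tape.gradient(loss, [w])
optimizer.apply_gradients(zip(grads, [w]))

print(int(optimizer.iterations))           # 1: incremented by apply_gradients
print(float(lr_fn(optimizer.iterations)))  # the learning rate now in effect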
Code example #8
File: app.py  Project: AntonH91/RPS_Bot
def run_training():
    trainee = AiAgent(prediction_model,
                      gameplay_model,
                      epsilon=0.2,
                      epsilon_decay=PolynomialDecay(initial_learning_rate=0.9,
                                                    decay_steps=25,
                                                    end_learning_rate=0.1))
    trainer = MetaFixAgent()

    dojo = RPSDojo(trainee=trainee,
                   trainer=trainer,
                   episodes=NUM_GAMES,
                   optimizer=tf.keras.optimizers.Adam(),
                   loss=tf.keras.losses.MeanSquaredError(),
                   discount=0.95,
                   batch_size=512,
                   rounds_in_episode=NUM_ROUNDS,
                   trainee_logging_path=DATA_LOCATION + 'action_history.csv')

    dojo.run_training()

    gameplay_model.save(MODEL_LOCATION, include_optimizer=False)

    dojo.write_history(DATA_LOCATION + 'training_history.csv')
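Despite the initial_learning_rate parameter name, the schedule here drives exploration, not optimization: epsilon decays linearly from 0.9 to 0.1 over 25 steps (power defaults to 1.0) and is clamped at 0.1 afterwards. How AiAgent calls it is not shown, but evaluating the schedule directly illustrates the trajectory:

from tensorflow.keras.optimizers.schedules import PolynomialDecay

epsilon_decay = PolynomialDecay(initial_learning_rate=0.9,
                                decay_steps=25,
                                end_learning_rate=0.1)
for step in (0, 5, 25, 100):
    print(step, round(float(epsilon_decay(step)), 3))  # 0.9, 0.74, 0.1, 0.1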
Code example #9
File: train.py  Project: Runist/YOLOv4
def train_by_eager(train_datasets, valid_datasets, train_steps, valid_steps):
    # Build the model
    if cfg.backbone == 'csp-darknet':
        model = yolo4_body(cfg.input_shape)
    elif cfg.backbone == 'tiny-csp-darknet':
        model = tiny_yolo4_body(cfg.input_shape)

    # Define metrics for model evaluation
    train_loss = metrics.Mean(name='train_loss')
    valid_loss = metrics.Mean(name='valid_loss')

    # Turn the datasets into iterators
    train_datasets = iter(train_datasets)
    valid_datasets = iter(valid_datasets)

    # State for saving the best model and for early stopping
    best_test_loss = float('inf')
    patience = 10
    min_delta = 1e-3
    patience_cnt = 0
    history_loss = []

    # Create the optimizer and the learning-rate decay.
    # PolynomialDecay arguments: cfg.learning_rate decays to cfg.learning_rate / 10 over cfg.epochs steps.
    # 1. lr_fn acts like a function: it is called whenever the current learning rate is needed.
    # 2. It has an internal counter that increments by 1 on every apply_gradients call.
    lr_fn = PolynomialDecay(cfg.learning_rate, cfg.epochs,
                            cfg.learning_rate / 10, 2)
    optimizer = optimizers.Adam(learning_rate=lr_fn)

    # Create the summary writer
    summary_writer = tf.summary.create_file_writer(logdir=cfg.log_dir)

    # Define the losses
    yolo_loss = [
        YoloLoss(cfg.anchors[mask],
                 label_smooth=cfg.label_smooth,
                 summary_writer=summary_writer,
                 optimizer=optimizer) for mask in cfg.anchor_masks
    ]

    # Compute the loss the low-level (eager) way
    for epoch in range(1, cfg.epochs + 1):
        train_loss.reset_states()
        valid_loss.reset_states()

        # Process the training set
        process_bar = tqdm(range(train_steps),
                           ncols=100,
                           desc="Epoch {}".format(epoch),
                           unit="step")
        for _ in process_bar:
            images, labels = next(train_datasets)
            with tf.GradientTape() as tape:
                # Forward pass
                outputs = model(images, training=True)
                # Compute losses (model.losses is only populated when Conv2D layers set kernel_regularizer)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                # yolo_loss, labels and outputs each cover 3 feature levels; the loop pairs
                # them up so each loss_fn handles one feature level at a time
                for output, label, loss_fn in zip(outputs, labels, yolo_loss):
                    pred_loss.append(loss_fn(label, output))

                # total loss = yolo loss + regularization loss
                total_train_loss = tf.reduce_sum(
                    pred_loss) + regularization_loss

            # Backpropagate: compute gradients of the loss w.r.t. every trainable variable
            grads = tape.gradient(total_train_loss, model.trainable_variables)
            # Apply each gradient to its trainable variable (zip pairs gradients with variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            # Update train_loss
            train_loss.update_state(total_train_loss)

            loss = train_loss.result().numpy()
            process_bar.set_postfix({
                'loss':
                '{:.4f}'.format(loss),
                'lbox_loss':
                '{:.4f}'.format(pred_loss[0]),
                'mbox_loss':
                '{:.4f}'.format(pred_loss[1]),
                'sbox_loss':
                '{:.4f}'.format(pred_loss[2])
                if cfg.backbone == 'csp-darknet' else None,
                'reg_loss':
                '{:.4f}'.format(regularization_loss),
            })

        # Evaluate on the validation set
        process_bar = tqdm(range(valid_steps),
                           ncols=100,
                           desc="Epoch {}".format(epoch),
                           unit="step")
        for _ in process_bar:
            images, labels = next(valid_datasets)
            # Forward pass, without training=True
            outputs = model(images)
            regularization_loss = tf.reduce_sum(model.losses)
            pred_loss = []
            for output, label, loss_fn in zip(outputs, labels, yolo_loss):
                pred_loss.append(loss_fn(label, output))

            total_valid_loss = tf.reduce_sum(pred_loss) + regularization_loss

            # Update valid_loss
            valid_loss.update_state(total_valid_loss)
            process_bar.set_postfix({
                'loss':
                '{:.4f}'.format(valid_loss.result().numpy()),
                'lbox_loss':
                '{:.4f}'.format(pred_loss[0]),
                'mbox_loss':
                '{:.4f}'.format(pred_loss[1]),
                'sbox_loss':
                '{:.4f}'.format(pred_loss[2])
                if cfg.backbone == 'csp-darknet' else None,
                'reg_loss':
                '{:.4f}'.format(regularization_loss),
            })

        # Record the loss (validation here; the training loss could be used instead)
        history_loss.append(valid_loss.result().numpy())

        # Write to TensorBoard
        with summary_writer.as_default():
            tf.summary.scalar('train_loss',
                              train_loss.result(),
                              step=optimizer.iterations)
            tf.summary.scalar('valid_loss',
                              valid_loss.result(),
                              step=optimizer.iterations)
            tf.summary.scalar('regularization_loss',
                              regularization_loss,
                              step=optimizer.iterations)

        # Save only the best model
        if valid_loss.result() < best_test_loss:
            best_test_loss = valid_loss.result()
            model.save_weights(cfg.model_path)

        # EarlyStopping
        if epoch > 1 and history_loss[epoch - 2] - history_loss[epoch -
                                                                1] > min_delta:
            patience_cnt = 0
        else:
            patience_cnt += 1

        if patience_cnt >= patience:
            tf.print(
                "No improvement for {} times, early stopping optimization.".
                format(patience))
            break
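The early-stopping block at the end deserves a note: history_loss is 0-indexed while epoch is 1-indexed, so history_loss[epoch - 2] is the previous epoch's loss and history_loss[epoch - 1] the current one; the counter resets only when the loss improved by more than min_delta. The same logic as a self-contained helper (a sketch for clarity, not part of the original project):

def should_stop(history_loss, patience=10, min_delta=1e-3):
    # Mirrors the loop above: reset the counter on a sufficient improvement,
    # otherwise count the epoch as stagnant.
    patience_cnt = 0
    for epoch in range(1, len(history_loss) + 1):
        if epoch > 1 and history_loss[epoch - 2] - history_loss[epoch - 1] > min_delta:
            patience_cnt = 0
        else:
            patience_cnt += 1
        if patience_cnt >= patience:
            return True
    return False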
Code example #10
def train(type_name, n_hidden):
    # Initialize save path
    save_path = "/home/kevin/projects/exercise_pose_evaluation_machine/models/lstm_model/keras/" + type_name + "/" + type_name + "_lstm_model.h5"
    # Get original dataset
    x, y = get_dataset(type_name)
    # Fill original class type with the label 1
    y = [1 for label in y]

    # Get negative dataset
    neg_x, neg_y = get_dataset("not-" + type_name)

    # Fill the negative class with the label 0
    neg_y = [0 for label in neg_y]
    x.extend(neg_x)
    y.extend(neg_y)

    # Flatten X coordinates and filter
    x = np.array(x)
    _x = []
    _y = []
    for idx, data in enumerate(x):
        data = [np.reshape(np.array(frames), (28)).tolist() for frames in data]
        _x.append(data)
        _y.append(y[idx])
    x = _x
    y = _y

    # Split to training and test dataset
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.3)
    x_train = np.array(x_train)
    x_test = np.array(x_test)
    y_train = np.array(y_train)
    y_test = np.array(y_test)

    # Define training parameters
    n_classes = 1

    # Make the LSTM layer
    # A pair of LSTM cells initialized in a loop
    # use_bias          -> add a bias vector on each layer; True by default, so this line could be omitted
    # unit_forget_bias  -> add 1.0 to the forget-gate bias at initialization, which helps early training
    lstm_cells = [
        LSTMCell(n_hidden,
                 activation='relu',
                 use_bias=True,
                 unit_forget_bias=1.0) for _ in range(2)
    ]
    stacked_lstm = StackedRNNCells(lstm_cells)
    lstm_layer = RNN(stacked_lstm)

    learning_rate = 1e-2
    lr_schedule = PolynomialDecay(initial_learning_rate=learning_rate,
                                  decay_steps=10,
                                  end_learning_rate=0.00001)
    optimizer = Adam(learning_rate=lr_schedule)

    # Initiate model
    # kernel_regularizers   -> regularizing weights to avoid overfit training data on layer kernel
    # activity_regularizer  -> regularizing weights to avoid overfit training data on layer output
    model = Sequential()
    model.add(lstm_layer)
    model.add(Dropout(0.3))
    model.add(
        Dense(n_classes,
              activation='sigmoid',
              kernel_regularizer=regularizers.l2(0.01),
              activity_regularizer=regularizers.l1(0.01)))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,  # use the Adam instance built above; the string 'adam' would discard the decay schedule
                  metrics=['accuracy'])

    # simple early stopping
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)

    # Train model
    # shuffle = True    -> shuffle the training data
    # validation_split  -> portion of training data held out for validation
    # validation_data   -> external validation data; when given, validation_split is ignored
    model.fit(x_train,
              y_train,
              epochs=450,
              batch_size=150,
              shuffle=True,
              validation_data=(x_test, y_test),
              validation_split=0.4,
              callbacks=[es])

    # Print model stats
    print(model.summary())
    print(model.get_config())

    # Find accuracy
    _, accuracy = model.evaluate(x_test, y_test)
    print('Accuracy: %.2f' % (accuracy * 100))

    # Save model
    model.save(save_path)
    print("Saved model!")

    # Generate predictions
    print("See prediction result")
    random_int = random.randint(0, len(x_test) - 1)  # randint is inclusive on both ends
    data = x_test[random_int]
    prediction = "1" if model.predict(np.array([data])) > 0.5 else "0"
    print("predictions result:", prediction)
    print("expected result: ", y_test[random_int])
Code example #11
File: train.py  Project: kjcho92/projects
# #     num_found = re.search(r'\d+', checkpoint_filename)
# #     if num_found:
# #         start_epoch = int(num_found.group(0))
# #         _LOGGER.info(f"Resuming from epoch {start_epoch}")
# else:
model.save_weights(checkpoint_prefix, overwrite=True, save_format="tf")

# _LOGGER.debug("Model summary: %s", model.summary())
# -

# configure training and compile model
# create learning_rate function
if learning_policy == "poly":
    learning_rate_fn = PolynomialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=steps_per_epoch * n_epochs,
        end_learning_rate=0,
        power=learning_power,
    )
elif learning_policy == "step":
    learning_rate_fn = ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=steps_per_epoch,
        decay_rate=learning_rate_decay,
        staircase=True,
    )

# create custom metric
metric = MeanIoUCustom(
    num_classes=num_classes,
    ignore_label=ignore_label,
    crop_size_h=cs_h,
Code example #12
def train(type_name, filename, n_hidden, lstm_layer, dropout, epoch, batch_size, x, y):
    # Make filename
    date_string = datetime.now().isoformat().replace(':', '.')
    filename = f'{filename} k-fold results {date_string}'

    # Create file and write CSV header
    write_header(filename)
    body = {}
    body['n_hidden'] = n_hidden
    body['lstm_layer'] = lstm_layer
    body['dropout'] = dropout
    body['epoch'] = epoch
    body['batch_size'] = batch_size

    # Initialize total accuracy variable and number of K-Fold splits
    total = 0
    n_splits = 5

    # Initialize K Fold
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1)
    k_fold_index = 1

    x = np.array(x)
    y = np.array(y)
    t_start = time.time()
    for train_index, test_index in skf.split(x, y):
        # Initialize training sets
        x_train = x[train_index]
        y_train = y[train_index]
        x_test = x[test_index]
        y_test = y[test_index]

        # Define training parameters
        n_output = 1

        # Make LSTM Layer
        # LSTM cells initialized in a loop, one per requested layer
        lstm_cells = [LSTMCell(
            n_hidden,
            activation='relu',
            use_bias=True,
            unit_forget_bias = 1.0
        ) for _ in range(lstm_layer)]
        stacked_lstm = StackedRNNCells(lstm_cells)

        # Decaying learning rate
        learning_rate = 1e-2
        lr_schedule = PolynomialDecay(
            initial_learning_rate=learning_rate,
            decay_steps=10,
            end_learning_rate= 0.00001
        )
        optimizer = Adam(learning_rate = lr_schedule)

        # Initiate model
        model = Sequential()
        model.add(RNN(stacked_lstm))
        model.add(Dropout(dropout))
        model.add(Dense(n_output, 
            activation='sigmoid',
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01)))
        model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        
        # Train model
        fit_start = time.time()
        model.fit(x_train, y_train, epochs=epoch, batch_size=batch_size, shuffle = True, validation_data = (x_test, y_test), validation_split = 0.4, callbacks=[ForceGarbageCollection()])

        # Print model stats
        print(model.summary())

        # Find accuracy
        _, accuracy = model.evaluate(x_test, y_test)
        accuracy *= 100
        total += accuracy
        body[f'k-fold {k_fold_index}'] = "{:.2f}".format(accuracy)
        body[f'k-fold {k_fold_index} time'] = float(time.time() - fit_start)
        print('Accuracy: %.2f' % (accuracy))
        k_fold_index += 1

        # TODO: wrap each distinct block (prediction, data processing, accuracy check)
        # in its own try/except so problems are easier to pinpoint.

    # Write iterations
    body['seconds_to_finish'] = float(time.time() - t_start)
    body['exercise name'] = type_name
    body['avg'] = "{:.2f}".format(total/n_splits)
    write_body(filename, body)
Code example #13
File: main.py  Project: albertobagnacani/DeepDeblur
# Print the summary
print(model.summary())

# Callbacks
tensorboard_callback = TensorBoard(log_dir=log_dir)  # , histogram_freq=1, profile_batch='1')

save_weights_only = False

# PolynomialDecay definition
if 'reds' in task:
    data_size = train_sharp_generator.samples // batch_size
else:
    data_size = len(train_sharp_generator)
max_steps = int((epochs-load_epoch) * data_size)

pd = PolynomialDecay(initial_learning_rate=initial_lr, decay_steps=max_steps, end_learning_rate=end_lr, power=power)

rlrop = ReduceLROnPlateau(monitor=monitor_rlrop, factor=factor_rlrop, patience=patience_rlrop, min_lr=min_lr_rlrop)
lrs = LearningRateScheduler(pd)  # caveat: LearningRateScheduler passes the epoch index, but pd is defined over max_steps optimizer steps
es = EarlyStopping(monitor=monitor_es, patience=patience_es)
mc = ModelCheckpoint(filepath=checkpoint_filepath, monitor='val_loss', save_best_only=False,
                     save_weights_only=save_weights_only, period=mc_period)

callbacks = [tensorboard_callback, mc]

if 'cifar' in task:
    callbacks.append(LearningRateScheduler(MyPolynomialDecay(max_epochs=epochs, init_lr=initial_lr, power=5)))

# Check if tf is using GPU
# print('Using GPU: {}'.format(tf.test.is_gpu_available()))
print(tf.config.list_physical_devices('GPU'))
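MyPolynomialDecay is defined elsewhere in the DeepDeblur project; its call signature (max_epochs, init_lr, power) suggests an epoch-indexed callable suitable for LearningRateScheduler, unlike the step-indexed pd above. Purely as an assumption about that missing class, a plausible shape would be:

class MyPolynomialDecay:
    # Hypothetical reconstruction; the real class lives in the project repo.
    def __init__(self, max_epochs, init_lr, power=1.0):
        self.max_epochs = max_epochs
        self.init_lr = init_lr
        self.power = power

    def __call__(self, epoch, lr=None):
        # LearningRateScheduler calls this once per epoch with the epoch index.
        return self.init_lr * (1 - epoch / float(self.max_epochs)) ** self.power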
Code example #14
def train(exercise_name, dataset):
    # Initialize save path
    save_path = '/home/kevin/projects/exercise_pose_evaluation_machine/models/pose_model'
    # save_path = '/home/kevin/projects/exercise_pose_evaluation_machine/models/pose_model_v2'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    save_path += '/' + str(exercise_name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    save_path += '/' + str(exercise_name) + '_pose_model.h5'

    # Get keypoint
    x = []
    y = []
    for data in dataset:
        keypoints = np.array(data["keypoints"]).flatten()
        x.append(keypoints)

        is_starting_pose = data["is_starting_pose"]
        label = 1 if is_starting_pose else 0
        y.append(label)

    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.3)
    
    # Convert to np arrays so that we can use with TensorFlow
    x_train = np.array(x_train).astype(np.float32)
    x_test  = np.array(x_test).astype(np.float32)
    y_train = np.array(y_train).astype(np.float32)
    y_test  = np.array(y_test).astype(np.float32)
    
    # Define number of features, labels, and hidden
    num_features = 28 # 14 pairs of (x, y) keypoints
    num_hidden = 8
    num_output = 1

    # Decaying learning rate
    learning_rate = 0.01
    lr_schedule = PolynomialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=10,
        end_learning_rate= 0.00001
    )
    optimizer = SGD(learning_rate = lr_schedule)

    model = Sequential()
    model.add(Dense(60, input_shape=(num_features,)))
    model.add(Dense(30, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(num_output, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])  # the string 'sgd' would discard the decay schedule built above
    
    # Train model
    model.fit(x_train, y_train, epochs=250, batch_size=25, shuffle = True, validation_data = (x_test, y_test), validation_split = 0.3)

    # Find accuracy
    _, accuracy = model.evaluate(x_test, y_test)
    print('Accuracy: %.2f' % (accuracy*100))
    print("Class: " + exercise_name)

    # Save model to the designated path
    model.save(save_path)
    model.summary()
    print("Saved model")
Code example #15
target_q_net = CategoricalQNetwork(
    input_tensor_spec=eval_env.observation_spec(),
    action_spec=eval_env.action_spec(),
    num_atoms=num_atoms,
    conv_layer_params=conv_layer_units,
    fc_layer_params=fc_layer_units,
    activation_fn=GELU())

# Defining train_step, which will be used to store the current step.
train_step = tf.Variable(initial_value=0)
total_steps = 150000

# Defining decay epsilon-greedy strategy.
decay_epsilon_greedy = PolynomialDecay(
    initial_learning_rate=0.9,
    decay_steps=total_steps,
    end_learning_rate=0.001,
)

# 3. Constructing the DQN Agent.
optimizer = Yogi(learning_rate=0.00025)
loss = Huber()
n_steps = 3
tau = 0.001
gamma = 0.99
min_q = -200
max_q = 200

agent = CategoricalDqnAgent(
    time_step_spec=eval_env.time_step_spec(),
    action_spec=eval_env.action_spec(),
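The agent construction is truncated here. Note that decay_epsilon_greedy, despite its parameter names, is an exploration schedule: it is callable with the current step, and TF-Agents' epsilon-greedy machinery also accepts a zero-argument callable. A common wiring, offered as an assumption about the elided code rather than the project's actual line:

# Re-evaluated each time the agent reads epsilon, using the stored step.
epsilon_fn = lambda: decay_epsilon_greedy(train_step)
# e.g. CategoricalDqnAgent(..., epsilon_greedy=epsilon_fn, train_step_counter=train_step, ...)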
Code example #16
def print_model(filename, n_hidden, lstm_layer, dropout, type_name):
    # Get original dataset
    x, y = get_dataset(type_name)
    # Fill original class type with the label 1
    y = [1 for label in y]

    # Get negative dataset
    neg_x, neg_y = get_dataset("not-" + type_name)

    # Fill the negative class with the label 0
    neg_y = [0 for label in neg_y]
    x.extend(neg_x)
    y.extend(neg_y)

    # Flatten X coordinates and filter
    x = np.array(x)
    _x = []
    _y = []
    for idx, data in enumerate(x):
        data = [np.reshape(np.array(frames), (28)).tolist() for frames in data]
        _x.append(data)
        _y.append(y[idx])
    x = _x
    y = _y

    # Split to training and test dataset
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.3)
    x_train = np.array(x_train)
    x_test = np.array(x_test)
    y_train = np.array(y_train)
    y_test = np.array(y_test)

    # Define training parameters
    n_output = 1

    # Make LSTM Layer
    # LSTM cells initialized in a loop, one per requested layer
    lstm_cells = [
        LSTMCell(n_hidden,
                 activation='relu',
                 use_bias=True,
                 unit_forget_bias=1.0) for _ in range(lstm_layer)
    ]
    stacked_lstm = StackedRNNCells(lstm_cells)

    # Decaying learning rate
    learning_rate = 1e-2
    lr_schedule = PolynomialDecay(initial_learning_rate=learning_rate,
                                  decay_steps=10,
                                  end_learning_rate=0.00001)
    optimizer = Adam(learning_rate=lr_schedule)

    # Initiate model
    model = Sequential()
    model.add(InputLayer(input_shape=x_train[0].shape))
    model.add(RNN(stacked_lstm))
    # model.add(LSTM(
    #     n_hidden,
    #     activation='relu',
    #     use_bias=True,
    #     unit_forget_bias = 1.0,
    #     input_shape=x_train[0].shape,
    #     return_sequences=True))
    # for _ in range(lstm_layer-2):
    #     model.add(LSTM(
    #         n_hidden,
    #         activation='relu',
    #         use_bias=True,
    #         unit_forget_bias = 1.0,
    #         return_sequences=True))
    # model.add(LSTM(
    #     n_hidden,
    #     activation='relu',
    #     use_bias=True,
    #     unit_forget_bias = 1.0))
    model.add(Dropout(dropout))
    model.add(
        Dense(n_output,
              activation='sigmoid',
              kernel_regularizer=regularizers.l2(0.01),
              activity_regularizer=regularizers.l1(0.01)))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    # Train model
    model.fit(x_train,
              y_train,
              epochs=1,
              batch_size=1000,
              shuffle=True,
              validation_data=(x_test, y_test),
              validation_split=0.4)

    # Print model
    with open(f'{filename}.txt', 'w') as f:
        model.summary(print_fn=lambda x: f.write(x + '\n'))
    plot_model(model,
               to_file=f'{filename}.png',
               show_shapes=True,
               show_layer_names=True)
Code example #17
    ]
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y_true_labels, logits=y_pred_logits)
    weights = tf.gather(c_weights, y_true_labels)
    losses = tf.multiply(losses, weights)
    return tf.math.reduce_mean(losses)


EPOCHS = 350
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
VALIDATION_STEPS = VALID_LENGTH // BATCH_SIZE
DECAY_STEPS = (STEPS_PER_EPOCH * EPOCHS)  # // ACCUM_STEPS
print("Decay steps: {}".format(DECAY_STEPS))

learning_rate_fn = PolynomialDecay(initial_learning_rate=1e-2,
                                   decay_steps=DECAY_STEPS,
                                   end_learning_rate=1e-5,
                                   power=0.9)

model.compile(optimizer=SGD(learning_rate=learning_rate_fn,
                            momentum=0.9,
                            decay=0.0005),
              loss=weighted_cross_entropy_loss,
              metrics=['accuracy', iou_coef])

callbacks = [
    # ReduceLROnPlateau(monitor='val_iou_coef', mode='max', patience=10, factor=0.2, min_lr=1e-5, verbose=2),
    ModelCheckpoint(MODEL_PATH,
                    monitor='val_iou_coef',
                    mode='max',
                    verbose=2,
                    save_best_only=True,
Code example #18
def train(exercise_name, epochs, batch_size, double, dataset):
    # Get keypoint
    x = []
    y = []
    for data in dataset:
        keypoints = np.array(data["keypoints"]).flatten()
        x.append(keypoints)

        is_starting_pose = data["is_starting_pose"]
        label = 1 if is_starting_pose else 0
        y.append(label)

    # Initialize paths
    base_path = "/home/kevin/projects/initial-pose-data/train_data"
    date_string = datetime.now().isoformat()
    filename = f'{exercise_name}_{epochs}_epochs_{batch_size}_batch_size_2x30 binary pose k-fold results {date_string}' if double else f'{exercise_name}_{epochs}_epochs_{batch_size}_batch_size binary pose k-fold results {date_string}'

    # Get dataset folders
    dirs = os.listdir(base_path)

    # One hot encoder
    y = np.array(y)
    # y = y.reshape(-1, 1)
    # one_hot = OneHotEncoder(sparse=False)
    # y = one_hot.fit_transform(y)

    # Create file and write CSV header
    write_header(filename)
    body = {}

    # Initialize total accuracy variable and number of K-Fold splits
    total = 0
    n_splits = 10

    # Initialize K Fold
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1)
    k_fold_index = 1

    x = np.array(x)
    y = np.array(y)
    for train_index, test_index in skf.split(x, y):
        x_train = x[train_index]
        y_train = y[train_index]
        x_test = x[test_index]
        y_test = y[test_index]
    
        # Define number of features, labels, and hidden
        num_features = 28 # 14 pairs of (x, y) keypoints
        num_hidden = 8
        num_output = 1

        # Decaying learning rate
        learning_rate = 0.01
        lr_schedule = PolynomialDecay(
            initial_learning_rate=learning_rate,
            decay_steps=10,
            end_learning_rate= 0.00001
        )
        optimizer = SGD(learning_rate = lr_schedule)

        model = Sequential()
        model.add(Dense(60, input_shape=(num_features,)))
        model.add(Dense(30, activation='relu'))
        if double:
            model.add(Dense(30, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(num_output, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])  # the string 'sgd' would discard the decay schedule built above

        # Train model
        model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, shuffle = True, validation_data = (x_test, y_test), validation_split = 0.3)

        # Find accuracy
        _, accuracy = model.evaluate(x_test, y_test)
        accuracy *= 100
        total += accuracy
        body['k-fold ' + str(k_fold_index)] = "{:.2f}".format(accuracy)
        print('Accuracy: %.2f' % (accuracy))
        k_fold_index += 1

    # Write iterations
    body['exercise name'] = exercise_name
    body['avg'] = "{:.2f}".format(total/n_splits)
    write_body(filename, body)
Code example #19
File: policy.py  Project: mahaitongdae/mpg
    def __init__(self, obs_dim, act_dim, value_model_cls,
                 value_num_hidden_layers, value_num_hidden_units,
                 value_hidden_activation, value_lr_schedule, policy_model_cls,
                 policy_num_hidden_layers, policy_num_hidden_units,
                 policy_hidden_activation, policy_out_activation,
                 policy_lr_schedule, alpha, alpha_lr_schedule, policy_only,
                 double_Q, target, tau, delay_update, deterministic_policy,
                 action_range, **kwargs):
        super().__init__()
        self.policy_only = policy_only
        self.double_Q = double_Q
        self.target = target
        self.tau = tau
        self.delay_update = delay_update
        self.deterministic_policy = deterministic_policy
        self.action_range = action_range
        self.alpha = alpha

        value_model_cls, policy_model_cls = NAME2MODELCLS[value_model_cls], \
                                            NAME2MODELCLS[policy_model_cls]
        self.policy = policy_model_cls(obs_dim,
                                       policy_num_hidden_layers,
                                       policy_num_hidden_units,
                                       policy_hidden_activation,
                                       act_dim * 2,
                                       name='policy',
                                       output_activation=policy_out_activation)
        self.policy_target = policy_model_cls(
            obs_dim,
            policy_num_hidden_layers,
            policy_num_hidden_units,
            policy_hidden_activation,
            act_dim * 2,
            name='policy_target',
            output_activation=policy_out_activation)
        policy_lr = PolynomialDecay(*policy_lr_schedule)
        self.policy_optimizer = self.tf.keras.optimizers.Adam(
            policy_lr, name='policy_adam_opt')

        self.Q1 = value_model_cls(obs_dim + act_dim,
                                  value_num_hidden_layers,
                                  value_num_hidden_units,
                                  value_hidden_activation,
                                  1,
                                  name='Q1')
        self.Q1_target = value_model_cls(obs_dim + act_dim,
                                         value_num_hidden_layers,
                                         value_num_hidden_units,
                                         value_hidden_activation,
                                         1,
                                         name='Q1_target')
        self.Q1_target.set_weights(self.Q1.get_weights())
        value_lr = PolynomialDecay(*value_lr_schedule)
        self.Q1_optimizer = self.tf.keras.optimizers.Adam(value_lr,
                                                          name='Q1_adam_opt')

        self.Q2 = value_model_cls(obs_dim + act_dim,
                                  value_num_hidden_layers,
                                  value_num_hidden_units,
                                  value_hidden_activation,
                                  1,
                                  name='Q2')
        self.Q2_target = value_model_cls(obs_dim + act_dim,
                                         value_num_hidden_layers,
                                         value_num_hidden_units,
                                         value_hidden_activation,
                                         1,
                                         name='Q2_target')
        self.Q2_target.set_weights(self.Q2.get_weights())
        self.Q2_optimizer = self.tf.keras.optimizers.Adam(value_lr,
                                                          name='Q2_adam_opt')

        if self.policy_only:
            self.target_models = ()
            self.models = (self.policy, )
            self.optimizers = (self.policy_optimizer, )
        else:
            if self.double_Q:
                assert self.target
                self.target_models = (
                    self.Q1_target,
                    self.Q2_target,
                    self.policy_target,
                )
                self.models = (
                    self.Q1,
                    self.Q2,
                    self.policy,
                )
                self.optimizers = (
                    self.Q1_optimizer,
                    self.Q2_optimizer,
                    self.policy_optimizer,
                )
            elif self.target:
                self.target_models = (
                    self.Q1_target,
                    self.policy_target,
                )
                self.models = (
                    self.Q1,
                    self.policy,
                )
                self.optimizers = (
                    self.Q1_optimizer,
                    self.policy_optimizer,
                )
            else:
                self.target_models = ()
                self.models = (
                    self.Q1,
                    self.policy,
                )
                self.optimizers = (
                    self.Q1_optimizer,
                    self.policy_optimizer,
                )

        if self.alpha == 'auto':
            self.alpha_model = AlphaModel(name='alpha')
            alpha_lr = self.tf.keras.optimizers.schedules.PolynomialDecay(
                *alpha_lr_schedule)
            self.alpha_optimizer = self.tf.keras.optimizers.Adam(
                alpha_lr, name='alpha_adam_opt')
            self.models += (self.alpha_model, )
            self.optimizers += (self.alpha_optimizer, )
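tau and delay_update are stored here, but the target-network update itself happens elsewhere in the class. The usual soft (Polyak) update these fields imply, sketched as a standalone helper rather than the project's actual method (models and target_models are ordered pairwise, as constructed above):

def soft_update(models, target_models, tau):
    # theta_target <- tau * theta + (1 - tau) * theta_target, per weight tensor
    for model, target in zip(models, target_models):
        target.set_weights([tau * w + (1.0 - tau) * tw
                            for w, tw in zip(model.get_weights(),
                                             target.get_weights())])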