Code Example #1
def train(max_features, maxlen, num_nodes, dropout, optimizer,
          log_learning_rate, batch_size, epochs):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(Bidirectional(LSTM(num_nodes)))
    model.add(Dropout(dropout))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(OPTIMIZERS[optimizer](lr=10**log_learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              callbacks=[
                  EarlyStopping(monitor='val_loss',
                                min_delta=1e-4,
                                patience=3,
                                verbose=1,
                                mode='auto'),
                  PolyaxonKerasCallback(),
                  PolyaxonKerasModelCheckpoint(),
                  TensorBoard(log_dir=tracking.get_tensorboard_path(),
                              histogram_freq=1),
                  ModelCheckpoint(tracking.get_model_path())
              ])

    return model.evaluate(x_test, y_test)[1]
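This snippet assumes imports and an OPTIMIZERS mapping defined elsewhere in the file. A minimal sketch of that context (the dictionary keys and the Polyaxon import path are assumptions; the integration module has moved between releases, and the project may use standalone keras rather than tensorflow.keras):

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from tensorflow.keras.layers import Bidirectional, Dense, Dropout, Embedding, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam, RMSprop, SGD

from polyaxon import tracking
from polyaxon.tracking.contrib.keras import (
    PolyaxonKerasCallback,
    PolyaxonKerasModelCheckpoint,
)

# Map each optimizer name to its class; train() indexes this with the
# `optimizer` argument and instantiates the class with the decoded
# learning rate (10**log_learning_rate).
OPTIMIZERS = {
    'adam': Adam,
    'rmsprop': RMSprop,
    'sgd': SGD,
}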
Code Example #2
File: model.py Project: vishalbelsare/polyaxon
def train(max_features, maxlen, embedding_size, kernel_size, optimizer,
          filters, pool_size, lstm_output_size, log_learning_rate, batch_size,
          epochs):
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout(0.25))
    model.add(
        Conv1D(filters,
               kernel_size,
               padding='valid',
               activation='relu',
               strides=1))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(OPTIMIZERS[optimizer](lr=10**log_learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              callbacks=[
                  PolyaxonCallback(),
                  TensorBoard(log_dir=tracking.get_tensorboard_path(),
                              histogram_freq=1),
                  ModelCheckpoint(tracking.get_outputs_path("model"))
              ])

    score, accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
    return score, accuracy
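A hypothetical invocation, with illustrative hyperparameter values only (none of these numbers come from the project; log_learning_rate=-3 decodes to a learning rate of 1e-3 via the 10**log_learning_rate term above):

score, accuracy = train(max_features=20000,
                        maxlen=100,
                        embedding_size=128,
                        kernel_size=5,
                        optimizer='adam',
                        filters=64,
                        pool_size=4,
                        lstm_output_size=70,
                        log_learning_rate=-3,
                        batch_size=32,
                        epochs=2)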
Code Example #3
def main(args):
    # MultiWorkerMirroredStrategy creates copies of all of the model's
    # variables on each device, across all workers.
    # If your GPUs don't support NCCL, pass a different
    # CollectiveCommunication option here (e.g. AUTO or RING).
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
        communication=tf.distribute.experimental.CollectiveCommunication.NCCL)

    BATCH_SIZE_PER_REPLICA = 64
    BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync

    with strategy.scope():
        ds_train = make_datasets_unbatched().batch(BATCH_SIZE).repeat()
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = \
            tf.data.experimental.AutoShardPolicy.DATA
        ds_train = ds_train.with_options(options)
        # Model building/compiling need to be within `strategy.scope()`.
        multi_worker_model = get_model(args)

    # `decay`, used by the LearningRateScheduler below, can be any
    # learning-rate decay function; it is defined elsewhere in this file
    # (a sketch follows this example).
    # Callback that prints the learning rate at the end of each epoch.
    class PrintLR(tf.keras.callbacks.Callback):

        def on_epoch_end(self, epoch, logs=None):
            print('\nLearning rate for epoch {} is {}'.format(
                epoch + 1, multi_worker_model.optimizer.lr.numpy()))

    callbacks = [
        PrintLR(),
        tf.keras.callbacks.LearningRateScheduler(decay),
    ]

    # Polyaxon
    if TASK_INDEX == 0:
        plx_callback = PolyaxonKerasCallback()
        plx_model_callback = PolyaxonKerasModelCheckpoint(save_weights_only=True)
        log_dir = tracking.get_tensorboard_path()
        # Extend, rather than replace, the callback list so every worker
        # keeps the same learning-rate schedule; the chief additionally
        # logs to TensorBoard and Polyaxon.
        callbacks += [
            tf.keras.callbacks.TensorBoard(log_dir=log_dir),
            plx_model_callback,
            plx_callback,
        ]

    # Keras' `model.fit()` trains the model for the specified number of epochs
    # and steps per epoch. The numbers here are for demonstration purposes
    # only and may not produce a model of good quality.
    multi_worker_model.fit(ds_train,
                           epochs=args.epochs,
                           steps_per_epoch=70,
                           callbacks=callbacks)

    multi_worker_model.save("/tmp/model")

    if TASK_INDEX == 0:
        tracking.log_model(path="/tmp/model", framework="tensorflow")
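This excerpt references decay, TASK_INDEX, make_datasets_unbatched and get_model without defining them; all live elsewhere in the file. For the first two, a sketch of plausible definitions (the decay breakpoints are illustrative, and the parsing assumes the standard TF_CONFIG cluster spec used by tf.distribute):

import json
import os

def decay(epoch):
    # Step-wise learning-rate decay; breakpoints and rates are examples only.
    if epoch < 3:
        return 1e-3
    if epoch < 7:
        return 1e-4
    return 1e-5

# tf.distribute describes the cluster through the TF_CONFIG environment
# variable; the task index identifies this process, and index 0 is the
# chief, which is why only TASK_INDEX == 0 logs to Polyaxon above.
tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
TASK_INDEX = tf_config.get('task', {}).get('index', 0)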
Code Example #4
File: main.py Project: vishalbelsare/polyaxon
def train_network():
    data_size = 1000
    # 80% of the data is for training.
    train_pct = 0.8

    train_size = int(data_size * train_pct)

    # Create some input data between -1 and 1 and randomize it.
    x = np.linspace(-1, 1, data_size)
    np.random.shuffle(x)

    # Generate the output data.
    # y = 0.5x + 2 + noise
    y = 0.5 * x + 2 + np.random.normal(0, 0.5, (data_size, ))

    # Split into test and train pairs.
    x_train, y_train = x[:train_size], y[:train_size]
    x_test, y_test = x[train_size:], y[train_size:]

    file_writer = tf.summary.create_file_writer(
        tracking.get_tensorboard_path())
    file_writer.set_as_default()

    def lr_schedule(epoch):
        """
        Returns a custom learning rate that decreases as epochs progress.
        """
        learning_rate = 0.3435
        if epoch > 10:
            learning_rate = 0.0223
        if epoch > 20:
            learning_rate = 0.012
        if epoch > 50:
            learning_rate = 0.00532

        tf.summary.scalar('learning rate', data=learning_rate, step=epoch)
        tracking.log_metric('learning rate', value=learning_rate, step=epoch)
        return learning_rate

    lr_callback = keras.callbacks.LearningRateScheduler(lr_schedule)
    tensorboard_callback = keras.callbacks.TensorBoard(
        log_dir=tracking.get_tensorboard_path())
    plx_callback = PolyaxonKerasCallback(run=tracking.TRACKING_RUN)

    model = keras.models.Sequential([
        keras.layers.Dense(16, input_dim=1),
        keras.layers.Dense(1),
    ])

    model.compile(
        loss='mse',  # keras.losses.mean_squared_error
        optimizer=keras.optimizers.SGD(),
    )

    model.fit(
        x_train,  # input
        y_train,  # output
        batch_size=train_size,
        verbose=0,  # Suppress chatty output; use TensorBoard instead
        epochs=100,
        validation_data=(x_test, y_test),
        callbacks=[tensorboard_callback, lr_callback, plx_callback],
    )
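As with the other excerpts, the module header is omitted. A plausible preamble (the Polyaxon import path is an assumption, as in Example #1), including the tracking.init() call that Example #5 shows explicitly and that must run before any tracking.* helper is used:

import numpy as np
import tensorflow as tf
from tensorflow import keras

from polyaxon import tracking
from polyaxon.tracking.contrib.keras import PolyaxonKerasCallback

# Initialize the Polyaxon run; this populates tracking.TRACKING_RUN and
# makes tracking.get_tensorboard_path() and tracking.log_metric() usable.
tracking.init()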
Code Example #5
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 1)

    # One-hot encode the outputs.
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)
    num_classes = y_test.shape[1]

    # Polyaxon
    tracking.init()
    tracking.log_data_ref(content=X_train, name='x_train')
    tracking.log_data_ref(content=y_train, name='y_train')
    tracking.log_data_ref(content=X_test, name='x_test')
    tracking.log_data_ref(content=y_test, name='y_test')

    plx_callback = PolyaxonCallback()
    log_dir = tracking.get_tensorboard_path()

    print("log_dir", log_dir)
    print("model_dir", plx_callback.filepath)
    # TF Model
    model = create_model(conv1_size=args.conv1_size,
                         conv2_size=args.conv2_size,
                         dropout=args.dropout,
                         hidden1_size=args.hidden1_size,
                         conv_activation=args.conv_activation,
                         dense_activation=args.dense_activation,
                         optimizer=args.optimizer,
                         learning_rate=args.learning_rate,
                         loss=args.loss,
                         num_classes=y_test.shape[1])
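create_model is defined elsewhere in main.py; only its call site appears in this excerpt. A sketch consistent with that signature (the layer arrangement is an assumption, img_width and img_height are taken from the surrounding scope as in the reshape above, and setting the learning rate by attribute assignment assumes a TF 2.x Keras optimizer):

def create_model(conv1_size, conv2_size, dropout, hidden1_size,
                 conv_activation, dense_activation, optimizer,
                 learning_rate, loss, num_classes):
    model = keras.models.Sequential([
        keras.layers.Conv2D(conv1_size, (3, 3), activation=conv_activation,
                            input_shape=(img_width, img_height, 1)),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Conv2D(conv2_size, (3, 3), activation=conv_activation),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(hidden1_size, activation=dense_activation),
        keras.layers.Dropout(dropout),
        keras.layers.Dense(num_classes, activation='softmax'),
    ])
    # Resolve the optimizer from its string name, then override its
    # learning rate with the tuned value.
    opt = keras.optimizers.get(optimizer)
    opt.learning_rate = learning_rate
    model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
    return model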