Example #1
model.add(Dropout(config.dropout_rate))
# output layer
model.add(Dense(units=1, activation='sigmoid'))  # binary classification

model.summary()

# 6. Configure the model
opt = tf.keras.optimizers.Adam(learning_rate=config.learning_rate,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-07,
                               amsgrad=False,
                               name="Adam")
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['acc'])  # binary classification
callback = EarlyStopping(monitor='val_loss',
                         patience=10,
                         verbose=0,
                         mode='auto',
                         restore_best_weights=False)
callbacks_list = [callback, WandbCallback()]

# 7. Fit the model
history = model.fit(x_train,
                    y_train,
                    batch_size=config.batch_size,
                    epochs=100,
                    validation_data=(x_val, y_val),
                    callbacks=callbacks_list,
                    verbose=2)
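
The fragment above starts mid-model, so a minimal self-contained sketch of the same pattern (sigmoid output, Adam, early stopping, WandbCallback) may help. The layer sizes, synthetic data, and config values below are assumptions for illustration only, and running it requires a configured wandb install.

# Minimal sketch, assuming wandb is installed and logged in; data and sizes are made up.
import numpy as np
import tensorflow as tf
import wandb
from wandb.keras import WandbCallback

wandb.init(project="binary-demo",
           config={"dropout_rate": 0.3, "learning_rate": 1e-3, "batch_size": 32})
config = wandb.config

x_train, y_train = np.random.rand(256, 20), np.random.randint(0, 2, 256)
x_val, y_val = np.random.rand(64, 20), np.random.randint(0, 2, 64)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(20,)),
    tf.keras.layers.Dropout(config.dropout_rate),
    tf.keras.layers.Dense(units=1, activation='sigmoid'),  # binary classification
])
model.compile(loss='binary_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate=config.learning_rate),
              metrics=['acc'])
callbacks_list = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10),
                  WandbCallback()]
model.fit(x_train, y_train,
          batch_size=config.batch_size,
          epochs=5,
          validation_data=(x_val, y_val),
          callbacks=callbacks_list,
          verbose=2)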
Example #2
                                    "model_history_log.csv"),
                       append=True)

steps_per_epoch = n_files_train * (n_events_per_file // batch_size)
n_batches_per_file = n_events_per_file // batch_size
print(
    f"steps_per_epoch {steps_per_epoch}, n_batches_per_file {n_batches_per_file}"
)

dataset_train = tf.data.Dataset.range(n_files_train).prefetch(
    n_batches_per_file * 10).interleave(TrainDatasetEven,
                                        cycle_length=2,
                                        num_parallel_calls=2,
                                        deterministic=False).repeat()

dataset_val = tf.data.Dataset.range(n_files_val).prefetch(
    n_batches_per_file * 10).interleave(ValDatasetEven,
                                        cycle_length=2,
                                        num_parallel_calls=2,
                                        deterministic=False)

history = model.fit(x=dataset_train,
                    steps_per_epoch=steps_per_epoch,
                    epochs=20,
                    validation_data=dataset_val,
                    callbacks=[checkpoint, csv_logger,
                               WandbCallback()])
with open(os.path.join('saved_models', filename, 'history.pkl'),
          'wb') as file_pi:
    pickle.dump(history.history, file_pi)
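
`TrainDatasetEven` and `ValDatasetEven` are not defined in the fragment above. As a rough sketch of the interleave pattern (the per-file loader below is an assumption, not the original code), each callable takes a file index and returns a `tf.data.Dataset` of batches for that file:

# Rough sketch of the interleave pattern; load_file_as_dataset stands in for TrainDatasetEven.
import tensorflow as tf

batch_size, n_events_per_file, n_files_train = 4, 16, 3

def load_file_as_dataset(file_idx):
    # In the real code this would read one event file; here it is faked with random tensors.
    x = tf.random.normal((n_events_per_file, 8))
    y = tf.zeros((n_events_per_file,))
    return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)

dataset_train = (tf.data.Dataset.range(n_files_train)
                 .interleave(load_file_as_dataset,
                             cycle_length=2,
                             num_parallel_calls=2,
                             deterministic=False)
                 .repeat())

for xb, yb in dataset_train.take(2):
    print(xb.shape, yb.shape)  # (4, 8) (4,)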
Example #3
def run_experiment(experiment_config: Dict,
                   save_model: bool = True,
                   use_wandb: bool = True):
    """
    Run a training experiment.
    Parameters
    ----------
    experiment_config (dict)
        Of the form
        {
            "dataset": "Dataset",
            "dataset_args": {
                "batch_size": 128,
                "test_ratio": 0.3
            },
            "model": "RetrievalModel",
            "network": "retrieval_basic_factorization",
            "network_args": {
                "hidden_dim": 64,
            },
            "train_args": {
                "epochs": 10,
                "optimizer": SGD
            }
        }

    """

    print(experiment_config)

    models_module = importlib.import_module("recommenders.models")
    model_class_ = getattr(models_module, experiment_config["model"])

    networks_module = importlib.import_module("recommenders.networks")
    network_fn_ = getattr(networks_module, experiment_config["network"])
    network_args = experiment_config.get("network_args", {})

    datasets_module = importlib.import_module("recommenders.datasets")
    dataset_class_ = getattr(datasets_module, experiment_config["dataset"])
    dataset_args = experiment_config.get("dataset_args", {})
    dataset = dataset_class_(**dataset_args)
    dataset.load_or_generate_data()

    model = model_class_(dataset=dataset,
                         network_fn=network_fn_,
                         network_args=network_args)

    if use_wandb:
        wandb.init(config=experiment_config)

    callbacks = list()
    callbacks.append(WandbCallback())
    optimizer = _get_optimizer(experiment_config["train_args"]["optimizer"])
    model.compile(optimizer=optimizer(
        learning_rate=experiment_config["train_args"]["learning_rate"]))

    model.fit(dataset.train,
              epochs=experiment_config["train_args"]["epochs"],
              validation_data=dataset.test,
              validation_freq=20,
              callbacks=callbacks)

    model.print_summary()

    if save_model:
        if use_wandb:
            model.save_weights(wandb.run.dir)
        else:
            model.save_weights()
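
The function above resolves the `dataset`, `model`, and `network` strings in the config to Python objects via `importlib`; the pattern boils down to a small helper like the sketch below (module and attribute names are placeholders):

# Sketch of config-driven dynamic loading; the module/attribute names are placeholders.
import importlib

def resolve(module_name: str, attr_name: str):
    """Return a class or function given its module path and attribute name."""
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)

# e.g. dataset_class_ = resolve("recommenders.datasets", experiment_config["dataset"])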
Example #4
def train():
    model_settings = models.prepare_model_settings(len(data_process.prepare_words_list(FLAGS.wanted_words.split(","))),
                                                   FLAGS.sample_rate,
                                                   FLAGS.clip_duration_ms,
                                                   FLAGS.window_size_ms,
                                                   FLAGS.window_strides_ms,
                                                   FLAGS.dct_coefficient_count)

    # Create model
    model = models.create_model(model_settings,
                                FLAGS.model_architecture,
                                FLAGS.model_size_info)

    # Create checkpoint for convert mlir
    #checkpoint = tf.train.Checkpoint(model)
    print("test->", FLAGS.wanted_words)
    # audio processor
    audio_processor = data_process.AudioProcessor(data_dir=FLAGS.data_dir,
                                                  silence_percentage=FLAGS.silence_percentage,
                                                  unknown_percentage=FLAGS.unknown_percentage,
                                                  augment_percentage=FLAGS.augment_percentage,
                                                  wanted_words=FLAGS.wanted_words.split(","),
                                                  model_settings=model_settings)

    # decay the learning rate in a piecewise-constant way
    training_steps_list = list(map(int, FLAGS.how_many_train_steps.split(",")))
    learning_rates_list = list(map(float, FLAGS.learning_rate.split(",")))
    lr_boundary_list = training_steps_list[:-1]  # only need values at which to change lr
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=lr_boundary_list,
                                                                       values=learning_rates_list)

    # specify optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    # Create checkpoint for convert mlir
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=["accuracy"])

    # prepare datasets
    train_dataset = audio_processor.get_data(audio_processor.Modes.training,
                                             FLAGS.background_frequency,
                                             FLAGS.background_volume,
                                             int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000))
    buffer_size = audio_processor.get_set_size(audio_processor.Modes.training)
    print("train set size:", buffer_size)
    train_dataset = train_dataset.shuffle(buffer_size=buffer_size).repeat().batch(FLAGS.batch_size).prefetch(1)

    val_dataset = audio_processor.get_data(audio_processor.Modes.validation)
    val_dataset = val_dataset.batch(FLAGS.batch_size).prefetch(1)

    # calculate how many epochs because we train for a max number of iterations
    train_max_steps = np.sum(training_steps_list)
    train_max_epochs = int(np.ceil(train_max_steps / FLAGS.eval_step_interval))

    # save models
    train_dir = Path(FLAGS.train_dir) / "best"
    train_dir.mkdir(parents=True, exist_ok=True)
    checkpoint_dir = train_dir / (FLAGS.model_architecture + "_{val_accuracy:.3f}_ckpt")
    model_checkpoint_call_back = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_dir,
        save_weights_only=True,
        monitor="val_accuracy",
        mode="max",
        save_best_only=True)

    # Model summary
    model.summary()
    # Train model
    model.fit(x=train_dataset,
              steps_per_epoch=FLAGS.eval_step_interval,
              validation_data=val_dataset,
              callbacks=[model_checkpoint_call_back, WandbCallback()],
              verbose=1,
              epochs=train_max_epochs*3)

    print("Training finished, starting to test...")
    # test and save model
    test_dataset = audio_processor.get_data(audio_processor.Modes.testing)
    test_dataset = test_dataset.batch(FLAGS.batch_size)

    test_loss, test_acc = model.evaluate(test_dataset)
    print(f"LOG====>Final test accuracy:{test_acc:0.2f} loss:{test_loss:0.3f}")
    
    # ==========saved_model===========================#
    model.save("./result/kws")
    model.save("./result/kws.h5")
    # =================checkpoint to mlir===================#
    save_path = checkpoint.save("./result/ck")
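
The piecewise-constant schedule above needs one more learning-rate value than boundary. A tiny sketch of how the comma-separated flags map onto it (the concrete flag values here are assumptions):

# Sketch only; the flag values are assumptions, not the script's defaults.
import tensorflow as tf

training_steps_list = [10000, 10000]          # e.g. FLAGS.how_many_train_steps = "10000,10000"
learning_rates_list = [1e-3, 1e-4]            # e.g. FLAGS.learning_rate = "0.001,0.0001"
lr_boundary_list = training_steps_list[:-1]   # one fewer boundary than values
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=lr_boundary_list, values=learning_rates_list)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
print(float(lr_schedule(0)), float(lr_schedule(15000)))  # 0.001 0.0001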
Example #5
            print('----- diversity:', diversity)

            generated = ''
            sentence = text[start_index:start_index + config.maxlen]
            generated += sentence
            print('----- Generating with seed: "' + sentence + '"')
            sys.stdout.write(generated)

            for i in range(200):
                x_pred = np.zeros((1, config.maxlen, len(chars)))
                for t, char in enumerate(sentence):
                    x_pred[0, t, char_indices[char]] = 1.

                preds = model.predict(x_pred, verbose=0)[0]
                next_index = sample(preds, diversity)
                next_char = indices_char[next_index]

                generated += next_char
                sentence = sentence[1:] + next_char

                sys.stdout.write(next_char)
                sys.stdout.flush()
            print()


model.fit(x,
          y,
          batch_size=config.batch_size,
          epochs=100,
          callbacks=[SampleText(), WandbCallback()])
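
The `sample` helper used in the loop above is not shown. In the classic Keras character-level text-generation example it is a temperature-based sampler, roughly:

# Assumed definition of sample(); reweights the softmax output by a temperature.
import numpy as np

def sample(preds, temperature=1.0):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-8) / temperature   # epsilon guards against log(0)
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)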
Example #6
def main(args):
    train_df = data.load_data.load_custom_text_as_pd(args.train_data,sep='\t',header=True, \
                              text_column=['Text'],target_column=['Label'])
    val_df = data.load_data.load_custom_text_as_pd(args.val_data,sep='\t', header=False, \
                          text_column=['Text'],target_column=['Label'])

    train_df = pd.DataFrame(train_df,copy=False)
    val_df = pd.DataFrame(val_df,copy=False)
    val_df.columns = train_df.columns

    model_save_dir = args.model_save_path

    try:
        os.makedirs(model_save_dir)
    except OSError:
        pass

    train_df.labels, label2idx = data.data_utils.convert_categorical_label_to_int(train_df.labels, \
                                                         save_path=os.path.join(model_save_dir,'label2idx.pkl'))

    val_df.labels, _ = data.data_utils.convert_categorical_label_to_int(val_df.labels, \
                                                         save_path=os.path.join(model_save_dir,'label2idx.pkl'))

    print ("Tokenization")
    trainX, tokenizer = data.data_utils.compute_lstm_input_arrays(train_df, 'words', args.max_text_len)
    valX, _ = data.data_utils.compute_lstm_input_arrays(val_df, 'words', args.max_text_len, tokenizer=tokenizer)

    trainX = np.asarray(trainX)
    valX = np.asarray(valX)

    outputs = data.data_utils.compute_output_arrays(train_df, 'labels')
    val_outputs = data.data_utils.compute_output_arrays(val_df, 'labels')

    outputs = outputs[:,np.newaxis]
    val_outputs = val_outputs[:,np.newaxis]

    num_words = len(tokenizer.word_index)

    print ("Modelling")
    model = models.tf_models.bilstm(args.n_lstm, args.max_text_len, num_words, args.emb_dim, dropout=args.dropout)

    print (model.summary())

    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)  #SGD

    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics='accuracy') #binary_crossentropy

    early = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=8, \
                                         verbose=1, mode='auto', restore_best_weights=True)
    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.7, \
                                          patience=5, verbose=1, mode='auto', min_lr=0.000001)
    f1callback = models.tf_utils.F1Callback(model, valX, val_outputs, filename=os.path.join(model_save_dir, 'model.h5'), patience=8)

    config = {
      'text_max_len': args.max_text_len,
      'epochs': args.epochs,
      "learning_rate": args.lr,
      "batch_size": args.train_batch_size,
      "n_lstm": args.n_lstm,
      "emb_dim": args.emb_dim,
      "dropout": args.dropout,
      "model_description": "LSTM",
    }

    with open(os.path.join(model_save_dir, 'config.pkl'), 'wb') as handle:
        pickle.dump(config, handle, protocol=pickle.HIGHEST_PROTOCOL)

    K.clear_session()

    if _has_wandb and args.wandb_logging:
        wandb.init(project='wnut-task2',config=config)
        model.fit(trainX, outputs, validation_data=(valX, val_outputs), epochs=args.epochs,\
              batch_size=args.train_batch_size, callbacks=[early, lr, f1callback, WandbCallback()], verbose=1)
    else:
        model.fit(trainX, outputs, validation_data=(valX, val_outputs), epochs=args.epochs,\
              batch_size=args.train_batch_size, callbacks=[early,lr, f1callback], verbose=1)

    model.load_weights(os.path.join(model_save_dir, 'model.h5'))
    model_json = model.to_json()
    with open(os.path.join(model_save_dir,"model.json"), "w") as json_file:
        json_file.write(model_json)

    val_pred = np.round(model.predict(valX))[:,0]

    print ("Evaluation")
    
    f1 = f1_score(val_df.labels, val_pred)
    precision = precision_score(val_df.labels, val_pred)
    recall = recall_score(val_df.labels, val_pred)

    #f1 = f1_score([idx2label[i] for i in val_df.labels], [idx2label[i] for i in val_pred])
    #precision = precision_score([idx2label[i] for i in val_df.labels], [idx2label[i] for i in val_pred])
    #recall = recall_score([idx2label[i] for i in val_df.labels], [idx2label[i] for i in val_pred])

    results_ = pd.DataFrame()
    results_['description'] = ['Bi-LSTM']
    results_['f1'] = [f1]
    results_['precision'] = [precision]
    results_['recall'] = [recall]

    print (results_.iloc[0])

    if os.path.exists('../results/result.csv'):
        results = pd.read_csv('../results/result.csv')
        results = pd.concat([results, results_], axis=0)
        results.to_csv('../results/result.csv', index=False)
    else:
        results_.to_csv('../results/result.csv', index=False)
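
`models.tf_utils.F1Callback` is project-specific and not shown above; the real class also checkpoints on the best F1 and applies patience. A rough, simplified sketch of the idea (names and behavior assumed):

# Simplified stand-in for F1Callback: report F1 on held-out arrays after every epoch.
import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score

class SimpleF1Callback(tf.keras.callbacks.Callback):
    def __init__(self, valX, val_outputs):
        super().__init__()
        self.valX = valX
        self.val_outputs = np.asarray(val_outputs).ravel()

    def on_epoch_end(self, epoch, logs=None):
        preds = np.round(self.model.predict(self.valX, verbose=0)).ravel()
        print(f"epoch {epoch}: val F1 = {f1_score(self.val_outputs, preds):.4f}")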
Example #7
        # network is not reachable, so we use random data
        x_train = tf.random.normal((n_train, n_features), dtype='float32')
        x_test = tf.random.normal((n_test, n_features), dtype='float32')
        y_train = tf.random.uniform((n_train,), minval=0, maxval=n_classes, dtype='int32')
        y_test = tf.random.uniform((n_test,), minval=0, maxval=n_classes, dtype='int32')
        return x_train, x_test, y_train, y_test


    # wandb setup
    Path(cfg.wandb.dir).mkdir(exist_ok=True, parents=True)
    wandb.init(
        config=OmegaConf.to_container(cfg, resolve=True),
        **cfg.wandb,
    )
    callbacks = [
        WandbCallback(monitor='loss', save_weights_only=True),
    ]

    # model building
    tf.keras.backend.clear_session()
    model = my_model(**cfg.model)
    model_compile(model, **cfg.compile)
    x_train, x_test, y_train, y_test = data(**cfg.data)
    history = model.fit(x_train, y_train, **cfg.fit, callbacks=callbacks)
    test_scores = model.evaluate(x_test, y_test, verbose=2)
    print('Test loss:', test_scores[0])
    print('Test accuracy:', test_scores[1])
    return True

if __name__ == '__main__':
    train_dense_model_main()
Example #8
def train():
    """Use the wandb configuration to create a model and train it"""
    # get the configuration settings from wandb
    config = wandb.config 
    
    tr_gen = DatasetGenerator(myConfig, shuffle=False, multitask=False)
    
    # get the datasets and relevant variables
    train_ds, train_steps, val_ds, val_steps, classes = _load_dataset(config)
    
    # TODO: be smarter about this
    img_shape = (224,224,3)
    
    # Setup a multi-gpu distributed batch training strategy
    strategy = tf.distribute.MirroredStrategy()
    
    with strategy.scope():
        # handle if model is not found
        try:
            model_cl = getattr(tf.keras.applications, config['model'])
        except AttributeError:
            raise SyntaxError(f'Model "{config["model"]}" was not found, please adjust your sweep configuration.')

        # get pretrained model with top removed
        model = model_cl(
            include_top=False, 
            weights='imagenet' if config['imagenet'] else None, 
            input_shape=img_shape
        )

        # add data augmentation
        model = add_preprocessing(model)

        # build model to config specifications
        model = build_model(
            model, 
            config, 
            len(classes)
        )

        # may replace with wandb equivalent
        earlystop = tf.keras.callbacks.EarlyStopping(
            monitor='val_loss', 
            mode='min', 
            verbose=1
        )

        # setup checkpointing between epochs
        final_model_path = os.path.join(wandb.run.dir, 'checkpoints', 'final.ckpt')
        model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=final_model_path,
            monitor='val_loss',
            mode='min',
            save_best_only=True,
            verbose=1
        )


        # compile the model
        model.compile(
            optimizer=opt,
            metrics=mets,
            loss=loss_fn, 
            loss_weights=loss_ws
        )

        # wandb sample logging
        tr_batch = next(train_ds)
        if config['orientation']:
            tr_image_batch, (tr_label_batch, tr_orientation_batch) = tr_batch
            log_batch(tr_image_batch.numpy(), tr_label_batch.numpy(), classes, orientation_batch=tr_orientation_batch.numpy())
        else:
            tr_image_batch, tr_label_batch = tr_batch
            log_batch(tr_image_batch.numpy(), tr_label_batch.numpy(), classes)

        # perform training
        model.fit( 
            train_ds,
            steps_per_epoch=train_steps, 
            validation_data=val_ds,
            validation_steps=val_steps,
            epochs=config['epochs'], 
            callbacks=[earlystop, 
                       WandbCallback(),
                       model_checkpoint_callback
                      ]
        )

        # save the best model checkpoint to wandb
        wandb.save(final_model_path)
Example #9
model.add(Conv2D(32,
                 (config.first_layer_conv_width, config.first_layer_conv_height),
                 input_shape=(28, 28, 1),
                 activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(config.dense_layer_size, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'], weighted_metrics=['accuracy'])


model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=config.epochs,
          callbacks=[WandbCallback(data_type="image", save_model=False),    
                     TensorBoard(log_dir=wandb.run.dir)])

from keras import backend as K
import tensorflow as tf
from tensorflow.python.tools import freeze_graph


model.save("cnn.h5")
saver = tf.train.Saver()
saver.save(K.get_session(), './keras_model.ckpt')

freeze_graph.freeze_graph('tensorflowModel.pbtxt', "", False,
                          './tensorflowModel.ckpt', "output/softmax",
                          "save/restore_all", "save/Const:0",
                          'frozentensorflowModel.pb', True, "")
Example #10
model = Sequential()
model.add(
    Conv1D(filters=64,
           kernel_size=4,
           activation='relu',
           input_shape=(n_input, n_features)))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(wandb.config.dropout))
model.add(Flatten())
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# validation_split=0.33
# #metrics=['accuracy', 'loss', 'val_accuracy', 'val_loss'],)

# %%
model.fit(generator, epochs=wandb.config.epochs, callbacks=[WandbCallback()])
model.save(os.path.join(wandb.run.dir, "cnn-model.h5"))

# %%
pred_list = []

batch = train[-n_input:].reshape((1, n_input, n_features))

for i in range(n_input):
    pred_list.append(model.predict(batch)[0])
    batch = np.append(batch[:, 1:, :], [[pred_list[i]]], axis=1)

# %%
df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
                          index=df[-n_input:].index,
                          columns=['Prediction'])
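
The loop above produces a multi-step forecast by feeding each prediction back into the rolling input window. A shape-only sketch of that mechanism, with a dummy predictor standing in for the trained model:

# Shape-only sketch of the recursive forecast loop; n_input, n_features, and the predictor are assumptions.
import numpy as np

n_input, n_features = 12, 1
train = np.arange(100, dtype='float32').reshape(-1, 1)

def dummy_predict(batch):
    return batch[:, -1, :]   # stand-in for model.predict: just repeat the last value

pred_list = []
batch = train[-n_input:].reshape((1, n_input, n_features))
for i in range(n_input):
    pred_list.append(dummy_predict(batch)[0])
    batch = np.append(batch[:, 1:, :], [[pred_list[i]]], axis=1)
print(np.array(pred_list).shape)  # (12, 1): one step per prediction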
Example #11
                            bias_regularizer=keras.regularizers.l2(10e-6)),
            keras.layers.Dense(NUM_CLASSES) # softmax not needed as loss specifies from_logits
        ])

        model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.01),
                    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy', keras.metrics.SparseCategoricalCrossentropy(from_logits=True)])

        # train the model
        #! FIT TO CV TRAIN SET
        history = model.fit(X_train_, y_train_,
                            epochs=epochs,
                            verbose = 2,
                            batch_size=batch_size,
                            validation_data=(X_val, y_val),
                            callbacks=[time_callback, WandbCallback(log_weights=True)])

        train_accs.append(history.history['accuracy'])
        val_accs.append(history.history['val_accuracy'])
        times.append(time_callback.times[0])

    AVG_TRAIN_ACCS.append(np.mean(np.stack(train_accs, axis=0), axis=0))
    AVG_VAL_ACCS.append(np.mean(np.stack(val_accs, axis=0), axis=0))
    AVG_TIMES.append(np.mean(times))

    #! IDEA: collect the val data from fit -> stack over CV runs -> take mean -> plot

np.save('./data/1a_3/train_accs_350.npy', AVG_TRAIN_ACCS)
np.save('./data/1a_3/val_accs_350.npy', AVG_VAL_ACCS)
np.save('./data/1a_3/avg_time_1_epoch_350.npy', AVG_TIMES)
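
`time_callback` is not defined in this fragment; a common pattern (assumed here) is a callback that records each epoch's wall-clock time, so that `time_callback.times[0]` is the duration of the first epoch:

# Sketch of an epoch-timing callback; the fragment's actual time_callback is not shown.
import time
import tensorflow as tf

class TimeHistory(tf.keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self._start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self._start)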
Example #12
def train():
    dataloaders = list()
    if 'big_exam' in wandb.config.datasets:
        dataloaders.append(AbnormalBigExamLoader(wandb_config=wandb.config))
    if 'audicor_10s' in wandb.config.datasets:
        dataloaders.append(AbnormalAudicor10sLoader(wandb_config=wandb.config))
    '''
    g = BaseDataGenerator(dataloaders=dataloaders,
                            wandb_config=wandb.config,
                            preprocessing_fn=preprocessing)
    train_set, valid_set, test_set = g.get()
    '''
    big_data = np.load('big_signal_label_634_5_28.npy')

    hht_set_signal = np.load('./abnormal_detection/wavelet_2c/s3s4label_hs1.npy')[:, 3:]
    #hht_set_signal = np.delete(hht_set_signal, 433, axis=0)
    #hht_set_signal = np.delete(hht_set_signal, 525, axis=0)

    hht_set_signal_h2 = np.load('./abnormal_detection/wavelet_2c/s3s4label_hs2.npy')[:, 3:]

    hht_set_signal = np.array([np.array(hht_set_signal), np.array(hht_set_signal_h2)])
    hht_set_signal = np.transpose(hht_set_signal, (1, 2, 0))
    #hht_set_signal = np.vstack((hht_set_signal, amp))
    print('hht_set_signal', hht_set_signal.shape)
    
    
    a = big_data[:, 1:3]
    b = np.zeros((big_data.shape[0], 1))
    for i in range(big_data.shape[0]):
        #print('a[i,0]', a[i,0])
        if a[i, 0] == '1.0':
            b[i] = 1
        if a[i, 1] == '1.0':
            b[i] = 1
    #print(a[:5,0],a[:5,1])
    #print(a[:5],a.shape)
    #b=np.delete(b,433,axis = 0)
    #b=np.delete(b,525,axis = 0)
    print(b[:5],b.shape)
    duration = 10.0
    fs = 1000.0
    samples = int(fs*duration)
    t = np.arange(samples) / fs
    train_set_size = int(big_data.shape[0] / 2)
    valid_set_size = int(train_set_size + big_data.shape[0] * 0.2)
    #print('set_size',train_set_size,valid_set_size)
    train_set_signal = np.array(hht_set_signal[:train_set_size], dtype=float)   # 0.5
    valid_set_signal = np.array(hht_set_signal[train_set_size:valid_set_size], dtype=float)   # 0.2
    test_set_signal = np.array(hht_set_signal[valid_set_size:], dtype=float)   # 0.3
    
    #train_set_label=np.array((big_data[:298,1:2]), dtype=np.float)
    #valid_set_label=np.array((big_data[298:418,1:2]), dtype=np.float)
    #test_set_label=np.array((big_data[418:,1:2]), dtype=np.float)
    
    train_set_label = np.array(b[:train_set_size], dtype=float)
    valid_set_label = np.array(b[train_set_size:valid_set_size], dtype=float)
    test_set_label = np.array(b[valid_set_size:], dtype=float)
    print('test_set_label', np.sum(test_set_label), test_set_label.shape, test_set_signal.shape)
    train_set_label = onehot(train_set_label)
    valid_set_label = onehot(valid_set_label)
    test_set_label = onehot(test_set_label)

    train_set = [np.array(train_set_signal), np.array(train_set_label)]
    valid_set = [np.array(valid_set_signal), np.array(valid_set_label)]
    test_set = [np.array(test_set_signal), np.array(test_set_label)]
    
    train_set[0], means_and_stds = normalize(train_set[0])
    valid_set[0], _ = normalize(valid_set[0], means_and_stds)
    test_set[0], _ = normalize(test_set[0], means_and_stds)
    
    train_set[0]    = multi_input_format(train_set[0], wandb.config.include_info)
    valid_set[0]    = multi_input_format(valid_set[0], wandb.config.include_info)
    test_set[0]     = multi_input_format(test_set[0], wandb.config.include_info)
    
    print('have', train_set[1][:, 0].sum() / train_set[1][:, 0].shape[0],
          valid_set[1][:, 0].sum() / valid_set[1][:, 0].shape[0],
          test_set[1][:, 0].sum() / test_set[1][:, 0].shape[0])
    print('have', train_set[1][:, 1].sum() / train_set[1][:, 0].shape[0],
          valid_set[1][:, 1].sum() / valid_set[1][:, 0].shape[0],
          test_set[1][:, 1].sum() / test_set[1][:, 0].shape[0])
    print(train_set[1][:, 0].shape[0], valid_set[1][:, 0].shape[0], test_set[1][:, 0].shape[0])
    for X in [train_set[0], valid_set[0], test_set[0]]:
        if wandb.config.n_ekg_channels != 0:
            X['ekg_input'] = X['ekg_hs_input'][..., :wandb.config.n_ekg_channels]
        if wandb.config.n_hs_channels != 0:
            print('X',X['ekg_hs_input'].shape)
            hs = X['ekg_hs_input'][..., -wandb.config.n_hs_channels:] # (?, n_samples, n_channels)
            X['hs_input'] = mp_generate_wavelet(hs,wandb.config.sampling_rate, 
                                                            wandb.config.wavelet_scale_length,
                                                            'Generate Wavelets')
        X.pop('ekg_hs_input')
    
    # save means and stds to wandb
    #with open(os.path.join(wandb.run.dir, 'means_and_stds.pl'), 'wb') as f:
        #pickle.dump(g.means_and_stds, f)

    model = backbone(wandb.config, include_top=True, classification=True, classes=2)
    model.compile(RAdam(1e-4) if wandb.config.radam else Adam(amsgrad=True), 
                    'binary_crossentropy', metrics=['acc'])
    model.summary()
    wandb.log({'model_params': model.count_params()}, commit=False)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=50),
        # ReduceLROnPlateau(patience=10, cooldown=5, verbose=1),
        LogBest(),
        WandbCallback(log_gradients=False, training_data=train_set),
    ]

    model.fit(train_set[0], train_set[1], batch_size=64, epochs=800, validation_data=(valid_set[0], valid_set[1]), callbacks=callbacks, shuffle=True)
    model.save(os.path.join(wandb.run.dir, 'final_model.h5'))

    # load best model from wandb and evaluate
    print('Evaluate the BEST model!')

    from tensorflow.keras.models import load_model
    from ekg.layers import LeftCropLike, CenterCropLike
    from ekg.layers.sincnet import SincConv1D

    custom_objects = {
        'SincConv1D': SincConv1D,
        'LeftCropLike': LeftCropLike, 
        'CenterCropLike': CenterCropLike
    }

    model = load_model(os.path.join(wandb.run.dir, 'model-best.h5'),
                        custom_objects=custom_objects, compile=False)

    evaluation.evaluation(model, test_set)
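
`onehot` is not shown above; given the (N, 1) arrays of 0/1 labels it is applied to, it presumably expands them into two columns, for example:

# Sketch of a two-class one-hot encoder consistent with the labels above (assumed, not the original).
import numpy as np

def onehot(labels):
    labels = np.asarray(labels, dtype=int).reshape(-1)
    out = np.zeros((labels.shape[0], 2), dtype='float32')
    out[np.arange(labels.shape[0]), labels] = 1.0
    return out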
Example #13
def curr_learn_experiment(args):
  """ Run curriculum learning experiment, pre-training on class labels for
  args.class_switch total epochs, then finetuning on species labels for 
  args.epochs total epochs """
  wandb.init(project=args.project_name)

  # NOTE: these absolute paths to the general and specific train and validation
  # data depend on your setup 
  general_train = "curr_learn_25_s_620_100_BY_CLASS/train"
  general_val = "curr_learn_25_s_620_100_BY_CLASS/val"
  specific_train = "curr_learn_25_s_620_100/train"
  specific_val = "curr_learn_25_s_620_100/val"
  
  train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
  test_datagen = ImageDataGenerator(rescale=1. / 255) 
  
  # initially, the more general model pre-trains on 5 classes
  # (amphibians, birds, insects, mammals, and reptiles)
  # on data generated with general labels (biological/taxonomic class)
  general_model = build_small_cnn(args.dropout, 5, args.class_lr)
  log_model_params(general_model, wandb.config, args, img_width)

  switch_epochs = args.class_switch
  callbacks = [WandbCallback()]
  
  train_generator = train_datagen.flow_from_directory(
    general_train,
    target_size=(img_width, img_height),
    batch_size=args.batch_size,
    class_mode='categorical',
    follow_links=True)

  validation_generator = test_datagen.flow_from_directory(
    general_val,
    target_size=(img_width, img_height),
    batch_size=args.batch_size,
    class_mode='categorical',
    follow_links=True)
  
  # pre-train on first, general label set (5 classes) for switch_epochs
  general_model.fit_generator(
    train_generator,
    steps_per_epoch=args.num_train // args.batch_size,
    epochs=switch_epochs,
    validation_data=validation_generator,
    callbacks=callbacks,
    validation_steps=args.num_valid // args.batch_size)

  # recompile the model such that the final layer can predict 25 output labels (25 species)
  specific_model = build_small_cnn(args.dropout, 25, args.species_lr)
  # copy weights from general to specific model, up until dropout layer
  for i, layer in enumerate(specific_model.layers[:-3]):
    layer.set_weights(general_model.layers[i].get_weights())

  # finetune on second, specific label set (25 species) for finetune_epochs,
  # on data generated with specific labels (biological/taxonomic species)
  finetune_epochs = args.epochs - switch_epochs
  spec_train_generator = train_datagen.flow_from_directory(
    specific_train,
    target_size=(img_width, img_height),
    batch_size=args.batch_size,
    class_mode='categorical',
    follow_links=True)

  spec_validation_generator = test_datagen.flow_from_directory(
    specific_val,
    target_size=(img_width, img_height),
    batch_size=args.batch_size,
    class_mode='categorical',
    follow_links=True)

  specific_model.fit_generator(
    spec_train_generator,
    steps_per_epoch=args.num_train // args.batch_size,
    epochs=finetune_epochs,
    validation_data=spec_validation_generator,
    callbacks=callbacks,
    validation_steps=args.num_valid // args.batch_size)

  save_model_filename = args.model_name + ".h5"
  specific_model.save_weights(save_model_filename)
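
The weight transfer above relies on the general and specific models sharing their architecture up to the copied index; the pattern can be isolated into a small helper, sketched below under that assumption:

# Minimal sketch of layer-by-layer weight copying between architecturally matching Keras models.
def copy_shared_weights(src_model, dst_model, n_skip_last=3):
    """Copy weights from src_model into all but the last n_skip_last layers of dst_model."""
    for i, layer in enumerate(dst_model.layers[:-n_skip_last]):
        layer.set_weights(src_model.layers[i].get_weights())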
Example #14
def finetune_base_cnn(args):
  """ Load a pre-trained model and pre-train it for some epochs (args.pretrain_epochs).
  Then freeze learned layers up to args.freeze_layer, and continue training the remaining
  layers for the rest of the epochs (args.epochs) """
  wandb.init(project=args.project_name)
  callbacks = [WandbCallback()]
  
  # basic data augmentation
  train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
  test_datagen = ImageDataGenerator(rescale=1. / 255)

  # modify image dims depending on base model (only resnet is different)
  if args.initial_model == "resnet":
    base_img_width = resnet_img_dim
    base_img_height = resnet_img_dim
  else:
    base_img_width = img_width
    base_img_height = img_height

  train_generator = train_datagen.flow_from_directory(
    args.train_data,
    target_size=(base_img_width, base_img_height),
    batch_size=args.batch_size,
    class_mode='categorical',
    follow_links=True)

  validation_generator = test_datagen.flow_from_directory(
    args.val_data,
    target_size=(base_img_width, base_img_height),
    batch_size=args.batch_size,
    class_mode='categorical',
    follow_links=True)

  model = load_pretrained_model(args.initial_model, args.fc_size, args.num_classes)
  log_model_params(model, wandb.config, args, base_img_width)
   
  # Pre-training phase 
  #-----------------------
  model.fit_generator(
    train_generator,
    steps_per_epoch=args.num_train // args.batch_size,
    epochs=args.pretrain_epochs,
    validation_data=validation_generator,
    callbacks = callbacks,
    validation_steps=args.num_valid // args.batch_size)

  # optionally show all layers of the base model
  #for i, layer in enumerate(model.layers):
  #  print i, layer.name
  
  # freeze layers up to the freeze_layer index
  for layer in model.layers[:args.freeze_layer]:
    layer.trainable = False
  for layer in model.layers[args.freeze_layer:]:
    layer.trainable = True

  # recompile model
  model.compile(optimizer=optimizers.SGD(lr=args.learning_rate, momentum=args.momentum), loss='categorical_crossentropy', metrics=["accuracy"])

  # Finetuning phase
  #-----------------------  
  model.fit_generator(
    train_generator,
    steps_per_epoch=args.num_train // args.batch_size,
    epochs=args.epochs,
    validation_data=validation_generator,
    callbacks = callbacks,
    validation_steps=args.num_valid // args.batch_size)

  save_model_filename = args.model_name + ".h5"
  model.save_weights(save_model_filename)
Example #15
           activation='relu',
           input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

# In[10]:

model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# In[ ]:

model.fit(
    X_train,
    y_train,
    epochs=config.epochs,
    verbose=1,
    validation_data=(X_test, y_test),
    callbacks=[WandbCallback(data_type="image", labels=signdata.letters)])

# In[ ]:

test_loss, test_acc = model.evaluate(X_test, y_test)

print('Test accuracy:', test_acc)
Example #16
train_dir = "dogcat-data/train"
val_dir = "dogcat-data-small/validation"

nb_train_samples = get_nb_files(train_dir)
nb_classes = len(glob.glob(train_dir + "/*"))
nb_val_samples = get_nb_files(val_dir)

# data prep
train_generator, validation_generator = generators(preprocess_input, config.img_width, config.img_height, config.batch_size)

# setup model
base_model = InceptionV3(weights='imagenet', include_top=False) #include_top=False excludes final FC layer
model = add_new_last_layer(base_model, nb_classes)
model._is_graph_network = False

# fine-tuning
setup_to_finetune(model)

model.fit_generator(
    train_generator,
    epochs=config.epochs,
    workers=2,
    steps_per_epoch=nb_train_samples * 2 / config.batch_size,
    validation_data=validation_generator,
    validation_steps=nb_val_samples / config.batch_size,
    callbacks=[WandbCallback(data_type="image", generator=validation_generator, labels=['cat', 'dog'],save_model=False)],
    class_weight='auto')

model.save('transfered.h5')
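
`add_new_last_layer` and `setup_to_finetune` are not shown. In the standard Keras transfer-learning recipe they look roughly like the sketch below; the pooling head, FC size, and the layer index at which to freeze are assumptions, not the original code:

# Rough sketch of the helpers used above (assumed); written against tf.keras.
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD

def add_new_last_layer(base_model, nb_classes, fc_size=1024):
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dense(fc_size, activation='relu')(x)
    predictions = Dense(nb_classes, activation='softmax')(x)
    return Model(inputs=base_model.input, outputs=predictions)

def setup_to_finetune(model, n_freeze=172):
    for layer in model.layers[:n_freeze]:
        layer.trainable = False
    for layer in model.layers[n_freeze:]:
        layer.trainable = True
    model.compile(optimizer=SGD(learning_rate=1e-4, momentum=0.9),
                  loss='categorical_crossentropy', metrics=['accuracy'])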
Example #17
    shuffle=False,
    steps_per_epoch=len(df_train) / BATCH_SIZE)

val_generator = validation_datagen.flow_from_dataframe(
    dataframe=df_validate,
    x_col="file",
    y_col="hr",
    batch_size=BATCH_SIZE,
    shuffle=False,
    target_size=INPUT_SHAPE[:2],
    class_mode='raw')

run_name = wandb.run.name

callbacks = [WandbCallback(
    training_data=train_generator,
    validation_data=val_generator,
    input_type="images"),
    ModelCheckpoint(
        filepath=f"models/{run_name}/",
        save_weights_only=True,
        mode='min',
        save_best_only=True),
    ReduceLROnPlateau(factor=0.001, patience=15)
    # LearningRateScheduler(scheduler)
]


def weight_image(image_pixels):
    weight_map = np.load("report_scripts/variance_based_weights_single.npy")
    img = image_pixels * weight_map
    return img
Example #18
def main():
    print('LOAD PARAMETERS')
    args = parse_args()
    cfg_params = parse_params(args.config)
    params_train = cfg_params['train']
    params_model = cfg_params['model']
    params_dataloader = cfg_params['dataloader']
    params_generator = cfg_params['generator']

    tensorboard_save_path, weights_save_file_path, plots_save_path = create_save_folders(
        cfg_params['general'])

    work_dir_path = os.path.join(cfg_params['general']['work_dir'],
                                 cfg_params['general']['project_name'])
    weights_save_path = os.path.join(work_dir_path, 'weights/')

    initial_lr = params_train['learning_rate']
    decay_factor = params_train['decay_factor']
    step_size = params_train['step_size']

    if params_dataloader['validate']:
        callback_monitor = 'val_loss'
    else:
        callback_monitor = 'loss'

    print('LOADING COMPLETED')
    callbacks = [
        LearningRateScheduler(
            lambda x: initial_lr * decay_factor**np.floor(x / step_size)),
        ReduceLROnPlateau(monitor=callback_monitor,
                          factor=0.1,
                          patience=4,
                          verbose=1),
        EarlyStopping(monitor=callback_monitor, patience=10, verbose=1),
        ModelCheckpoint(filepath=weights_save_file_path,
                        monitor=callback_monitor,
                        save_best_only=True,
                        verbose=1)
    ]

    print('CREATE DATALOADER')
    data_loader = ENDataLoader(**params_dataloader)
    print('DATALOADER CREATED!')

    if cfg_params['general']['tensorboard_callback']:
        callbacks.append(TensorBoard(log_dir=tensorboard_save_path))

    if cfg_params['general']['wandb_callback']:
        import wandb
        from wandb.keras import WandbCallback
        wandb.init()
        callbacks.append(
            WandbCallback(data_type="image", labels=data_loader.class_names))

    val_generator = None
    print('CREATE MODEL AND DATA GENERATORS')
    if params_model['mode'] == 'siamese':
        model = SiameseNet(cfg_params, training=True)
        train_generator = SiameseDataGenerator(
            class_files_paths=data_loader.train_data,
            class_names=data_loader.class_names,
            **params_generator)
        if data_loader.validate:
            val_generator = SiameseDataGenerator(
                class_files_paths=data_loader.val_data,
                class_names=data_loader.class_names,
                val_gen=True,
                **params_generator)
        losses = {'output_siamese': contrastive_loss}
        metric = {'output_siamese': accuracy}
    else:
        if cfg_params['general']['gpu_ids']:
            print('Multiple gpu mode')
            gpu_ids = cfg_params['general']['gpu_ids']
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids
            print(f'Using gpu ids: {gpu_ids}')
            gpu_ids_list = gpu_ids.split(',')
            n_gpu = len(gpu_ids_list)
        else:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = '0'
            n_gpu = 1
            print('Use single gpu mode')

        model = TripletNet(cfg_params, training=True)
        if n_gpu > 1:
            strategy = tf.distribute.MirroredStrategy()
            with strategy.scope():
                model.base_model = multi_gpu_model(model.base_model,
                                                   gpus=n_gpu)
            # model.base_model = tf.keras.utils.multi_gpu_model(model.base_model, gpus=n_gpu)

        train_generator = TripletsDataGenerator(
            embedding_model=model.base_model,
            class_files_paths=data_loader.train_data,
            class_names=data_loader.class_names,
            **params_generator)

        if data_loader.validate:
            val_generator = SimpleTripletsDataGenerator(
                data_loader.val_data, data_loader.class_names,
                **params_generator)
        losses = triplet_loss(params_generator['margin'])
        metric = ['accuracy']
    print('DONE')

    if args.resume_from is not None:
        model.load_model(args.resume_from)

    print('COMPILE MODEL')
    model.model.compile(loss=losses,
                        optimizer=params_train['optimizer'],
                        metrics=metric)

    if 'softmax' in cfg_params:
        params_softmax = cfg_params['softmax']
        params_save_paths = cfg_params['general']
        pretrain_backbone_softmax(model.backbone_model, data_loader,
                                  params_softmax, params_save_paths)

    history = model.model.fit_generator(train_generator,
                                        validation_data=val_generator,
                                        epochs=params_train['n_epochs'],
                                        callbacks=callbacks,
                                        verbose=1,
                                        use_multiprocessing=False)

    if params_train['plot_history']:
        plot_grapths(history, plots_save_path)
Example #19
#model.add(Permute((3,1,2,4)))
#model.add(ConvLSTM2D(16, (3,3), padding='same', recurrent_dropout=0.2, dropout=0.2, activation='tanh', #recurrent_activation='hard_sigmoid', return_sequences=False))
#model.add(Conv2D(32,(3,3),padding='same',activation='relu'))
#model.add(BatchNormalization())
#model.add(Conv2D(3,(3,3),padding='same'))


def perceptual_distance(y_true, y_pred):
    rmean = (y_true[:, :, :, 0] + y_pred[:, :, :, 0]) / 2
    r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]
    g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]
    b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]

    return K.mean(
        K.sqrt((((512 + rmean) * r * r) / 256) + 4 * g * g +
               (((767 - rmean) * b * b) / 256)))


model.compile(optimizer='adam',
              loss=[perceptual_distance],
              metrics=[perceptual_distance])
model.summary()

model.fit_generator(
    my_generator(config.batch_size, train_dir),
    steps_per_epoch=len(glob.glob(train_dir + "/*")) // config.batch_size,
    epochs=config.num_epochs,
    callbacks=[ImageCallback(), WandbCallback()],
    validation_steps=len(glob.glob(val_dir + "/*")) // config.batch_size,
    validation_data=my_generator(config.batch_size, val_dir))
Example #20
seq1 = Sequential()
seq1.add(Flatten(input_shape=(28,28)))
seq1.add(Dense(128, activation='relu'))

seq2 = Sequential()
seq2.add(Flatten(input_shape=(28,28)))
seq2.add(Dense(128, activation='relu'))

merge_layer = Concatenate()([seq1.output, seq2.output])
dense_layer = Dense(1, activation="sigmoid")(merge_layer)
model = Model(inputs=[seq1.input, seq2.input], outputs=dense_layer)

model.compile(loss = "binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()

wandb.init(project="siamese")
model.fit([pairs_train[:,0], pairs_train[:,1]], labels_train[:], batch_size=16, epochs= 10, callbacks=[WandbCallback()])

input = Input((28,28))   
x = Flatten()(input)
x = Dense(128, activation='relu')(x)    # add dense layer of neural network to the model
dense = Model(input, x)

input1 = Input((28,28))
input2 = Input((28,28))

dense1 = dense(input1)
dense2 = dense(input2)

merge_layer = Concatenate()([dense1, dense2])
dense_layer = Dense(1, activation="sigmoid")(merge_layer)
model = Model(inputs=[input1, input2], outputs=dense_layer)
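
`pairs_train` and `labels_train` are built elsewhere; a common way (assumed here) to create same/different pairs from a labelled dataset such as MNIST is sketched below, with 1 marking a same-class pair:

# Sketch of pair construction for the Siamese model above (assumed, not the original code).
import numpy as np

def make_pairs(x, y, rng=None):
    rng = rng or np.random.default_rng(0)
    classes = np.unique(y)
    idx_by_class = {c: np.where(y == c)[0] for c in classes}
    pairs, labels = [], []
    for i, label in enumerate(y):
        # positive pair: another example of the same class
        j = rng.choice(idx_by_class[label])
        pairs.append([x[i], x[j]])
        labels.append(1)
        # negative pair: an example drawn from a different class
        other = rng.choice(classes[classes != label])
        k = rng.choice(idx_by_class[other])
        pairs.append([x[i], x[k]])
        labels.append(0)
    return np.array(pairs), np.array(labels, dtype='float32')

# e.g. pairs_train, labels_train = make_pairs(x_train, y_train)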
Example #21
        m.compile(optimizer=opt,
                  loss="sparse_categorical_crossentropy",
                  metrics=['accuracy'])

        log_dir = "logs/fit/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                  histogram_freq=1)

        m.summary()
        m.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              batch_size=32,
              epochs=120,
              callbacks=[WandbCallback()])

        test_err, test_acc = m.evaluate(x_test, y_test, verbose=1)
        print("Accuracy on testing data", test_acc)

        model_json = m.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        print("Saved model to disk")
        m.save_weights("model.h5")

    else:
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = keras.models.model_from_json(loaded_model_json)
Example #22
def create_model_and_train(n_layers, n_filters, load_model_path = None):

    if load_model_path:
        model = load_model(load_model_path)

    else:
        skip_layers = []

        # First layer
        input_gray = Input(shape = (config.height, config.width))  # same as Y channel
        CrCb = Reshape((config.height, config.width,1))(input_gray)
        if config.l2_loss:
            CrCb = Conv2D(n_filters, (3, 3), activation='relu', padding='same',
                          kernel_regularizer=regularizers.l2(config.l2_loss),
                          bias_regularizer=regularizers.l2(config.l2_loss))(CrCb)
        else:
            CrCb = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(CrCb)
        CrCb = BatchNormalization()(CrCb)
        if config.l2_loss:
            CrCb = SeparableConv2D(n_filters, (3, 3), activation='relu', padding='same',
                                   depthwise_regularizer=regularizers.l2(config.l2_loss),
                                   pointwise_regularizer=regularizers.l2(config.l2_loss),
                                   bias_regularizer=regularizers.l2(config.l2_loss))(CrCb)
        else:                  
            CrCb = SeparableConv2D(n_filters, (3, 3), activation='relu', padding='same')(CrCb)
        CrCb = BatchNormalization()(CrCb)

        # Down layers
        for n_layer in range(1, n_layers):
            skip_layers.append(CrCb)
            n_filters *= 2
            CrCb = MaxPooling2D(2,2)(CrCb)
            for i in range(2):
                if config.l2_loss:
                    CrCb = SeparableConv2D(n_filters, (3, 3), activation='relu', padding='same',
                                        depthwise_regularizer=regularizers.l2(config.l2_loss),
                                        pointwise_regularizer=regularizers.l2(config.l2_loss),
                                        bias_regularizer=regularizers.l2(config.l2_loss))(CrCb)
                else:                  
                    CrCb = SeparableConv2D(n_filters, (3, 3), activation='relu', padding='same')(CrCb)
                CrCb = BatchNormalization()(CrCb)

        # Up layers are made of Transposed convolution + 2 sets of separable convolution
        for n_layer in range(1, n_layers):
            n_filters //= 2
            if config.l2_loss:
                CrCb = Conv2DTranspose(n_filters, (2, 2), strides=2, activation='relu', padding='same',
                                       kernel_regularizer=regularizers.l2(config.l2_loss),
                                       bias_regularizer=regularizers.l2(config.l2_loss))(CrCb)
            else:
                CrCb = Conv2DTranspose(n_filters, (2, 2), strides=2, activation='relu', padding='same')(CrCb)
            CrCb = concatenate([CrCb, skip_layers[-n_layer]], axis = -1)
            CrCb = BatchNormalization()(CrCb)
            for i in range(2):
                if config.l2_loss:
                    CrCb = SeparableConv2D(n_filters, (3, 3), activation='relu', padding='same',
                                        depthwise_regularizer=regularizers.l2(config.l2_loss),
                                        pointwise_regularizer=regularizers.l2(config.l2_loss),
                                        bias_regularizer=regularizers.l2(config.l2_loss))(CrCb)
                else:                  
                    CrCb = SeparableConv2D(n_filters, (3, 3), activation='relu', padding='same')(CrCb)
                CrCb = BatchNormalization()(CrCb)
 
        # Create output classes
        if config.l2_loss:
            CrCb = Conv2D(2, (1, 1), activation='tanh', padding='same',
                          kernel_regularizer=regularizers.l2(config.l2_loss),
                          bias_regularizer=regularizers.l2(config.l2_loss))(CrCb)
        else:
            CrCb = Conv2D(2, (1, 1), activation='tanh', padding='same')(CrCb)

        model = Model(inputs=input_gray, outputs = CrCb)
    
    # Set optimizer
    adam = optimizers.Adam(lr=config.learning_rate)
    model.compile(optimizer=adam, loss='mse')

    # Load validation data
    (val_bw_images, val_color_images) = next(generator(images_per_val_epoch, valid_dir))

    # Train
    model.fit_generator(generator(config.batch_size, train_dir, training=True),
                        steps_per_epoch=int(images_per_train_epoch / config.batch_size),
                        epochs=config.num_epochs, callbacks=[WandbCallback(data_type='image', predictions=16),
                        ModelCheckpoint(filepath = 'model/weights.{epoch:03d}.hdf5')],  # TODO add datetime
                        validation_data=(val_bw_images, val_color_images))
Example #23
            embedding_matrix[index] = embedding_vector


'''
model = Sequential()
model.add(Embedding(config.vocab_size, 100, input_length=config.maxlen, weights=[embedding_matrix], trainable=False))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(100,activation="relu"))
model.add(Dropout(0.6))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
'''
model = Sequential()
model.add(Embedding(config.vocab_size, 100, input_length=config.maxlen))
model.add(CuDNNLSTM(config.hidden_dims))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=config.batch_size,
          epochs=config.epochs,
          validation_data=(X_test, y_test), callbacks=[WandbCallback()])




Example #24
    return cached['train_faces'], cached['train_emotions'], cached['val_faces'], cached['val_emotions']


# loading dataset
train_faces, train_emotions, val_faces, val_emotions = load_fer2013()
num_samples, num_classes = train_emotions.shape

train_faces /= 255.
val_faces /= 255.

# Define the model here, CHANGEME
inp = tf.keras.Input(input_shape)
x = tf.keras.layers.Flatten()(inp)
x = tf.keras.layers.Dense(num_classes, activation="softmax")(x)
model = tf.keras.Model(inp, x)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])

# log the number of total parameters
config.total_params = model.count_params()
model.fit(train_faces, train_emotions, batch_size=config.batch_size,
          epochs=config.num_epochs, verbose=1, callbacks=[
              Perf(val_faces),
              WandbCallback(data_type="image", labels=[
                            "Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"])
          ], validation_data=(val_faces, val_emotions))

# save the model
model.save("emotion.h5")
Example #25
config = wandb.config
config.batch_size = 128
config.epochs = 10
config.learn_rate = 1000

class_names = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]
num_classes = len(class_names)

(X_train, y_train), (X_test, y_test) = cifar10.load_data()

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Flatten())
model.add(Dense(num_classes))
model.compile(loss='mse',
              optimizer=Adam(config.learn_rate),
              metrics=['accuracy'])

model.fit(X_train,
          y_train,
          epochs=10,
          batch_size=128,
          validation_data=(X_test, y_test),
          callbacks=[WandbCallback(data_type="image", labels=class_names)])
Example #26
def test_no_init():
    with pytest.raises(wandb.errors.Error):
        WandbCallback()
Example #27
wb.login()

wb.init(project='bird_ID', config={'lr': 1e-3, 'bs': 32})
config = wb.config

keras.backend.clear_session()

model = tf.keras.Sequential([
            layers.Conv2D(filters=32, kernel_size=(4,4), strides=1, activation='relu', input_shape=(284, 257, 1)),
            layers.MaxPool2D(pool_size=(4,4)),
            layers.Conv2D(filters=64, kernel_size=(4,4), strides=1, activation='relu'),
            layers.MaxPool2D(pool_size=(4,4)),
            layers.Flatten(),
            layers.Dense(64, activation='relu'),
            layers.Dense(3)
])

model.summary()
model.compile(optimizer=tf.optimizers.Adam(learning_rate=config.lr), loss=losses.SparseCategoricalCrossentropy(from_logits=True), metrics='accuracy')

AUTOTUNE = tf.data.AUTOTUNE

train_ds_ = train_ds.shuffle(500, seed=seed).cache().prefetch(AUTOTUNE).batch(config.bs)
val_ds_ = val_ds.shuffle(500, seed=seed).cache().prefetch(AUTOTUNE).batch(config.bs)

#LargeDataset
model.fit(train_ds_, epochs=2, validation_data=val_ds_, callbacks=[WandbCallback()])

wb.finish()

Example #28
# one hot encode outputs
print("BEFORE")
print(y_train)
print("Size = ")
print(y_train.size)
y_train = np_utils.to_categorical(y_train)
print("AFTER")
print(y_train)
print("Size=")
print(y_train.size)
y_test = np_utils.to_categorical(y_test)
labels = range(10)

num_classes = y_train.shape[1]

# create model
model=Sequential()
model.add(Flatten(input_shape=(img_width,img_height)))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax')) #when having categorization, using categorical cross entropy, num_classes=num digits
model.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])

# Fit the model
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test),
                    callbacks=[WandbCallback(validation_data=X_test, labels=labels)])


Example #29
num_classes = y_test.shape[1]

sgd = SGD(lr=config.learn_rate,
          decay=config.decay,
          momentum=config.momentum,
          nesterov=True)

# build model
model = Sequential()
model.add(
    Conv2D(config.layer_1_size, (5, 5),
           activation='relu',
           input_shape=(img_width, img_height, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(config.layer_2_size, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(config.dropout))
model.add(Flatten())
model.add(Dense(config.hidden_layer_size, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

# Add Keras WandbCallback
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
model.fit(X_train,
          y_train,
          validation_data=(X_test, y_test),
          epochs=config.epochs,
          callbacks=[WandbCallback(data_type="image", labels=labels)])
Example #30
        a = dataset[i:(i+config.look_back)]
        dataX.append(a)
        dataY.append(dataset[i + config.look_back])
    return np.array(dataX), np.array(dataY)


data = load_data()

# normalize data to between 0 and 1
max_val = max(data)
min_val = min(data)
data = (data-min_val)/(max_val-min_val)

# split into train and test sets
split = int(len(data) * 0.70)
train = data[:split]
test = data[split:]

trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)

trainX = trainX[:, :, np.newaxis]
testX = testX[:, :, np.newaxis]

# create and fit the RNN
model = Sequential()
model.add(SimpleRNN(1, input_shape=(config.look_back, 1)))
model.compile(loss='mse', optimizer='adam')
model.fit(trainX, trainY, epochs=500, batch_size=1, validation_data=(testX, testY),  callbacks=[
          WandbCallback(), PlotCallback(trainX, trainY, testX, testY, config.look_back)])
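
Only the tail of `create_dataset` survives at the top of this example; the full sliding-window helper typically looks like the sketch below (reconstructed, with `look_back` passed explicitly instead of read from `config`):

# Assumed reconstruction of the sliding-window helper whose tail appears above.
import numpy as np

def create_dataset(dataset, look_back):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)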