Example 1
    def fit(self,
            x_train,
            y_train,
            batch_size=32,
            epochs=100,
            validation_data=None,
            class_weight=None):

        history = self.model.fit(x=x_train,
                                 y=y_train,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 callbacks=self.callback_list,
                                 validation_data=validation_data,
                                 class_weight=class_weight)  # pass the class weights through to Keras

        utils.plot_metrics(history.history)
Example 2
def main(epochs, load_data, opt_adam, lr, weight_decay, read_from_saved,
         read_from_file, force_cpu, nosave):
    if force_cpu:
        device = torch.device("cpu")
    else:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dtype = torch.float
    print("Running on:", device)

    if read_from_file is not None:
        network = SNN.from_file(read_from_file, device=device, dtype=dtype)
    elif read_from_saved:
        network = SNN.from_file('logs/save_network.net',
                                device=device,
                                dtype=dtype)
    else:
        network = SNN.from_last_checkpoint(device=device, dtype=dtype)
    network.time_expector = time_expector
    # network.notifier = notify # FIXME

    if opt_adam:
        opt = RAdam(network.get_trainable_parameters(lr, weight_decay))
    else:
        opt = torch.optim.SGD(network.get_trainable_parameters(lr, weight_decay),
                              lr=lr,
                              momentum=0.9)

    if not nosave:
        with open('logs/results.log', 'a+') as f:
            res_metrics = network.fit(load_data,
                                      epochs=epochs,
                                      optimizer=opt,
                                      result_file=f,
                                      save_checkpoints=True)
    else:
        res_metrics = network.fit(load_data,
                                  epochs=epochs,
                                  optimizer=opt,
                                  save_checkpoints=False)

    plot_metrics(res_metrics,
                 save_plot_path=None if nosave else './logs/metrics_C_')
    if not nosave:
        network.save('logs/save_network.net')
Example 3
def run_modelpat(train_generatorp, test_generatorp, val_generatorp):

    adam = Adam(lr=3e-5)
    #randomly shuffling training data
    #X_train , y_train = shuffle(X_train , y_train)
    scores = []

    print("Current running model: model pat")
    model = make_unet(img_shape)
    #model.load_weights('/tmp/weights25th_logging'+str(i)+'.hdf5')
    model.compile(optimizer=adam,
                  loss=soft_dice_loss,
                  metrics=[dice_coeff, 'acc', sp, sn])
    model.summary()
    save_model_path = 'Other/unetartery.hdf5'
    earlystopper = EarlyStopping(patience=15, verbose=1)
    cp = ModelCheckpoint(filepath=save_model_path,
                         monitor='val_loss',
                         save_best_only=True,
                         verbose=1)

    history = model.fit_generator(
        train_generatorp,
        steps_per_epoch=90,
        epochs=200,
        validation_data=val_generatorp,
        validation_steps=25,
        callbacks=[cp, earlystopper])  # tensorboard_callback could also be appended here
    scores.append(
        model.evaluate_generator(test_generatorp,
                                 steps=12,
                                 workers=1,
                                 verbose=1))
    save_all(model, 25)
    plot_metrics(history, 25)
    print("finished model ")
    #model.reset_states()
    return scores, model
Example 4
def train_and_test_model(model, save_dir, data_loader, learning_rate=0.01,
                         epochs=2, gpu=False, plot=False):

    # initialize weights and biases
    for layer in iter(model.classifier.children()):
        if len(layer.state_dict()) > 0:
            layer.bias.data.fill_(0)
            layer.weight.data.normal_(std=0.01)

    # freeze all of VGG's parameters; only the new final classifier layer will be trained
    for param in model.parameters():
        param.requires_grad = False

    # enable training the classifier with the new output
    for param in model.classifier[-1].parameters():
        param.requires_grad_()

    best_acc_epoch = 0
    best_acc = 0.0
    avg_loss_val = 0
    avg_acc_val = 0
    train_losses = []
    train_accuracy = []
    valid_losses = []
    valid_accuracy = []
    best_model_wts = copy.deepcopy(model.state_dict())

    print("TRAINING MODEL with following parameters")
    print("\tsave_dir (for checkpoints):", save_dir)
    print("\tlearning_rate:", learning_rate)
    print("\tepochs:", epochs)
    print("\tgpu:", gpu)

    if gpu and not torch.cuda.is_available():
        print("Sorry, no cuda enabled gpu is available.")
        print("Training on the cpu...")
        gpu = False

    if gpu:
        model.cuda()
    
    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model.classifier[-1].parameters(), lr=learning_rate, momentum=0.9)

    since = time.time()
    for e in range(epochs):
        for phase in ['train', 'valid']:
            correct = 0
            running_loss = 0
            for inputs,labels in data_loader[phase]:
                batch_loss, batch_correct = run_batch(phase, gpu, inputs, labels, 
                                             optimizer_ft, model, criterion)
                running_loss += batch_loss # loss.item()
                # for computing accuracy
                correct += batch_correct # torch.sum(preds == labels.data)
                # free gpu resources
                torch.cuda.empty_cache()

            epoch_acc = float(correct) / float(dataset_sizes[phase])
                
            if phase == "train":
                train_losses.append(running_loss)
                train_accuracy.append(epoch_acc)
            else: # "valid" 
                valid_losses.append(running_loss)
                valid_accuracy.append(epoch_acc)
                if epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_acc_epoch = e
                    best_model_wts = copy.deepcopy(model.state_dict())
            
            print("Epoch {}: {}\n\tTotal Loss: {:.4f}\n\tAccuracy: {:d}/{:d} = {:.4f}\n".format(
                e, phase.upper(), running_loss, correct, dataset_sizes[phase], epoch_acc))
             
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print("Best Accuracy: {} in epoch {}".format(round(best_acc, 3), best_acc_epoch))
    
    if plot:
        utils.plot_metrics(train_losses, valid_losses, train_accuracy, valid_accuracy, e) 

    # Test data
    phase = 'test'
    correct = 0
    running_loss = 0  # reset accumulators so the test metrics do not carry over from training
    for inputs,labels in data_loader[phase]:
        batch_loss, batch_correct = run_batch(phase, gpu, inputs, labels, 
                                     optimizer_ft, model, criterion)
        running_loss += batch_loss 
        # for computing accuracy
        correct += batch_correct 
        # free gpu resources
        torch.cuda.empty_cache()

    test_acc = float(correct) / float(dataset_sizes[phase])
    print("{}\nTotal Loss: {:.4f}\nAccuracy: {:d}/{:d} = {:.4f}".format(phase.upper(),
        running_loss, correct, dataset_sizes[phase], test_acc))

    return model, optimizer_ft, e
Example 5
    def train(self,
              X_train,
              Y_train,
              X_val,
              Y_val,
              max_epochs,
              batch_size,
              learning_rate_init,
              reg_param=0,
              learning_rate_decay_type='inverse',
              learning_rate_decay_parameter=10,
              early_stopping=True,
              save_path='./UNet',
              reset_parameters=False,
              check_val_every_n_batches=5,
              seed=0,
              data_on_GPU=True):
        '''
        Trains the network on the given data, provided as numpy arrays. It is assumed all preprocessing has already been done, including shuffling and splitting of the data into training/validation sets.
        '''

        # (Re)load base graph from file
        print('Loading graph...')
        saver = self.load_graph(save_path)

        with self.G.as_default():

            print('Inserting data augmentation operations...')
            # Load data onto GPU, replace the input placeholder with an index into the data on the GPU (if applicable)
            m_train, height, width, n_channels = X_train.shape
            m_val = X_val.shape[0]
            m = m_train + m_val
            n_classes = Y_train.shape[-1]
            if data_on_GPU:
                X_train_t = tf.constant(X_train, dtype=tf.uint8)
                X_val_t = tf.constant(X_val, dtype=tf.uint8)
                Y_train_t = tf.constant(Y_train, dtype=tf.bool)
                Y_val_t = tf.constant(Y_val, dtype=tf.bool)
                del X_train, X_val, Y_train, Y_val
                train_idx = tf.placeholder_with_default([0], shape=[None])
                X_train_t = tf.gather(X_train_t, train_idx, axis=0)
                Y_train_t = tf.gather(Y_train_t, train_idx, axis=0)
            else:
                X_train_t = tf.placeholder_with_default(np.zeros(
                    [0, height, width, n_channels], dtype=np.uint8),
                                                        shape=self.input.shape,
                                                        name='X_train_input')
                X_val_t = tf.placeholder_with_default(np.zeros(
                    [0, height, width, n_channels], dtype=np.uint8),
                                                      shape=self.input.shape,
                                                      name='X_val_input')
                Y_train_t = tf.placeholder_with_default(
                    np.zeros([0, height, width, n_classes], dtype=np.bool),
                    shape=self.labels.shape,
                    name='Y_train_input')
                Y_val_t = tf.placeholder_with_default(np.zeros(
                    [0, height, width, n_classes], dtype=np.bool),
                                                      shape=self.labels.shape,
                                                      name='Y_val_input')
            # Insert data augmentation steps to graph
            train_or_val_idx = tf.placeholder(dtype=tf.int32, shape=[None])
            X_train_aug, Y_train_aug = self.data_augmentation(
                X_train_t, Y_train_t)
            X = tf.cast(
                tf.gather(tf.concat([X_train_aug, X_val_t], axis=0),
                          train_or_val_idx), tf.float32)
            Y = tf.cast(
                tf.gather(tf.concat([Y_train_aug, Y_val_t], axis=0),
                          train_or_val_idx), tf.float32)
            ge.swap_ts([X, Y],
                       [self.input, self.labels])  # Use X and Y from now on!

            # Write to log file
            with open(str(save_path) + '.log', 'w+') as fo:
                fo.write('Training log\n\n')
                fo.write('Dataset metrics:\n')
                fo.write('Training data shape: {}\n'.format(
                    (m_train, height, width, n_channels)))  # X_train itself may already be deleted when data_on_GPU
                fo.write('Validation set size: {}\n'.format(m_val))
                #                fo.write('X_mean: {}\n'.format(X_mean))
                #                fo.write('X_std: {}\n\n'.format(X_std))
                fo.write('Hyperparameters:\n')
                fo.write('Batch size: {}\n'.format(batch_size))
                fo.write('Learning rate: {}\n'.format(learning_rate_init))
                fo.write('Learning rate annealed every N epochs: {}\n'.format(
                    learning_rate_decay_parameter))
                fo.write('Learning rate anneal type: {}\n'.format(
                    learning_rate_decay_type))
                #                fo.write('Stepped anneal: {}\n'.format(STEPPED_ANNEAL))
                #                fo.write('Regularization type: {}\n'.format(REGULARIZATION_TYPE))
                fo.write('Regularization parameter: {}\n'.format(reg_param))
                #                fo.write('Input noise variance: {:.2f}\n'.format(INPUT_NOISE_MAGNITUDE**2))
                #                fo.write('Weight noise variance: {:.2f}\n'.format(WEIGHT_NOISE_MAGNITUDE**2))
                #                for n in range(1,len(KEEP_PROB)+1):
                #                    fo.write('Dropout keep prob. group {}: {:.2f}\n'.format(n, KEEP_PROB[n]))
                fo.write('Logging frequency: {} global steps\n'.format(
                    check_val_every_n_batches))
                fo.write('Random seed: {}\n'.format(seed))

            # Initialize control flow variables and logs
#            max_val_accuracy = -1
#            max_val_conf = -1
            best_val_loss = np.inf
            global_step = 0
            #            with open(str(save_path)+'_val_accuracy.log', 'w+') as fo:
            #                fo.write('')
            with open(str(save_path) + '_val_loss.log', 'w+') as fo:
                fo.write('')
#            with open(str(save_path)+'_val_confidence.log', 'w+') as fo:
#                fo.write('')
#            with open(str(save_path)+'_train_accuracy.log', 'w+') as fo:
#                fo.write('')
            with open(str(save_path) + '_train_loss.log', 'w+') as fo:
                fo.write('')
#            with open(str(save_path)+'_train_confidence.log', 'w+') as fo:
#                fo.write('')
            with open(str(save_path) + '_learning_rate.log', 'w+') as fo:
                fo.write('')

            # Start tensorflow session, reset_parameters or reload checkpoint
            print('Starting tensorflow session...')
            with tf.Session() as sess:
                if reset_parameters:
                    saver = tf.train.Saver()
                    sess.run(tf.global_variables_initializer())
                else:
                    saver.restore(sess, save_path)

                uninitialized_vars = []
                for var in tf.global_variables():
                    try:
                        sess.run(var)
                    except tf.errors.FailedPreconditionError:
                        uninitialized_vars.append(var)

                sess.run(tf.variables_initializer(uninitialized_vars))

                # Iterate over training epochs
                best_val_loss = np.inf
                for epoch in range(max_epochs):
                    if learning_rate_decay_type == 'inverse':
                        learning_rate = learning_rate_init / (
                            1 + epoch / learning_rate_decay_parameter)
                    elif learning_rate_decay_type == 'constant':
                        learning_rate = learning_rate_init
                    elif learning_rate_decay_type == 'exponential':
                        learning_rate = learning_rate_init * np.exp(
                            -epoch / learning_rate_decay_parameter)
                    else:
                        raise Exception('Unknown learning rate decay function')

                    # Iterate over batches:
                    n_batches = math.ceil(m_train / batch_size)
                    for b in range(n_batches):
                        train_idx_i = b * batch_size
                        train_idx_f = min((b + 1) * batch_size, m_train)

                        if data_on_GPU:
                            feed_dict = {
                                train_idx: range(train_idx_i, train_idx_f),  # half-open range matches the batch length below
                                train_or_val_idx:
                                range(train_idx_f - train_idx_i),
                                self.learning_rate: learning_rate,
                                self.reg_param: reg_param
                            }
                        else:
                            feed_dict = {
                                X_train_t: X_train[train_idx_i:train_idx_f],
                                Y_train_t: Y_train[train_idx_i:train_idx_f],
                                train_or_val_idx:
                                range(train_idx_f - train_idx_i),
                                self.learning_rate: learning_rate,
                                self.reg_param: reg_param
                            }
                        train_loss, _ = sess.run([self.loss, self.train_op],
                                                 feed_dict=feed_dict)
                        print('Epoch {}, batch {}/{}: loss={:.3e}'.format(
                            epoch + 1, b, n_batches, train_loss))
                        if np.isnan(train_loss) or np.isinf(train_loss):
                            print('Detected nan, exiting training')
                            quit()

                        if (global_step % check_val_every_n_batches) == 0:
                            if data_on_GPU:
                                feed_dict = {
                                    train_or_val_idx: range(1, m_val + 1),
                                    self.reg_param: reg_param
                                }
                            else:
                                feed_dict = {
                                    X_val_t: X_val,
                                    Y_val_t: Y_val,
                                    train_or_val_idx: range(m_val),
                                    self.reg_param: reg_param
                                }
                            val_loss = sess.run(self.loss, feed_dict=feed_dict)
                            if early_stopping and (val_loss < best_val_loss):
                                best_val_loss = val_loss
                                print(
                                    'New best validation loss: {:.3e}! Saving...'
                                    .format(val_loss))
                                saver.save(sess,
                                           save_path,
                                           write_meta_graph=False)

                            # Write to the logs every time the validation set is run
                            with open(str(save_path) + '_train_loss.log',
                                      'a') as fo:
                                fo.write(str(train_loss) + '\n')
#                                with open(str(save_path)+'_train_accuracy.log', 'a') as fo:
#                                    fo.write(str(train_accuracy)+'\n')
#                                with open(str(save_path)+'_train_confidence.log', 'a') as fo:
#                                    fo.write(str(train_conf)+'\n')
#                                with open(str(save_path)+'_val_accuracy.log', 'a') as fo:
#                                    fo.write(str(val_accuracy)+'\n')
                            with open(str(save_path) + '_val_loss.log',
                                      'a') as fo:
                                fo.write(str(val_loss) + '\n')
#                                with open(str(save_path)+'_val_confidence.log', 'a') as fo:
#                                    fo.write(str(val_conf)+'\n')
                            with open(
                                    str(save_path) + '_learning_rate.log',
                                    'a') as fo:
                                fo.write(str(learning_rate) + '\n')
                            u.plot_metrics(str(save_path))

                        global_step += 1
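The docstring above notes that train() expects preprocessed numpy arrays that have already been shuffled and split into training and validation sets. A minimal, hedged sketch of how such a call might look (the array shapes, the UNet() constructor, and the save path are illustrative assumptions, not taken from the example):

# Hedged usage sketch for the train() method above; shapes, paths and the
# UNet() constructor are assumptions for illustration only.
import numpy as np

# Toy, already-shuffled data: 100 training and 20 validation images with 2-class boolean masks
X_train = np.random.randint(0, 256, size=(100, 128, 128, 3), dtype=np.uint8)
Y_train = np.random.rand(100, 128, 128, 2) > 0.5
X_val = np.random.randint(0, 256, size=(20, 128, 128, 3), dtype=np.uint8)
Y_val = np.random.rand(20, 128, 128, 2) > 0.5

net = UNet()  # assumed wrapper class exposing the train() method shown above
net.train(X_train, Y_train, X_val, Y_val,
          max_epochs=10,
          batch_size=4,
          learning_rate_init=1e-3,
          save_path='./UNet',
          data_on_GPU=False)  # keep the data in host memory and feed it batch by batch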
Example 6
        logger.add_scalar(epoch, 'train.loss', train_loss)
        logger.add_scalar(epoch, 'val.loss', val_loss)
        logger.add_scalar(epoch, 'train.acc', train_acc)
        logger.add_scalar(epoch, 'val.acc', val_acc)
        if args.odenet:
            logger.add_scalar(epoch, 'train.forward_nfe',
                              train_forward_nfe.avg)
            logger.add_scalar(epoch, 'train.backward_nfe',
                              train_backward_nfe.avg)
            logger.add_scalar(epoch, 'val.forward_nfe', val_nfe)
        logger.add_scalar(epoch, 'time', time.time() - t0)
        t0 = time.time()
        logger.iter_info()
        logger.save()

    if epoch % args.vis_each == 0 or epoch == 1 or epoch == args.epochs:
        utils.plot_metrics(logger.scalar_metrics, args.odenet)
        plt.savefig(os.path.join(args.root, 'training_curves.png'))
        plt.close()

torch.save(model.state_dict(), os.path.join(args.root, 'model_final.torch'))
torch.save(optimizer.state_dict(),
           os.path.join(args.root, 'optimizer_final.torch'))

if args.verbose:
    print(model)
    for param in model.named_parameters():
        print(param)

parser.done()
Example 7
def main():
    st.title("Binary Classification Web App")
    st.sidebar.title("Binary Classification Web App")
    st.markdown("Are your mushrooms edible or poisonous? 🍄")
    st.sidebar.markdown("Are your mushrooms edible or poisonous? 🍄")

    df = utils.load_data()
    x_train, x_test, y_train, y_test = utils.split(df)
    class_names = ["edible", "poisonous"]

    st.sidebar.subheader("Choose Classifier")
    classifier = st.sidebar.selectbox(
        "Classifier", ("Support Vector Machine (SVM)", "Logistic Regression",
                       "Random Forest Classification"))

    if classifier == 'Support Vector Machine (SVM)':
        st.sidebar.subheader("Model Hyperparameters")
        C = st.sidebar.number_input("C (Regularization parameter)",
                                    0.01,
                                    10.0,
                                    step=0.01,
                                    key='C')
        kernel = st.sidebar.radio("Kernel", ("rbf", "linear"), key='kernel')
        gamma = st.sidebar.radio("Gamma (Kernel Coefficient)",
                                 ("scale", "auto"),
                                 key='gamma')

        metrics = st.sidebar.multiselect(
            "What metrics to plot?",
            ("Confusion Matrix", "ROC Curve", "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Support Vector Machine (SVM) Results")
            model = SVC(C=C, kernel=kernel, gamma=gamma)
            model.fit(x_train, y_train)
            accuracy = model.score(x_test, y_test)
            y_pred = model.predict(x_test)
            st.write("Accuracy: ", accuracy.round(2))
            st.write(
                "Precision: ",
                precision_score(y_test, y_pred, labels=class_names).round(2))
            st.write("Recall: ",
                     recall_score(y_test, y_pred, labels=class_names).round(2))
            utils.plot_metrics(metrics, model, x_test, y_test, class_names)

    if classifier == 'Logistic Regression':
        st.sidebar.subheader("Model Hyperparameters")
        C = st.sidebar.number_input("C (Regularization parameter)",
                                    0.01,
                                    10.0,
                                    step=0.01,
                                    key='Lr')
        max_iter = st.sidebar.slider("Maximum no. of iterations",
                                     100,
                                     500,
                                     key='max_iter')

        metrics = st.sidebar.multiselect(
            "What metrics to plot?",
            ("Confusion Matrix", "ROC Curve", "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Logistic Regression Results")
            model = LogisticRegression(C=C, max_iter=max_iter)
            model.fit(x_train, y_train)
            accuracy = model.score(x_test, y_test)
            y_pred = model.predict(x_test)
            st.write("Accuracy: ", accuracy.round(2))
            st.write(
                "Precision: ",
                precision_score(y_test, y_pred, labels=class_names).round(2))
            st.write("Recall: ",
                     recall_score(y_test, y_pred, labels=class_names).round(2))
            utils.plot_metrics(metrics, model, x_test, y_test, class_names)

    if classifier == 'Random Forest Classification':
        st.sidebar.subheader("Model Hyperparameters")
        n_estimators = st.sidebar.number_input(
            "This is the number of trees in the forest",
            100,
            5000,
            step=10,
            key='n_estimators')
        max_depth = st.sidebar.number_input("The maximum depth of the tree",
                                            1,
                                            100,
                                            step=2,
                                            key='max_depth')
        bootstrap = st.sidebar.radio("Bootstrap samples when building trees",
                                     ("True", "False"),
                                     key='bootstrap')
        metrics = st.sidebar.multiselect(
            "What metrics to plot?",
            ("Confusion Matrix", "ROC Curve", "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Random Forest Results")
            model = RandomForestClassifier(n_estimators=n_estimators,
                                           max_depth=max_depth,
                                           bootstrap=(bootstrap == 'True'),  # the radio widget returns a string, not a bool
                                           n_jobs=-1)
            model.fit(x_train, y_train)
            accuracy = model.score(x_test, y_test)
            y_pred = model.predict(x_test)
            st.write("Accuracy: ", accuracy.round(2))
            st.write(
                "Precision: ",
                precision_score(y_test, y_pred, labels=class_names).round(2))
            st.write("Recall: ",
                     recall_score(y_test, y_pred, labels=class_names).round(2))
            utils.plot_metrics(metrics, model, x_test, y_test, class_names)

    if st.sidebar.checkbox("Show raw data", False):
        st.subheader("Mushroom Data Set (Classification)")
        st.write(df)
Example 8
            eps_history.append(agent_dqn.epsilon)
            avg_score = np.mean(
                score_history[-100:])  # moving average over last 100 games

            print('game-episode: ', i, 'episode-score: ', score,
                  ' || average-score %.2f' % avg_score,
                  'max-score %.2f' % max_score,
                  ' || epsilon %.2f' % agent_dqn.epsilon, 'num_steps',
                  num_steps)

            if score > max_score:
                max_score = score  # possible improvement: track a short moving average of max_score
                agent_dqn.save_network_checkpoints()

        print("Training done!")
        plot_metrics(steps_arr, score_history, eps_history, figure_file_name)

    # Capturing the agent playing
    if create_capture:
        img_arr = []
        agent_dqn.epsilon = 0.0001
        observation = env.reset()

        for i in range(1000):
            frame = np.uint8(observation[0] * 255)
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
            img_arr.append(frame)

            action = agent_dqn.choose_action(observation)
            observation, _, _, _ = env.step(action)
Example 9
setproctitle("(hwijeen) word drop")
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    DATA_DIR = '/home/nlpgpu5/hwijeen/MulDocSumm/data/'
    FILE = 'rottentomatoes_prepared'
    DEVICE = torch.device('cuda:1')
    EPOCH = 50
    WORD_DROP = 0.2

    data = MulSumData(DATA_DIR, FILE, 99, DEVICE)
    selfattnCVAE = build_SelfAttnCVAE(len(data.vocab),
                                      hidden_dim=600,
                                      latent_dim=300,
                                      enc_bidirectional=True,
                                      word_drop=WORD_DROP,
                                      device=DEVICE)
    trainer = Trainer(selfattnCVAE,
                      data,
                      lr=0.001,
                      to_record=['recon_loss', 'kl_loss'])
    results = trainer.train(num_epoch=EPOCH, verbose=True)

    plot_learning_curve(results['train_losses'], results['valid_losses'])
    plot_metrics(results['train_metrics'], results['valid_metrics'])
    plot_kl_loss(trainer.stats.stats['kl_loss'])
Example 10
def main(config):

    # For Reproducibility #
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(config.seed)

    # Weights and Plots Path #
    paths = [config.weights_path, config.plots_path]
    for path in paths:
        make_dirs(path)

    # Prepare Data Loader #
    if config.dataset == 'cifar':
        train_loader, val_loader, test_loader = cifar_loader(
            config.num_classes, config.batch_size)
        input_size = 32
    else:
        raise NotImplementedError

    # Prepare Networks #
    if config.model == 'vit':
        model = VisionTransformer(in_channels=config.in_channels,
                                  embed_dim=config.embed_dim,
                                  patch_size=config.patch_size,
                                  num_layers=config.num_layers,
                                  num_heads=config.num_heads,
                                  mlp_dim=config.mlp_dim,
                                  dropout=config.drop_out,
                                  input_size=input_size,
                                  num_classes=config.num_classes).to(device)

    elif config.model == 'efficient':
        model = EfficientNet.from_name(
            'efficientnet-b0', num_classes=config.num_classes).to(device)

    elif config.model == 'resnet':
        model = resnet34(pretrained=False).to(device)
        model.fc = nn.Linear(config.mlp_dim, config.num_classes).to(device)

    else:
        raise NotImplementedError

    # Weight Initialization #
    if not config.model == 'efficient':
        if config.init == 'normal':
            model.apply(init_weights_normal)
        elif config.init == 'xavier':
            model.apply(init_weights_xavier)
        elif config.init == 'he':
            model.apply(init_weights_kaiming)
        else:
            raise NotImplementedError

    # Train #
    if config.phase == 'train':

        # Loss Function #
        criterion = nn.CrossEntropyLoss()

        # Optimizers #
        if config.num_classes == 10:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=config.lr,
                                         betas=(0.5, 0.999))
            optimizer_scheduler = get_lr_scheduler(config.lr_scheduler,
                                                   optimizer)
        elif config.num_classes == 100:
            optimizer = torch.optim.SGD(model.parameters(),
                                        lr=config.lr,
                                        momentum=0.9,
                                        weight_decay=5e-4)
            optimizer_scheduler = get_lr_scheduler('step', optimizer)
        else:
            raise NotImplementedError

        # Constants #
        best_top1_acc = 0

        # Lists #
        train_losses, val_losses = list(), list()
        train_top1_accs, train_top5_accs = list(), list()
        val_top1_accs, val_top5_accs = list(), list()

        # Train and Validation #
        print("Training {} has started.".format(model.__class__.__name__))
        for epoch in range(config.num_epochs):

            # Train #
            train_loss, train_top1_acc, train_top5_acc = train(
                train_loader, model, optimizer, criterion, epoch, config)

            # Validation #
            val_loss, val_top1_acc, val_top5_acc = validate(
                val_loader, model, criterion, epoch, config)

            # Add items to Lists #
            train_losses.append(train_loss)
            val_losses.append(val_loss)

            train_top1_accs.append(train_top1_acc)
            train_top5_accs.append(train_top5_acc)

            val_top1_accs.append(val_top1_acc)
            val_top5_accs.append(val_top5_acc)

            # If Best Top 1 Accuracy #
            if val_top1_acc > best_top1_acc:
                best_top1_acc = max(val_top1_acc, best_top1_acc)

                # Save Models #
                print("The best model is saved!")
                torch.save(
                    model.state_dict(),
                    os.path.join(
                        config.weights_path,
                        'BEST_{}_{}_{}.pkl'.format(model.__class__.__name__,
                                                   str(config.dataset).upper(),
                                                   config.num_classes)))

            print("Best Top 1 Accuracy {:.2f}%\n".format(best_top1_acc))

            # Optimizer Scheduler #
            optimizer_scheduler.step()

        # Plot Losses and Accuracies #
        losses = (train_losses, val_losses)
        accs = (train_top1_accs, train_top5_accs, val_top1_accs, val_top5_accs)
        plot_metrics(losses, accs, config.plots_path, model, config.dataset,
                     config.num_classes)

        print("Training {} using {} {} finished.".format(
            model.__class__.__name__,
            str(config.dataset).upper(), config.num_classes))

    # Test #
    elif config.phase == 'test':

        test(test_loader, model, config)

    else:
        raise NotImplementedError
Example 11
def train(G, max_episodes, save_path):
    '''
    Trains a DQN to play pong. Periodically saves progress to a checkpoint file, and saves plots of several metrics to monitor training.
        Input:
            G: computational graph by which the action-value function Q is calculated.
            max_episodes: the maximum number of episodes to run for before terminating training
            save_path: a file path to the location of the checkpoint files
        Output: none
    '''

    # Define some constants, lists, metrics, etc
    action_map = {1: 'x', 2: '^', 3: 'v'}  # Stay, up, down
    replay_memory = u.ReplayMemory(max_exp_len=REPLAY_MEM_LEN)
    step_list = []
    reward_list = []
    avg_reward = None
    val_Q_list = []
    episode_length_list = []
    episode_time_list = []
    avg_episode_length_list = []
    avg_episode_length = None
    episode_score_list = {'player': [], 'computer': []}
    X_val = u.load_validation_screens()

    # Initialize the Pong gym environment, set seeds
    env = gym.make('Pong-v0')
    np.random.seed(SEED)
    tf.set_random_seed(SEED)
    plt.ioff()

    # Gather screens

    # Initialize computational graph
    with G.as_default():
        # Get input/output tensors
        X = G.get_tensor_by_name('X:0')
        Y = G.get_tensor_by_name('Y:0')
        Q = G.get_tensor_by_name('Q:0')
        A = G.get_tensor_by_name('A:0')
        L = G.get_tensor_by_name('L:0')
        LR = G.get_tensor_by_name('LR:0')
        train_op = G.get_operation_by_name('TrainOp')

        saver = tf.train.Saver()

        # Initialize TF session
        with tf.Session() as sess:
            # Reload/initialize variables
            if RELOAD_PARAMETERS:
                print('Reloading from last checkpoint...')
                saver.restore(sess, save_path)
            else:
                print('Initializing variables...')
                sess.run(tf.global_variables_initializer())
            # Iterate over episodes
            global_steps = 0
            for episode in range(max_episodes):
                tic = time.time()
                obs = u.preprocess_image(env.reset())
                for i in range(3):
                    replay_memory.add_frame(
                        np.zeros((160 // DOWNSAMPLE, 160 // DOWNSAMPLE),
                                 dtype=bool))
                replay_memory.add_frame(obs)

                # Iterate over frames
                done = False
                frame = 0
                episode_score = [0, 0]
                while not done:
                    if (global_steps >= OBSERVE_STEPS):
                        # Feed state into DQN
                        s = np.stack(
                            [replay_memory.frames[i] for i in range(-4, 0)],
                            axis=-1).reshape(1, 160 // DOWNSAMPLE,
                                             160 // DOWNSAMPLE, 4)
                        y = sess.run(Y, feed_dict={X: s})

                        # Decide on action
                        epsilon = max(
                            MAX_EPSILON *
                            (1 - global_steps / EPSILON_ANNEALING_STEPS),
                            MIN_EPSILON)
                        if (np.random.rand() < epsilon):
                            a = np.random.choice([1, 2, 3])
                        else:
                            a = np.argmax(y) + 1
                    else:
                        a = np.random.choice([1, 2, 3])

                    # Take action, observe environment, reward
                    obs, r, done, _ = env.step(a)
                    r_sum = r
                    for i in range(STEPS_TO_SKIP):
                        obs, r, done_temp, _ = env.step(1)
                        r_sum += r
                        if done_temp == True:
                            done = True
                    if r_sum > 0:
                        episode_score[0] += int(r_sum)
                    elif r_sum < 0:
                        episode_score[1] -= int(r_sum)

                    # Add new state/reward to replay memory
                    replay_memory.add_frame(u.preprocess_image(obs))
                    experience = (np.stack(list(replay_memory.frames),
                                           axis=-1).astype(bool), a, r_sum,
                                  done)
                    replay_memory.add_exp(experience)

                    # Do training batch update
                    if (global_steps >= OBSERVE_STEPS):
                        S, A_, R, D = replay_memory.sample(BATCH_SIZE)
                        y2 = sess.run(Y, feed_dict={X: S[:, :, :, -4:]})
                        q = R + (1 - D) * GAMMA * np.max(y2, axis=1)
                        _, batch_loss = sess.run(
                            [train_op, L],
                            feed_dict={
                                X: S[:, :, :, -5:-1],
                                Q: q,
                                A: (A_ - 1),
                                LR: LEARNING_RATE
                            })
                        if np.isnan(batch_loss):
                            print('nan error, exiting training')
                            exit()
                        elif (np.mean(np.max(y2, axis=-1)) > 1e2):
                            print('unstable Q value, exiting training')
                            exit()

                        # Print updates
                        print(
                            'Episode: {}/{},\tframe: {},\tscore: {},\t<max(Q)>: {:.3e},\nmax(Q): {:.3e},\taction: {},\tcurrent std(Q)/mean(Q): {:.3e}'
                            .format(episode + 1, max_episodes,
                                    (frame + 1) * (STEPS_TO_SKIP + 1),
                                    episode_score, np.mean(np.max(y2,
                                                                  axis=-1)),
                                    np.max(y), action_map[a],
                                    np.std(y) / np.mean(y)))

                        # Plot frame-by-frame metrics
                        if avg_reward is None:
                            avg_reward = r_sum
                        else:
                            avg_reward = (1 -
                                          np.exp(-1 / 500)) * r_sum + np.exp(
                                              -1 / 500) * avg_reward
                        if (global_steps % PLOT_EVERY_N_STEPS == 0):
                            step_list.append(global_steps)
                            reward_list.append(10 * avg_reward)
                            y_val = sess.run(Y, feed_dict={X: X_val})
                            val_Q_list.append(np.mean(np.max(y_val, axis=-1)))
                            u.plot_metrics(step_list, 'PongMetrics',
                                           'Pong Metrics', 'Global step', '',
                                           (val_Q_list, 'Validation <max(Q)>'),
                                           (reward_list, '10*<R>'))
                    else:
                        print('Observation step {}/{}'.format(
                            global_steps, OBSERVE_STEPS))

                    # Update state variables
                    global_steps += 1
                    frame += 1

                # Save parameters at end of episode, plot episode metrics
                print('Saving parameters...')
                saver.save(sess, save_path)  # save to the same path used for restore above
                episode_length_list.append(frame * (STEPS_TO_SKIP + 1) / 1000)
                if avg_episode_length is None:
                    avg_episode_length = frame * (STEPS_TO_SKIP + 1)
                else:
                    avg_episode_length = (1 - np.exp(-1 / 10)) * frame * (
                        STEPS_TO_SKIP + 1) + np.exp(
                            -1 / 10) * avg_episode_length
                avg_episode_length_list.append(avg_episode_length / 1000)
                toc = time.time()
                episode_time_list.append((toc - tic) / 60)
                episode_score_list['player'].append(episode_score[0])
                episode_score_list['computer'].append(episode_score[1])
                u.plot_metrics(range(episode + 1), 'EpisodeLength',
                               'Episode Length', 'Episode', 'Steps/1000',
                               (episode_length_list, 'Steps/episode'),
                               (avg_episode_length_list, 'Average'))
                u.plot_metrics(range(episode + 1), 'EpisodeScore',
                               'Episode Score', 'Episode', 'Score',
                               (episode_score_list['player'], 'Player'),
                               (episode_score_list['computer'], 'Computer'))
                u.plot_metrics(range(episode + 1), 'EpisodeTime',
                               'Episode time', 'Episode', 'Time (min)',
                               (episode_time_list, 'Episode time'))
Example 12
def main():
    utils.local_css("css/styles.css")
    st.title("Heart Disease Prediction - Manual Parameter Tuner")
    st.sidebar.title("Manual Parameter Tuning")
    st.markdown("### Machine Learning is not only about the algorithms you use but also about the different parameters "
                "assigned to each of them. The final model is heavily affected by the parameters used for a specific "
                "algorithm. "
                "\nThis interactive web app will help you explore the various parameters of different ML algorithms."
                "\nThe different ML models presented here are:"
                "\n* Logistic Regression"
                "\n* Support Vector Classifier"
                "\n* k-Nearest Neighbour Classifier"
                "\n* Decision Tree Classifier"
                "\n* Random Forest Classifier"
                "\n* Gradient Boosting Classifier"
                "\n* XGBoost Classifier"
                "\n### The dataset used here is the **Framingham** Coronary Heart Disease dataset publicly available "
                "at [Kaggle](https://www.kaggle.com/amanajmera1/framingham-heart-study-dataset)."
                "\n## About the Dataset:"
                "\nThe **Framingham** dataset is from an ongoing cardiovascular study"
                " on residents of the town of Framingham, Massachusetts. The classification goal is "
                "to predict whether a patient has a 10-year risk of future coronary heart disease (CHD). The dataset "
                "provides the patients’ information and includes over 4,240 records and 15 attributes."
                "\n### Even after optimizing the parameters, the model will only work properly if accurate data is provided to it."
                " So, through this web app, users can get a feel for hyperparameter tuning, but only on this specific dataset."
                "\n## Head to the *Manual Parameter Tuning* section to get started!")

    st.sidebar.markdown("Manually select the model you want to view and use the interactive text boxes, sliders "
                        "and buttons to tune the respective models. More than one option is provided for each model,"
                        " and you can gain insight into how hyper-parameter tuning works. Enjoy exploring!")

    data = pd.read_csv("Dataset/framingham.csv")
    data = utils.preprocess(data)

    st.sidebar.markdown("\n#### Exploratory Data Analysis:")
    viz_list = st.sidebar.multiselect("(Be sure to clear all selected options here before moving on, for a faster response)",
                                      ('Categorical Visualisation',
                                       'Numerical Visualisation',
                                       'sysBP and diaBP Visualisation'))
    utils.visualize(viz_list, data)
    if st.sidebar.checkbox("View raw and preprocessed data", False):
        st.subheader("Raw-preprocessed Data")
        st.write(data)

    st.sidebar.markdown("\n#### Feature Selection:")
    feature = st.sidebar.radio("Feature selection using chi-squared test", ("Don't select features",
                                                                            "Select Features"), key='feature')
    if feature == "Don't select features":
        st.markdown("### Feature Selection is not done!")
    else:
        st.markdown("Best 10 features along with their chi-squared score")
        score, data = utils.feature_selection(data)
        st.write(score)
        if st.sidebar.checkbox("Plot Feature Selection", False):
            utils.plot_feature_selection(score)


    train_x, test_x, train_y, test_y = utils.split_and_scale(data)

    class_names = ["Has Heart Disease", "Doesn't have Heart Disease"]
    st.sidebar.subheader("Choose Classifier")
    classifier = st.sidebar.selectbox("Classifier", ("Logistic Regression",
                                                     "Support Vector Classifier",
                                                     "k-Nearest Neighbour Classifier",
                                                     "Decision Tree Classifier",
                                                     "Random Forest Classifier",
                                                     "Gradient Boosting Classifier",
                                                     "XGBoost Classifier"))

    if classifier == "Logistic Regression":
        st.sidebar.subheader("Model Hyperparameters")
        C = st.sidebar.number_input("C (Regularization parameter)", 0.01, 10.0, step=0.01, key='Lr')
        max_iter = st.sidebar.slider("Maximum no. of Iterations", 100, 500, key='max_iter')

        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Logistic Regression Results")
            y_pred, accuracy, models = model.LR(train_x, test_x, train_y, test_y, C=C, max_iter=max_iter)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)

    if classifier == "Support Vector Classifier":
        st.sidebar.subheader("Model Hyperparameters")
        C = st.sidebar.number_input("C (Regularization parameter)", 0.01, 10.0, step=0.01, key='C')
        gamma = st.sidebar.radio("Gamma (for non linear hyperplanes)", ("auto", "scale"), key='gamma')
        kernel = st.sidebar.radio("Kernel (type of hyperplane)", ("linear", "rbf", "poly"), key='kernel')
        degree = 3
        if kernel == 'poly':
            degree = st.sidebar.number_input("Degree of the polynomial used to find the hyperplane", 1, 10, step=1,
                                             key='degree')
        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Support Vector Classification Results")
            y_pred, accuracy, models = model.SVM(train_x, test_x, train_y, test_y, C=C, gamma=gamma, kernel=kernel,
                                                 degree=degree)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)

    if classifier == "k-Nearest Neighbour Classifier":
        st.sidebar.subheader("Model Hyperparameters")
        n = st.sidebar.number_input("n_neighbors (Number of nearest neighbors)", 1, 20, step=1, key='n')
        leaf_size = st.sidebar.slider("Leaf Size", 10, 200, key='leaf_size')
        algorithm = st.sidebar.radio("Algorithm to use", ("ball_tree", "kd_tree", "auto"), key='algorithm')

        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("kNN Classification Results")
            y_pred, accuracy, models = model.KNN(train_x, test_x, train_y, test_y, n=n, leaf_size=leaf_size,
                                                 algorithm=algorithm)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)

    if classifier == "Decision Tree Classifier":
        criterion = st.sidebar.radio("Criterion of splitting trees", ("gini", "entropy"), key='criterion')
        max_depth = st.sidebar.slider("Max depth of the tree", 1, 50, key='max_depth')
        min_samples_leaf = st.sidebar.number_input("Minimum Leaf Samples", 1, 10, step=1, key='min_samples_leaf')
        max_features = st.sidebar.radio("No. of features to consider during best split", ("auto", "sqrt", "log2"),
                                        key='max_features')

        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Decision Tree Classification Results")
            y_pred, accuracy, models = model.DT(train_x, test_x, train_y, test_y, criterion=criterion,
                                                max_depth=max_depth,
                                                leaf=min_samples_leaf, max_features=max_features)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)

    if classifier == "Random Forest Classifier":
        st.sidebar.subheader("Model Hyperparameters")
        n_estimators = st.sidebar.slider("Number of Trees in the Random Forest", 100, 4000, key='n_estimators')
        max_depth = st.sidebar.number_input("The maximum depth of the tree", 1, 100, step=5, key='max_depth')
        bootstrap = st.sidebar.radio("Bootstrap samples when building trees", ("True", "False"), key='bootstrap')

        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Random Forest Classification Results")
            y_pred, accuracy, models = model.RF(train_x, test_x, train_y, test_y, n_estimators=n_estimators,
                                                max_depth=max_depth, bootstrap=bootstrap)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)

    if classifier == "Gradient Boosting Classifier":
        st.sidebar.subheader("Model Hyperparameters")
        n_estimators = st.sidebar.slider("Number of Trees in the Gradient Boost ensemble", 100, 4000,
                                         key='n_estimators')
        max_depth = st.sidebar.number_input("The maximum depth of the tree", 1, 100, step=5, key='max_depth')
        learning_rate = st.sidebar.number_input("Learning Rate", 0.01, 10.0, step=0.01, key='learning_rate')
        warm_start = st.sidebar.radio("Reuse previous solution for more ensemble", ("True", "False"), key='warm_start')

        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Gradient Boosting Classification Results")
            y_pred, accuracy, models = model.GBC(train_x, test_x, train_y, test_y, n_estimators=n_estimators,
                                                 max_depth=max_depth, learning_rate=learning_rate,
                                                 warm_start=warm_start)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)

    if classifier == "XGBoost Classifier":
        st.sidebar.subheader("Model Hyperparameters")
        n_estimators = st.sidebar.slider("Number of Trees in the XGBoost ensemble", 100, 4000, key='n_estimators')
        max_depth = st.sidebar.number_input("The maximum depth of the tree", 1, 100, step=5, key='max_depth')
        eta = st.sidebar.number_input("Learning Rate", 0.01, 10.0, step=0.01, key='eta')
        colsample_bytree = st.sidebar.number_input("Percentage of features used per tree", 0.01, 1.0, step=0.01,
                                                   key='colsample_bytree')
        reg_alpha = st.sidebar.number_input("L1 regularization on leaf weights", 1, 10, step=1, key='reg_alpha')
        reg_lambda = st.sidebar.number_input("L2 regularization on leaf weights", 1, 10, step=1, key='reg_lambda')

        metrics = st.sidebar.multiselect("What metrics to plot?", ("Confusion Matrix", "ROC Curve",
                                                                  "Precision-Recall Curve"))

        if st.sidebar.button("Classify", key="classify"):
            st.subheader("Extreme Gradient Boosting(XGBoost) Classification Results")
            y_pred, accuracy, models = model.XGB(train_x, test_x, train_y, test_y, n_estimators=n_estimators,
                                                 max_depth=max_depth, eta=eta, colsample_bytree=colsample_bytree,
                                                 reg_alpha=reg_alpha, reg_lambda=reg_lambda)
            st.write("Accuracy: ", accuracy.round(3))
            st.write("Precision: ", precision_score(test_y, y_pred, labels=class_names).round(3))
            st.write("Recall: ", recall_score(test_y, y_pred, labels=class_names).round(3))
            utils.plot_metrics(metrics, models, test_x, test_y, class_names)
Example 13
def train_subj(subj, save_dir, N_epoch=20, dev_set='AVG_100', plot=1):
    #%% load saved data (.mat) from MATLAB
    #subj = 's21'
    # (1)train set:
    # filename1 = 'EEG/SNR3D_set_s1.mat'
    filename1 = 'DL_trainset/AVG_ram100/' + subj + '.mat'
    file = loadmat(filename1)
    data1 = file['SNR3D_arr']

    # (2) dev set:
    # AVG_spectrum_s1 = 'EEG/SNR3D_spectrum_s1.mat'
    AVG_spectrum_s1 = 'DL_trainset/AVG96/' + subj + '.mat'
    file = loadmat(AVG_spectrum_s1)
    data1_dev = file['SNR3D']
    print('data1_dev:', data1_dev.shape)

    # check shape of INPUT data
    r, c = data1.shape
    for i in range(c):
        print(data1[0, i].shape)

    #%% original labels -> binary labels
    labels_full = [
        4, 6, 27, 33, 35, 37, 39, 43, 45, 66, 72, 76, 78, 99, 105, 111, 117
    ]
    labels_subset = [6, 33, 37, 39, 43, 66, 72, 78, 99, 117]  # (1)
    labels_subset2 = [6, 33, 37, 39, 43, 45, 66, 72, 76, 78, 99, 117]  # (2)
    labels_subset3 = [6, 33, 35, 37, 39, 43, 45, 66, 72, 76, 78, 99,
                      117]  # (3)

    # choose one of the label sets above
    labels_2use = labels_full

    labels_org = np.array(labels_2use) - 1  # python starts from 0
    labels_bi = xloc2binaryvec(labels_org)  # binary code label: 1 and 0

    #%% prepare {x_train, y_train, x_test, y_test}
    # option B: use AVG- [50, 60, 70, 80, 90] trials for train
    x_train = np.concatenate(
        (data1[0, 0], data1[0, 1], data1[0, 2], data1[0, 3], data1[0, 4]),
        axis=0)
    if dev_set == 'AVG_100':  # saved in folder named 'saved_models2'
        x_test_100tr = data1_dev[-1]  # dev set: AVG-100 trials  (1,:,:)
        x_test = np.reshape(x_test_100tr,
                            (1, x_test_100tr.shape[0], x_test_100tr.shape[1]))
    elif dev_set == 'AVG_50_100':  # saved in folder named 'mat3'
        # dev set: AVG 5-100 trials
        x_test = data1_dev[-50:, :, :]  # (50-100,:,:)
    elif dev_set == 'AVG_5_100':  # whole dev set: 'AVG_5_100' - saved in folder named 'saved_models'
        x_test = data1_dev  # (5-100, :,:)

    print('dev_set: ', x_test.shape)

    # normalize X to [0 - 1]
    x_train = normalize_matrix_1(x_train)
    x_test = normalize_matrix_1(x_test)

    # repeat Y (labels_bi) once for every sample
    y_train = np.tile(labels_bi, (len(x_train), 1))  # (10k,120)
    y_test = np.tile(labels_bi, (len(x_test), 1))  # (100,120)

    # shuffle [x_train, y_train] 100x along the dimension of frequency '1-120'
    x_train, y_train = shuffle_trainset(x_train, y_train)

    #%% reshape for CNN: not necessary, tf.keras will take care of this
    # x_train = x_train.reshape(x_train.shape[0], 12, 120, 1)
    # x_test = x_test.reshape(x_test.shape[0], 12, 120, 1)

    #%% define a model
    tf.keras.backend.clear_session()

    def create_model():
        # (C) CNN model (~2 conv layers): ~230k params
        inputs = keras.Input(shape=(60, 120, 1))  # trailing 1 is the channel dim expected by Conv2D
        x = layers.experimental.preprocessing.Rescaling(1.0 / 1)(inputs)  # scale of 1 keeps inputs unchanged
        x = layers.Conv2D(filters=8,
                          kernel_size=(3, 3),
                          padding='same',
                          activation="relu")(x)
        x = layers.MaxPooling2D(pool_size=(3, 3),
                                strides=(1, 1),
                                padding='same')(x)  # stride = 1, not the default of 3
        x = layers.Dropout(.2)(x)
        x = layers.Conv2D(filters=4,
                          kernel_size=(3, 3),
                          padding='same',
                          activation="relu")(x)
        x = layers.MaxPooling2D(pool_size=(2, 2),
                                strides=(1, 1),
                                padding='same')(x)  # stride = 1, not the default of 2
        x = layers.Dropout(.2)(x)
        x = layers.Flatten()(x)
        x = layers.Dense(1024, activation="relu")(x)  # a larger dense layer did not improve results further
        outputs = layers.Dense(120, activation='sigmoid')(x)
        model = keras.Model(inputs, outputs)

        loss = tf.keras.losses.BinaryCrossentropy()
        # optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001,
                                             beta_1=0.9,
                                             beta_2=0.999,
                                             epsilon=1e-07)

        # METRICS = [
        #       keras.metrics.TruePositives(name='tp'),
        #       keras.metrics.FalsePositives(name='fp'),
        #       keras.metrics.TrueNegatives(name='tn'),
        #       keras.metrics.FalseNegatives(name='fn'),
        #       keras.metrics.BinaryAccuracy(name='accuracy'),
        #       keras.metrics.Precision(name='precision'),
        #       keras.metrics.Recall(name='recall'),
        #       keras.metrics.AUC(name='auc'),
        #       ]

        METRICS = [
            keras.metrics.BinaryAccuracy(name='accuracy'),
            keras.metrics.Precision(name='precision'),
            keras.metrics.Recall(name='recall'),
            keras.metrics.AUC(name='auc',
                              curve='PR'),  # curve='ROC' by default
        ]

        model.compile(
            optimizer=optimizer,
            loss=loss,
            metrics=METRICS,
        )

        return model

    #%% Build a model
    # Create a basic model instance
    model = create_model()
    # show model
    model.summary()

    #%% train
    # N_epoch = 20
    # (callback 1) for using TensorBoard
    callback_log = [keras.callbacks.TensorBoard(log_dir='./logs')]
    # Launch TensorBoard from the command line (first cd into this project's folder):
    #   tensorboard --logdir logs

    # (callback 2) Create a callback that saves the model's weights;
    # only the checkpoint with the best 'val_auc' is kept.
    checkpoint_path = "checkpoints/my_ckpt"
    callback_cp = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                     save_weights_only=True,
                                                     monitor='val_auc',
                                                     mode='max',
                                                     save_best_only=True,
                                                     verbose=1)
    start_time = time.time()
    # Train the model from Numpy data input
    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=32,
                        epochs=N_epoch,
                        callbacks=[callback_log, callback_cp])

    print('elapsed_time:', time.time() - start_time)  # =203 s for 100 epochs

    #%% plot learning curves - [loss, AUC, etc.]
    if plot == 1:
        plt.figure(figsize=(12, 10))
        plot_loss(history, "loss", 2)
        # plot metrics over epochs
        plt.figure(figsize=(12, 10))
        plot_metrics(history)

    #%% Evaluate the 'best model weights' saved by the checkpoint callback
    # Create a basic model instance
    model = create_model()
    # Display the model's architecture
    model.summary()
    # Evaluate the initial model
    loss, acc, _, _, _ = model.evaluate(x_test, y_test, verbose=2)
    print("Untrained model, accuracy: {:5.2f}%".format(100 * acc))

    # Re-load the model weights from the saved checkpoints
    model.load_weights(checkpoint_path)  # Loads the weights
    # Evaluate the saved model
    loss, acc, _, _, _ = model.evaluate(x_test, y_test, verbose=2)
    print("saved model, accuracy: {:5.2f}%".format(100 * acc))

    # save the entire trained model (not only weights in checkpoints)
    # choose the path for saving
    # save_dir = os.path.join(os.getcwd(), 'saved_models')
    # if not os.path.isdir(save_dir):
    #     os.makedirs(save_dir)

    model_name = subj + '_best_trained_model.h5'
    model_path = os.path.join(save_dir, model_name)
    model.save(model_path)
    print('Saved trained model at %s ' % model_path)

    #%% test the best_trained_model on x_test (AVG-100 trials) of day1 EEG
    if plot == 1:
        predictions = model.predict(x_test)
        predictions_mean = np.mean(predictions, axis=0)

        labels2plt = labels_org

        plt.figure(figsize=(16, 9))
        plt.imshow(predictions, cmap='gray')
        plot1bar(labels2plt, L=96)
        plt.xlabel('frequency')
        plt.ylabel('All test samples')
        plt.title('prediction on AVG 90 trials (with Ann)')

        plt.figure(figsize=(16, 9))
        plt.plot(predictions_mean)
        plot1bar(labels2plt, L=2)
        plt.grid(True)
        plt.xlabel('frequency')
        plt.ylabel('probability of ASSR')
        plt.title('Mean prediction')
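
The helpers xloc2binaryvec, normalize_matrix_1 and plot_metrics used by train_subj are defined elsewhere in this project. A minimal sketch of plausible implementations, inferred only from how they are called above; the 120-bin label length, the per-sample min-max normalization and the list of plotted metrics are assumptions, not the original code.

import numpy as np
import matplotlib.pyplot as plt


def xloc2binaryvec(locs, length=120):
    # Turn a list of frequency-bin indices into a 0/1 label vector of the given length.
    vec = np.zeros(length, dtype=np.float32)
    vec[np.asarray(locs, dtype=int)] = 1.0
    return vec


def normalize_matrix_1(x):
    # Min-max normalize each sample to the [0, 1] range.
    x = x.astype(np.float32)
    x_min = x.min(axis=(-2, -1), keepdims=True)
    x_max = x.max(axis=(-2, -1), keepdims=True)
    return (x - x_min) / (x_max - x_min + 1e-12)


def plot_metrics(history):
    # Plot every compiled metric (and its validation counterpart) over the training epochs.
    for n, metric in enumerate(['loss', 'accuracy', 'precision', 'recall', 'auc']):
        plt.subplot(3, 2, n + 1)
        plt.plot(history.epoch, history.history[metric], label='train')
        if 'val_' + metric in history.history:
            plt.plot(history.epoch, history.history['val_' + metric], '--', label='val')
        plt.xlabel('epoch')
        plt.ylabel(metric)
        plt.legend()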
# In[ ]:


with open('results.log', 'w') as f:
    lr = 0.001
    opt = RAdam(network.get_trainable_parameters(lr))
#     opt = torch.optim.SGD(network.get_trainable_parameters(lr), lr=lr, momentum=0.9)
    res_metrics = network.fit(
        load_data, 
        epochs=nb_epochs,
        optimizer=opt, 
        dataset_size=dataset_size, 
        result_file=f,
        save_checkpoints=True
    )
    plot_metrics(res_metrics, save_plot_path='./metrics_' if SAVE_PLOTS else None)

network.save('save_network.net')
# network.load('save_network.net')


# In[ ]:


plot_one_batch(network)
print_and_plot_accuracy_metrics(
    network, 
    load_data('acc_train'), 
    load_data('acc_test'), 
    save_plot_path='./accuracy_' if SAVE_PLOTS else None
)
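
The plot_metrics helper used in this notebook comes from the SNN project's utilities and is not shown here. A minimal sketch under the assumption that res_metrics is a dict mapping metric names (e.g. 'loss', 'accuracy') to per-epoch lists and that save_plot_path is used as a file-name prefix; both the structure and the prefix handling are assumptions.

import matplotlib.pyplot as plt


def plot_metrics(res_metrics, save_plot_path=None):
    # One figure per metric; write it to '<prefix><metric>.png' when a prefix is given.
    for name, values in res_metrics.items():
        plt.figure()
        plt.plot(values)
        plt.xlabel('epoch')
        plt.ylabel(name)
        plt.title(name)
        if save_plot_path is not None:
            plt.savefig(save_plot_path + name + '.png')
    plt.show()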
Example n. 15
0
def train(G, max_episodes, save_path):
    '''
    Trains a DQN to play Pong.
    '''

    # Define some constants, lists, metrics, etc
    action_map = {1: 'x', 2: '^', 3: 'v'}  # Stay, up, down
    replay_memory = u.ReplayMemory(max_exp_len=REPLAY_MEM_LEN)
    step_list = []
    reward_list = []
    val_Q_list = []
    episode_length_list = []
    avg_episode_length_list = []
    episode_score_list = {'player': [], 'computer': []}
    X_val = u.load_validation_screens()

    # Initialize the Pong gym environment, set seeds
    env = gym.make('Pong-v0')
    np.random.seed(SEED)
    tf.set_random_seed(SEED)
    plt.ioff()

    # Gather screens

    # Initialize computational graph
    with G.as_default():
        # Get input/output tensors
        X = G.get_tensor_by_name('X:0')
        Y = G.get_tensor_by_name('Y:0')

        # Append loss function to graph
        Q = tf.placeholder(dtype=tf.float32, shape=[None], name='Q')
        A = tf.placeholder(dtype=tf.int32, shape=[None], name='A')
        mask = tf.one_hot(A, depth=3, dtype=tf.float32, axis=-1)
        L = tf.reduce_mean(tf.square(tf.reduce_sum(mask * Y, axis=-1) - Q),
                           name='L')

        # Define the optimizer and training op (gradients are gathered but not clipped)
        if not RELOAD_PARAMETERS:
            optimizer = tf.train.AdamOptimizer(LEARNING_RATE, name='Adam')
        else:
            optimizer = G.get_operation_by_name('Adam')
        saver = tf.train.Saver()
        # Initialize TF session
        with tf.Session() as sess:
            # Build the training op, initialize all variables, then optionally
            # reload the last checkpoint (restoring after the initializer so the
            # restored weights are not overwritten)
            gradients, variables = zip(*optimizer.compute_gradients(L))
            train_op = optimizer.apply_gradients(zip(gradients, variables))
            sess.run(tf.global_variables_initializer())
            if RELOAD_PARAMETERS:
                print('Reloading from last checkpoint...')
                saver.restore(sess, save_path)
            else:
                print('Initializing variables...')
            # Iterate over episodes
            global_steps = 0
            for episode in range(max_episodes):
                obs = u.preprocess_image(env.reset())
                for i in range(3):
                    replay_memory.add_frame(
                        np.zeros((160 // DOWNSAMPLE, 160 // DOWNSAMPLE)))
                replay_memory.add_frame(obs)

                # Iterate over frames
                done = False
                frame = 0
                episode_score = [0, 0]
                while not done:
                    if (global_steps >= OBSERVE_STEPS):
                        # Feed state into DQN
                        s = np.stack(
                            [replay_memory.frames[i] for i in range(-4, 0)],
                            axis=-1).reshape(1, 160 // DOWNSAMPLE,
                                             160 // DOWNSAMPLE, 4)
                        y = sess.run(Y, feed_dict={X: s})

                        # Decide on action
                        epsilon = max(
                            MAX_EPSILON *
                            (1 - global_steps / EPSILON_ANNEALING_STEPS),
                            MIN_EPSILON)
                        if (np.random.rand() < epsilon):
                            a = np.random.choice([1, 2, 3])
                        else:
                            a = np.argmax(y) + 1
                    else:
                        a = np.random.choice([1, 2, 3])

                    # Take action, observe environment, reward
                    obs, r, done, _ = env.step(a)
                    r_sum = r
                    for i in range(STEPS_TO_SKIP):
                        obs, r, done_temp, _ = env.step(1)
                        r_sum += r
                        if done_temp:
                            done = True
                    if r_sum > 0:
                        episode_score[0] += r_sum
                    elif r_sum < 0:
                        episode_score[1] -= r_sum

                    # Add new state/reward to replay memory
                    replay_memory.add_frame(u.preprocess_image(obs))
                    experience = (np.stack(list(replay_memory.frames),
                                           axis=-1), a, r_sum, done)
                    replay_memory.add_exp(experience)

                    # Do training batch update
                    if (global_steps >= OBSERVE_STEPS):
                        S, A_, R, D = replay_memory.sample(BATCH_SIZE)
                        y2 = sess.run(Y, feed_dict={X: S[:, :, :, -4:]})
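                        # Bellman target: r + gamma * max_a' Q(s', a'), zeroed for terminal states (D == 1)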
                        q = R + (1 - D) * GAMMA * np.max(y2, axis=1)
                        _, batch_loss = sess.run([train_op, L],
                                                 feed_dict={
                                                     X: S[:, :, :, -5:-1],
                                                     Q: q,
                                                     A: (A_ - 1)
                                                 })
                        if np.isnan(batch_loss):
                            print('nan error, exiting training')
                            exit()
                        elif (np.mean(np.max(y2, axis=-1)) > 1e2):
                            print('unstable Q value, exiting training')
                            exit()

                        # Print updates
                        print(
                            'Episode: {}/{},\tframe: {},\treward: {},\t<max(Q)>: {:.3e},\nmax(Q): {:.3e},\taction: {},\tcurrent std(Q)/mean(Q): {:.3e}'
                            .format(episode + 1, max_episodes, frame + 1,
                                    int(r_sum), np.mean(np.max(y2, axis=-1)),
                                    np.max(y), action_map[a],
                                    np.std(y) / np.mean(y)))

                        # Plot frame-by-frame metrics
                        if global_steps == 0:
                            avg_reward = r_sum
                        else:
                            avg_reward = (1 -
                                          np.exp(-1 / 500)) * r_sum + np.exp(
                                              -1 / 500) * avg_reward
                        if (global_steps % PLOT_EVERY_N_STEPS == 0):
                            step_list.append(global_steps)
                            reward_list.append(10 * avg_reward)
                            y_val = sess.run(Y, feed_dict={X: X_val})
                            val_Q_list.append(np.mean(np.max(y_val, axis=-1)))
                            u.plot_metrics(step_list, 'PongMetrics',
                                           'Pong Metrics', 'Global step', '',
                                           (val_Q_list, 'Validation <max(Q)>'),
                                           (reward_list, '10*<R>'))
                    else:
                        print('Observation step {}/{}'.format(
                            global_steps, OBSERVE_STEPS))

                    # Update state variables
                    global_steps += 1
                    frame += 1

                # Save parameters at end of episode, plot episode metrics
                saver.save(sess, save_path)
                episode_length_list.append(frame * (STEPS_TO_SKIP + 1) / 1000)
                if episode == 0:
                    avg_episode_length = frame * (STEPS_TO_SKIP + 1)
                else:
                    avg_episode_length = (1 - np.exp(-1 / 10)) * frame * (
                        STEPS_TO_SKIP + 1) + np.exp(
                            -1 / 10) * avg_episode_length
                avg_episode_length_list.append(avg_episode_length / 1000)
                episode_score_list['player'].append(episode_score[0])
                episode_score_list['computer'].append(episode_score[1])
                u.plot_metrics(range(episode + 1), 'EpisodeLength',
                               'Episode Length', 'Episode', 'Length/1000',
                               (episode_length_list, 'Episode length'),
                               (avg_episode_length_list, 'Average'))
                u.plot_metrics(range(episode + 1), 'EpisodeScore',
                               'Episode Score', 'Episode', 'Score',
                               (episode_score_list['player'], 'Player'),
                               (episode_score_list['computer'], 'Computer'))
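
The utility u.plot_metrics belongs to the project's helper module. A minimal sketch consistent with how it is called above; the .png suffix and the exact figure handling are assumptions, not the original implementation.

import matplotlib.pyplot as plt


def plot_metrics(x, filename, title, xlabel, ylabel, *series):
    # Plot one or more (values, label) series against x and save the figure to disk,
    # so training progress can be monitored while the agent keeps playing.
    plt.figure()
    for values, label in series:
        plt.plot(x, values, label=label)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(filename + '.png')
    plt.close()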