Example #1
    def train(self, X, y, x_sensitive, fairness_constraint):
        self.x_sensitive = {"s1": x_sensitive}
        self.X = ut.add_intercept(X)
        self.y = y

        if fairness_constraint==-1.0:
            self.w = ut.train_model(self.X, self.y, self.x_sensitive, lf._logistic_loss, 0, 0, 0, 
                                    ["s1"], {"s1":0}, None)
        else:
            self.w = ut.train_model(self.X, self.y, self.x_sensitive, lf._logistic_loss, 1, 0, 0, 
                                    ["s1"], {"s1": fairness_constraint}, None)

        train_score, test_score, correct_answers_train, correct_answers_test = ut.check_accuracy(self.w, 
                                                                                   self.X, self.y, self.X, self.y, None, None)

        distances_boundary_test = (np.dot(self.X, self.w)).tolist()
        all_class_labels_assigned_test = np.sign(distances_boundary_test)
        correlation_dict_test = ut.get_correlations(None, None, 
                                    all_class_labels_assigned_test, self.x_sensitive, ["s1"])

        correlation_dict = ut.get_avg_correlation_dict([correlation_dict_test])
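        # correlation_dict["s1"][group][1] appears to hold each group's fraction of
        # positive predictions (group 1 = non-protected, group 0 = protected); the
        # p%-rule below is their ratio expressed as a percentage.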
        non_prot_pos = correlation_dict["s1"][1][1]
        prot_pos = correlation_dict["s1"][0][1]
        p_rule = (prot_pos / non_prot_pos) * 100.0

        return self.w, p_rule, 100.0*test_score
Example #2
	def train_test_classifier():
		w = ut.train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
		train_score, test_score, correct_answers_train, correct_answers_test = ut.check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
		distances_boundary_test = (np.dot(x_test, w)).tolist()
		all_class_labels_assigned_test = np.sign(distances_boundary_test)
		correlation_dict_test = ut.get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
		cov_dict_test = ut.print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
		p_rule = ut.print_classifier_fairness_stats([test_score], [correlation_dict_test], [cov_dict_test], sensitive_attrs[0])	
		return w, p_rule, test_score
Example #3
    dataloaders = {}
    dataloaders["train"] = train_dataloader
    dataloaders["val"] = val_dataloader

    # Define Model
    device = torch.device("cuda")

    model = ConvLSTM(cfg, bias=True, batch_first=True)

    if cfg["resume"]:
        model.load_state_dict(torch.load(cfg["weights"]))

    model.to(device)

    if cfg["optimizer"] == "sgd":
        optimizer = optim.SGD(
            model.parameters(),
            lr=cfg["lr"],
            momentum=0.9,
            weight_decay=cfg["weight_decay"],
        )
    else:
        optimizer = RAdam(
            model.parameters(), lr=cfg["lr"], weight_decay=cfg["weight_decay"]
        )

    criterion = nn.BCELoss()
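    # Note: nn.BCELoss expects probabilities in [0, 1], so the ConvLSTM is assumed
    # to end in a sigmoid; with raw logits, nn.BCEWithLogitsLoss would be required.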
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
    train_model(model, criterion, dataloaders, optimizer, exp_lr_scheduler, device, 100)

Example #4
    # create path for saving model
    path = f'01.CNN - Saved Model/{n}-Layers/'
    # set model save settings
    checkpoint = ModelCheckpoint(path + 'CNN.Ep{epoch:02d}',
                                 save_weights_only=False,
                                 save_freq='epoch')
    # form callback list
    call_backs = [
        checkpoint, early_stop,
        utils.PredictionCallback(scaled_val_images, val_labels)
    ]

    # train the model with the scaled training images
    t0 = time.time()
    run_log = utils.train_model(model, scaled_train_images, train_labels,
                                scaled_val_images, val_labels, num_epoch,
                                batch_size, call_backs)
    t_train = round(time.time() - t0, 3)
    # retrieve history data
    history_data = pd.DataFrame(run_log.history)
    # print(history_data.head())

    # generate and format classification report
    max_epoch.append(call_backs[2].last_epoch)
    precision = []
    recall = []

    for i in range(call_backs[2].last_epoch):  # loop over all epochs for current model
        report = " ".join(call_backs[2].report[i].split())  # remove extra spaces from report str
Example #5
        # initialize parameters of model
        model_ft.fc.apply(weights_init)
        model_ft = model_ft.to(device)
        # prepare optimizer & criterion
        # Observe that all parameters are being optimized
        optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
        # Decay LR by a factor of 0.1 every 7 epochs
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                               step_size=7,
                                               gamma=0.1)
        criterion = nn.CrossEntropyLoss()
        # train model
        best_model = train_model(model=model_ft,
                                 datasets=image_datasets,
                                 criterion=criterion,
                                 optimizer=optimizer_ft,
                                 device=device,
                                 num_epochs=epoch,
                                 batch_size=batch_size,
                                 out=out)
        # save best model
        torch.save(best_model.state_dict(), out / "best_model.pt")

    except Exception as error:
        import traceback
        traceback.print_exc()
    finally:
        # undo data migration
        print("Undo data migration")
        for path in val_dir.glob("*/*.jpg"):
            shutil.move(path, str(path).replace("val", "train"))
        print("Done!!", end="\n\n")
Example #6
import torch.nn as nn
import torch.optim as optim

import data
import models  # local project module providing the MODELS registry
import utils   # local project module providing train_model / count_params / save_all

from deepsense import neptune

ctx = neptune.Context()

model_name = ctx.params['model']
epochs = ctx.params['epochs']
learning_rate = ctx.params['learning_rate']

ctx.tags.append(model_name)

# data
dataloaders = data.get_dataloaders('/input', batch_size=128)

# network
model = models.MODELS[model_name]
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss(size_average=False)

print("Network created. Number of parameters:")
print(utils.count_params(model))

# training
trained_model = utils.train_model(model,
                                  criterion,
                                  optimizer,
                                  dataloaders,
                                  num_epochs=epochs)
utils.save_all(trained_model)
Example #7
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir
        # if 'test' in model_dir and debug and os.path.exists(model_dir):
        #     import shutil
        #     shutil.rmtree(model_dir)
        # assert not os.path.exists(model_dir)
        # os.mkdir(model_dir)

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32,
                                     shape=(None, args.image_size,
                                            args.image_size, 3),
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 196),
                                        name='landmark_batch')
        attribute_batch = tf.placeholder(tf.int32,
                                         shape=(None, 6),
                                         name='attribute_batch')
        euler_angles_gt_batch = tf.placeholder(tf.float32,
                                               shape=(None, 3),
                                               name='euler_angles_gt_batch')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch
        list_ops['attribute_batch'] = attribute_batch
        list_ops['euler_angles_gt_batch'] = euler_angles_gt_batch

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')
        # total_loss, landmarks, heatmaps_loss, heatmaps= create_model(image_batch, landmark_batch,\
        #                                                                                phase_train_placeholder, args)
        landmarks_pre, landmarks_loss, euler_angles_pre = create_model(
            image_batch, landmark_batch, phase_train_placeholder, args)

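        # Per-sample attribute weights: each attribute column is weighted by the
        # inverse of its mean frequency in the batch (falling back to batch_size
        # when an attribute does not occur), so rare attributes get larger weights.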
        attributes_w_n = tf.to_float(attribute_batch[:, 1:6])
        # _num = attributes_w_n.shape[0]
        mat_ratio = tf.reduce_mean(attributes_w_n, axis=0)
        mat_ratio = tf.map_fn(
            lambda x:
            (tf.cond(x > 0, lambda: 1 / x, lambda: float(args.batch_size))),
            mat_ratio)
        attributes_w_n = tf.convert_to_tensor(attributes_w_n * mat_ratio)
        attributes_w_n = tf.reduce_sum(attributes_w_n, axis=1)
        list_ops['attributes_w_n_batch'] = attributes_w_n

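        # Total loss: per-sample squared landmark error, scaled by (1 - cos) of the
        # Euler-angle error and by the attribute weights, plus the collected L2
        # regularization losses.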
        L2_loss = tf.add_n(tf.losses.get_regularization_losses())
        _sum_k = tf.reduce_sum(tf.map_fn(
            lambda x: 1 - tf.cos(abs(x)),
            euler_angles_gt_batch - euler_angles_pre),
                               axis=1)
        loss_sum = tf.reduce_sum(tf.square(landmark_batch - landmarks_pre),
                                 axis=1)
        loss_sum = tf.reduce_mean(loss_sum * _sum_k * attributes_w_n)
        loss_sum += L2_loss

        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args)

        list_ops['landmarks'] = landmarks_pre
        list_ops['L2_loss'] = L2_loss
        list_ops['loss'] = loss_sum
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op

        test_mean_error = tf.Variable(tf.constant(0.0),
                                      dtype=tf.float32,
                                      name='ME')
        test_failure_rate = tf.Variable(tf.constant(0.0),
                                        dtype=tf.float32,
                                        name='FR')
        test_10_loss = tf.Variable(tf.constant(0.0),
                                   dtype=tf.float32,
                                   name='TestLoss')
        train_loss = tf.Variable(tf.constant(0.0),
                                 dtype=tf.float32,
                                 name='TrainLoss')
        train_loss_l2 = tf.Variable(tf.constant(0.0),
                                    dtype=tf.float32,
                                    name='TrainLoss2')
        tf.summary.scalar('test_mean_error', test_mean_error)
        tf.summary.scalar('test_failure_rate', test_failure_rate)
        tf.summary.scalar('test_10_loss', test_10_loss)
        tf.summary.scalar('train_loss', train_loss)
        tf.summary.scalar('train_loss_l2', train_loss_l2)

        save_params = tf.trainable_variables()
        saver = tf.train.Saver(save_params, max_to_keep=None)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)

        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        with sess.as_default():
            epoch_start = 0
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if not os.path.isdir(pretrained_model):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    saver.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    model_path = ckpt.model_checkpoint_path
                    assert (ckpt and model_path)
                    epoch_start = int(
                        model_path[model_path.find('model.ckpt-') + 11:]) + 1
                    print('Checkpoint file: {}'.format(model_path))
                    saver.restore(sess, model_path)

            # if args.save_image_example:
            #     save_image_example(sess, list_ops, args)

            print('Running train.')

            merged = tf.summary.merge_all()
            train_write = tf.summary.FileWriter(log_dir, sess.graph)
            for epoch in range(epoch_start, args.max_epoch):
                start = time.time()
                train_L, train_L2 = train(sess, epoch_size, epoch, list_ops)
                print("train time: {}".format(time.time() - start))

                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                start = time.time()
                test_ME, test_FR, test_loss = test(sess, list_ops, args)
                print("test time: {}".format(time.time() - start))

                summary, _, _, _, _, _ = sess.run([
                    merged,
                    test_mean_error.assign(test_ME),
                    test_failure_rate.assign(test_FR),
                    test_10_loss.assign(test_loss),
                    train_loss.assign(train_L),
                    train_loss_l2.assign(train_L2)
                ])
                train_write.add_summary(summary, epoch)
Example #8
def main():
    start = time.time()
    parser = args.parse_args()

    # run some checks on arguments
    check_args(parser)

    # format logging
    log_name = os.path.join(
        parser.run_log,
        '{}_run_log_{}.log'.format(parser.experiment,
                                   dt.now().strftime("%Y%m%d_%H%M")))

    log.basicConfig(filename=log_name,
                    format='%(asctime)s | %(name)s -- %(message)s',
                    level=log.INFO)
    os.chmod(log_name, parser.access_mode)

    # set device to GPU (cuda) if available, otherwise CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    log.info("Starting experiment {} VN -> EN NMT on {}.".format(
        parser.experiment, device))

    # set seed for replication
    random.seed(parser.seed)
    np.random.seed(parser.seed)
    torch.manual_seed(parser.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(parser.seed)
    log.info("For reproducibility, the seed is set to {}.".format(parser.seed))

    # set file paths
    source_name = parser.source_name
    target_name = parser.target_name

    # get saved models dir
    base_saved_models_dir = parser.save_dir
    saved_models_dir = os.path.join(base_saved_models_dir,
                                    source_name + '2' + target_name)
    plots_dir = parser.plots_dir

    log.info("We will save the models in this directory: {}".format(
        saved_models_dir))
    log.info("We will save the plots in this directory: {}".format(plots_dir))

    # get data dir
    main_data_path = parser.data_dir
    path_to_train_data = {
        'source': main_data_path + 'train.' + source_name,
        'target': main_data_path + 'train.' + target_name
    }
    path_to_dev_data = {
        'source': main_data_path + 'dev.' + source_name,
        'target': main_data_path + 'dev.' + target_name
    }
    # get language objects
    saved_language_model_dir = os.path.join(saved_models_dir, 'lang_obj')

    # get dictionary of datasets
    dataset_dict = {
        'train':
        nmt_dataset.LanguagePair(source_name=source_name,
                                 target_name=target_name,
                                 filepath=path_to_train_data,
                                 lang_obj_path=saved_language_model_dir,
                                 minimum_count=1),
        'dev':
        nmt_dataset.LanguagePair(source_name=source_name,
                                 target_name=target_name,
                                 filepath=path_to_dev_data,
                                 lang_obj_path=saved_language_model_dir,
                                 minimum_count=1)
    }

    # get max sentence length from the 99.99th percentile of source lengths
    MAX_LEN = int(dataset_dict['train'].main_df['source_len'].quantile(0.9999))
    log.info("MAX_LEN (99.99th percentile) = {}".format(MAX_LEN))
    batchSize = parser.batch_size
    log.info("Batch size = {}.".format(batchSize))

    dataloader_dict = {
        'train':
        DataLoader(dataset_dict['train'],
                   batch_size=batchSize,
                   collate_fn=partial(nmt_dataset.vocab_collate_func,
                                      MAX_LEN=MAX_LEN),
                   shuffle=True,
                   num_workers=0),
        'dev':
        DataLoader(dataset_dict['dev'],
                   batch_size=batchSize,
                   collate_fn=partial(nmt_dataset.vocab_collate_func,
                                      MAX_LEN=MAX_LEN),
                   shuffle=True,
                   num_workers=0)
    }

    # Configuration
    source_lang_obj = dataset_dict['train'].source_lang_obj
    target_lang_obj = dataset_dict['train'].target_lang_obj

    source_vocab = dataset_dict['train'].source_lang_obj.n_words
    target_vocab = dataset_dict['train'].target_lang_obj.n_words
    hidden_size = parser.hidden_size
    rnn_layers = parser.rnn_layers
    lr = parser.learning_rate
    longest_label = parser.longest_label
    gradient_clip = parser.gradient_clip
    num_epochs = parser.epochs

    log.info(
        "The source vocab ({}) has {} words and target vocab ({}) has {} words"
        .format(source_name, source_vocab, target_name, target_vocab))

    # encoder model
    encoder_rnn = nnet_models_new.EncoderRNN(input_size=source_vocab,
                                             hidden_size=hidden_size,
                                             numlayers=rnn_layers)
    # decoder model
    decoder_rnn = nnet_models_new.DecoderRNN(output_size=target_vocab,
                                             hidden_size=hidden_size,
                                             numlayers=rnn_layers)

    # seq2seq model
    nmt_rnn = nnet_models_new.seq2seq(
        encoder_rnn,
        decoder_rnn,
        lr=lr,
        hiddensize=hidden_size,
        numlayers=rnn_layers,
        target_lang=dataset_dict['train'].target_lang_obj,
        longest_label=longest_label,
        clip=gradient_clip,
        device=device)

    log.info(
        "Seq2Seq Model with the following parameters: batch_size = {}, learning_rate = {}, hidden_size = {}, rnn_layers = {}, lr = {}, longest_label = {}, gradient_clip = {}, num_epochs = {}, source_name = {}, target_name = {}"
        .format(batchSize, lr, hidden_size, rnn_layers, lr, longest_label,
                gradient_clip, num_epochs, source_name, target_name))

    # do we want to train again?
    train_again = False

    # check if there is a saved model and if we want to train again
    if os.path.exists(utils.get_full_filepath(saved_models_dir,
                                              'rnn')) and (not train_again):
        log.info("Retrieving saved model from {}".format(
            utils.get_full_filepath(saved_models_dir, 'rnn')))
        nmt_rnn = torch.load(utils.get_full_filepath(saved_models_dir, 'rnn'),
                             map_location=global_variables.device)
    # train model again
    else:
        log.info("Check if this path exists: {}".format(
            utils.get_full_filepath(saved_models_dir, 'rnn')))
        log.info("It does not exist! Starting to train...")
        utils.train_model(dataloader_dict,
                          nmt_rnn,
                          num_epochs=num_epochs,
                          saved_model_path=saved_models_dir,
                          enc_type='rnn_test')
    log.info("Total time is: {} min : {} s".format((time.time() - start) // 60,
                                                   (time.time() - start) % 60))
    log.info("We will save the models in this directory: {}".format(
        saved_models_dir))

    # generate translations
    use_cuda = True
    utils.get_translation(nmt_rnn, 'I love to watch science movies on Mondays',
                          source_lang_obj, use_cuda, source_name, target_name)
    utils.get_translation(nmt_rnn,
                          'I want to be the best friend that I can be',
                          source_lang_obj, use_cuda, source_name, target_name)
    utils.get_translation(nmt_rnn, 'I love you', source_lang_obj, use_cuda,
                          source_name, target_name)
    utils.get_translation(
        nmt_rnn,
        'I love football, I like to watch it with my friends. It is always a great time.',
        source_lang_obj, use_cuda, source_name, target_name)
    utils.get_translation(
        nmt_rnn,
        'I do not know what I would do without pizza, it is very tasty to eat. If I could have any food in the world it would probably be pizza.',
        source_lang_obj, use_cuda, source_name, target_name)
    utils.get_translation(
        nmt_rnn,
        'Trump is the worst president in all of history. He can be a real racist and say very nasty things to people of color.',
        source_lang_obj, use_cuda, source_name, target_name)
    utils.get_translation(nmt_rnn, 'Thank you very much.', source_lang_obj,
                          use_cuda, source_name, target_name)
    utils.get_translation(nmt_rnn, 'Think about your own choices.',
                          source_lang_obj, use_cuda, source_name, target_name)
    utils.get_translation(
        nmt_rnn,
        'I recently did a survey with over 2,000 Americans , and the average number of choices that the typical American reports making is about 70 in a typical day .',
        source_lang_obj, use_cuda, source_name, target_name)

    # export plot
    log.info("Exported Binned Bleu Score Plot to {}!".format(plots_dir))
    _, _, fig = utils.get_binned_bl_score(nmt_rnn,
                                          dataset_dict['dev'],
                                          plots_dir,
                                          batchSize=batchSize)
Example #9
from __future__ import absolute_import

import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3

print('tf version: ', tf.__version__)

try:
    # package-relative import when used as part of a package
    from .utils import train_model, nb_classes, INPUT_SHAPE
except ImportError:
    # fall back to a plain import when run as a standalone script
    from utils import train_model, nb_classes, INPUT_SHAPE

iv3 = InceptionV3(input_shape=INPUT_SHAPE,
                  weights=None,
                  include_top=True,
                  classes=nb_classes)
iv3.compile(optimizer='RMSprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

train_model(iv3)
Example #10
            model.load_state_dict(state['model'])
            optim.load_state_dict(state['optim'])
        else:
            raise Exception("Invalid weight path")

    # Init weight dir
    weight_folder = os.path.join(experiment_folder, "weights")
    Path(weight_folder).mkdir(parents=True, exist_ok=True)

    # Train model
    print("Start training %d epochs" % args.num_epochs)
    for e in range(args.initial_epoch, args.num_epochs + 1):
        logger.info("Epoch %02d/%02d" % (e, args.num_epochs))
        logger.info("Start training")
        print("\nEpoch %02d/%02d" % (e, args.num_epochs), flush=True)
        save_file = os.path.join(weight_folder, 'epoch_%02d.h5' % e)
        train_loss = train_model(model,
                                 optim,
                                 train_iter,
                                 src_pad_token,
                                 use_mask=model_param["use_mask"],
                                 device=device,
                                 save_path=save_file)
        logger.info("End training")
        logger.info("train_loss = %.8f" % train_loss)
        val_loss = evaluate_model(model,
                                  val_iter,
                                  src_pad_token,
                                  use_mask=model_param["use_mask"],
                                  device=device)
        logger.info("val_loss   = %.8f\n" % val_loss)
Example #11
my_split = poses[0]
my_split = [path[:-4] for path in my_split]


"""Use SRGAN"""
srgan = generator()
srgan.load_weights('weights/srgan/gan_generator.h5')


"""Upload customed cnn model"""
cnn = CNN(256, 256, 3, 101)
cnn.load_weights('weights/custom/cnn_plus.h5')
plot_model(cnn, to_file='./model.png', show_shapes=True, show_layer_names=True)


train_model(2, 'cnn_plus', cnn, srgan)

#filepath="./cnn_weights.h5"
#checkpoint = ModelCheckpoint(filepath, monitor='accuracy', verbose=1, save_best_only=True, mode='max')
#callbacks_list = [checkpoint]
callbacks_list = []  # defined so the fit() call below runs; uncomment the lines above to enable checkpointing

"""Prepare and train on a batch of data and labels, 10 iterations"""
for i in range(2):
    train_set = devide(24, 2, 2)
    X = tensor2numpy('./data/', train_set, srgan)
    x = [X[i] for i in X.keys()]
    train = np.array(x, dtype = "float64")
    y = create_onehot(X)
    history = cnn.fit(train, y, batch_size=32, epochs=5, callbacks=callbacks_list, validation_split=0.2)
    # Plot training & validation accuracy values
    plt.plot(history.history['loss'])
Example #12
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir
        if 'test' in model_dir and debug and os.path.exists(model_dir):
            import shutil
            shutil.rmtree(model_dir)
        assert not os.path.exists(model_dir)
        os.mkdir(model_dir)

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32, shape=(None, args.image_size, args.image_size, 3),\
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 196),
                                        name='landmark_batch')
        attribute_batch = tf.placeholder(tf.int32,
                                         shape=(None, 6),
                                         name='attribute_batch')
        euler_angles_gt_batch = tf.placeholder(tf.float32,
                                               shape=(None, 3),
                                               name='euler_angles_gt_batch')
        w_n = tf.placeholder(tf.float32, shape=(None), name='w_n')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch
        list_ops['attribute_batch'] = attribute_batch
        list_ops['euler_angles_gt_batch'] = euler_angles_gt_batch
        list_ops['w_n'] = w_n

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')
        # total_loss, landmarks, heatmaps_loss, heatmaps= create_model(image_batch, landmark_batch,\
        #                                                                                phase_train_placeholder, args)

        landmarks_pre, landmarks_loss,euler_angles_pre = create_model(image_batch, landmark_batch,\
                                                                              phase_train_placeholder, args)

        L2_loss = tf.add_n(tf.losses.get_regularization_losses())
        _sum_k = tf.reduce_sum(tf.map_fn(lambda x: 1 - tf.cos(abs(x)), \
                                         euler_angles_gt_batch - euler_angles_pre), axis=1)

        loss_sum = tf.reduce_sum(tf.square(landmark_batch - landmarks_pre),
                                 axis=1)
        loss_sum = tf.reduce_mean(loss_sum * _sum_k * w_n)
        loss_sum += L2_loss

        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args)

        list_ops['landmarks'] = landmarks_pre
        list_ops['L2_loss'] = L2_loss
        list_ops['loss'] = loss_sum
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op

        save_params = tf.trainable_variables()
        saver = tf.train.Saver(save_params, max_to_keep=None)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        with sess.as_default():
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if (not os.path.isdir(pretrained_model)):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    saver.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    model_path = ckpt.model_checkpoint_path
                    assert (ckpt and model_path)
                    print('Checkpoint file: {}'.format(model_path))
                    saver.restore(sess, model_path)

            if args.save_image_example:
                save_image_example(sess, list_ops, args)

            print('Running train.')
            for epoch in range(args.max_epoch):
                train(sess, epoch_size, epoch, list_ops)
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                test(sess, list_ops, args)
Example #13
import pickle

import segmentation_models_pytorch
import torch

from dataset import create_dataloaders
from history import plot_history
from utils import train_model

dataset_path = "/home/shouki/Desktop/Programming/Python/AI/Datasets/ImageData/CarvanaImageMaskingDataset"
image_size = (128, 128)
batch_size = 64
device = torch.device("cuda")
train_dataloader, validation_dataloader = create_dataloaders(
    dataset_path, image_size, batch_size)

num_epochs = 10

model = segmentation_models_pytorch.Unet("resnet18",
                                         encoder_weights="imagenet",
                                         classes=1,
                                         activation=None).to(device)
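# activation=None leaves the U-Net emitting raw logits, which matches the
# BCEWithLogitsLoss used below (it applies the sigmoid internally).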
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       mode="min",
                                                       patience=3,
                                                       verbose=True)

history = train_model(model, criterion, optimizer, scheduler, num_epochs,
                      train_dataloader, validation_dataloader, device)
plot_history(history, num_epochs)

with open("./histories/history.pkl", "wb") as f:
    pickle.dump(history, f)
Example #14
                                   shuffle=True,
                                   num_workers=4)
    for x in ['train', 'val']
}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# model
model_ft = BCNN()

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()),
                      lr=0.1,
                      momentum=0.9,
                      weight_decay=0.001)

# Decay LR by a factor of 0.1 every 10 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
num_epochs = 15

model_ft = train_model(model_ft, criterion, optimizer, exp_lr_scheduler,
                       num_epochs, dataloaders, dataset_sizes, device)

# save params
save_model(model_ft.state_dict(), 'bcnn_step1')
Example #15
def test_mlp(l_rate=0.01,
             n_iter=20,
             batch_size=128,
             n_hidden_layers=4,
             n_units_in=512,
             n_units_hidden=512,
             test_at_iter=5,
             n_test_samples=20,
             W_len_scale=1e-6,
             b_len_scale=1e-6,
             sigma_W=1e-3,
             tune_sigma_W=True,
             sigma_b=1e-6,
             tune_sigma_b=True,
             diag_noise=True,
             approx_cols=False,
             seed=1234,
             plot=False,
             mnist=True):

    # load data
    if mnist:
        n_in = 28 * 28
        n_out = 10

        X_train, y_train, X_test, y_test, n_train_batches, n_test_batches = (
            prepare_mnist_data(batch_size=batch_size))
    else:  # CIFAR-10
        n_in = 3 * 32 * 32
        n_out = 10

        X_train, y_train, X_test, y_test, n_train_batches, n_test_batches = (
            prepare_cifar10_data(batch_size=batch_size))

    print('... building the model')

    # define symbolic variables
    index = T.lscalar('index')
    X = T.matrix('X', dtype=floatX)
    y = T.ivector('y')

    # create model
    model = prepare_model(X=X,
                          n_hidden_layers=n_hidden_layers,
                          n_in=n_in,
                          n_out=n_out,
                          n_units_in=n_units_in,
                          n_units_hidden=n_units_hidden,
                          sigma_W=sigma_W,
                          tune_sigma_W=tune_sigma_W,
                          sigma_b=sigma_b,
                          tune_sigma_b=tune_sigma_b,
                          W_len_scale=W_len_scale,
                          b_len_scale=b_len_scale,
                          diag_noise=diag_noise,
                          approx_cols=approx_cols,
                          seed=seed)

    # compile theano functions
    train, test_predict = prepare_functions(model=model,
                                            X=X,
                                            y=y,
                                            index=index,
                                            X_train=X_train,
                                            X_test=X_test,
                                            y_train=y_train,
                                            batch_size=batch_size,
                                            l_rate=l_rate)

    cost_stats, ll_stats, kl_W_stats, kl_b_stats, test_err = train_model(
        train=train,
        test_predict=test_predict,
        y_test=y_test,
        batch_size=batch_size,
        test_at_iter=test_at_iter,
        n_iter=n_iter,
        n_test_batches=n_test_batches,
        n_test_samples=n_test_samples,
        n_train_batches=n_train_batches)

    fname = ('gaussBNN_mnist_%dn_%du_%dk_%.2ew_%.2eb_%sm_%se_%.2el_%di_%dt'
             '_cols_%s_diag_%s.save' %
             (n_hidden_layers, n_units_in, n_units_hidden, sigma_W, sigma_b,
              str(tune_sigma_W), str(tune_sigma_b), l_rate, n_iter,
              test_at_iter, str(approx_cols), str(diag_noise)))
    out_file = os.path.join(out_dir, fname)
    with open(out_file, 'wb') as f:
        pickle.dump(model.get_param_dictionary(), f)
        pickle.dump([cost_stats, ll_stats, kl_b_stats, kl_W_stats, test_err],
                    f)

    if plot:
        fig, ax = plt.subplots(2, 2)

        ax[0, 0].plot(cost_stats)
        ax[0, 0].set_title('cost')
        ax[0, 1].plot(ll_stats)
        ax[0, 1].set_title('mean log likelihood')
        ax[1, 0].plot(kl_W_stats)
        ax[1, 0].set_title('1/N KL(q(W) || p(W)) + C')
        ax[1, 1].plot(kl_b_stats)
        ax[1, 1].set_title('1/N KL(q(b) || p(b)) + C')

        plt.show()
Example #16
    def train_models(self,
                     epochs,
                     train_dataset,
                     eval_dataset,
                     test_dataset,
                     optimizer,
                     scheduler=None,
                     regularization=None,
                     early_stopping=None,
                     **kwargs):

        if self.shrink_iterations == 0:
            bar = tqdm(range(self.shrink_iterations + 1),
                       desc='Shrink Iterations',
                       disable=True)
        else:
            bar = tqdm(range(self.shrink_iterations + 1),
                       desc='Shrink Iterations')

        for i in bar:
            last_iteration = i == self.shrink_iterations

            if last_iteration:
                pruning = self.prune_percentage
                ens = self.ensemble
            else:
                pruning = self.shrink_pruning
                ens = 1

            add_wrappers_to_model(self.model,
                                  masks_params=self.supermask_parameters,
                                  ensemble=ens,
                                  batch_ensemble=True)

            params = [
                param for name, param in self.model.named_parameters()
                if param.requires_grad and 'distributions' in name
            ]

            for _, module in self.model.named_modules():
                if isinstance(module, _BatchNorm):
                    params.extend(module.named_parameters())

            optim = Adam([
                param for name, param in self.model.named_parameters()
                if param.requires_grad and 'distributions' in name
            ], self.lr)

            train_scheduler = scheduler(optim)

            best_model, scores, best_model_scores, losses = be_model_training(
                model=self.model,
                optimizer=optim,
                epochs=self.mask_epochs,
                train_loader=train_dataset,
                scheduler=train_scheduler,
                early_stopping=early_stopping,
                test_loader=test_dataset,
                eval_loader=eval_dataset,
                device=self.device,
                w=self.divergence_w)
            self.model.load_state_dict(best_model)

            for _, module in self.model.named_modules():
                if isinstance(module, _BatchNorm):
                    module.reset_parameters()

            grads = defaultdict(lambda: defaultdict(list))

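            # Accumulate the absolute gradient of the loss w.r.t. each
            # BatchEnsemble mask over the training set; these per-layer scores
            # drive the pruning masks extracted further down.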
            for i, (x, y) in enumerate(train_dataset):
                x, y = x.to(self.device), y.to(self.device)
                bs = x.shape[0]
                x = torch.cat([x for _ in range(ens)], dim=0)

                outputs = self.model(x)
                outputs = outputs.view([ens, bs, -1])
                pred = torch.mean(outputs, 0)

                loss = torch.nn.functional.cross_entropy(pred,
                                                         y,
                                                         reduction='mean')

                self.model.zero_grad()
                loss.backward(retain_graph=True)

                for name, module in self.model.named_modules():
                    if isinstance(module, BatchEnsembleMaskedWrapper):
                        grad = torch.autograd.grad(loss,
                                                   module.last_mask,
                                                   retain_graph=True)
                        if last_iteration:
                            for i, g in enumerate(grad):
                                grads[i][name].append(torch.abs(g).cpu())
                        else:
                            grads[0][name].append(
                                torch.abs(torch.mean(torch.stack(grad, 0),
                                                     0)).cpu())

            self.model.zero_grad()

            remove_wrappers_from_model(self.model)

            for mi, ens_grads in tqdm(grads.items(),
                                      desc='Extracting inner models'):
                f = lambda x: torch.mean(x, 0)

                if self.grad_reduce == 'median':
                    f = lambda x: torch.median(x, 0)[0]
                elif self.grad_reduce == 'max':
                    f = lambda x: torch.max(x, 0)[0]

                ens_grads = {
                    name: f(torch.stack(gs, 0)).detach().cpu()
                    for name, gs in ens_grads.items()
                }

                masks = get_masks_from_gradients(
                    gradients=ens_grads,
                    prune_percentage=pruning,
                    global_pruning=self.global_pruning,
                    device=self.device)

                model = extract_inner_model(self.model,
                                            masks,
                                            re_init=self.re_init)

                if last_iteration:
                    self.models.append(model)
                else:
                    self.model = model

                # p = calculate_trainable_parameters(model)
                # print(mi, last_iteration, p, calculate_trainable_parameters(self.model))

        all_scores = []

        for i in tqdm(range(len(self.models)), desc='Training models'):
            model = self.models[i]

            optim = optimizer([
                param for name, param in model.named_parameters()
                if param.requires_grad
            ])

            train_scheduler = scheduler(optim)

            best_model, scores, best_model_scores, losses = train_model(
                model=model,
                optimizer=optim,
                epochs=epochs,
                train_loader=train_dataset,
                scheduler=train_scheduler,
                early_stopping=early_stopping,
                test_loader=test_dataset,
                eval_loader=eval_dataset,
                device=self.device)

            model.load_state_dict(best_model)
            all_scores.append(scores)
        return all_scores
Example #17
import pandas as pd
from sklearn.model_selection import train_test_split
import pickle
from config import DeepConnConfig
from data_set import DeepConnDataset
from model import DeepCoNN
from pre_precessing import load_embedding_weights, get_review_dict
from utils import train_model, val_iter
import torch
from torch.utils.data import DataLoader
path = '../data/office/'
device = torch.device('cuda:0')

df = pd.read_json(path+'reviews.json', lines=True)
train, test = train_test_split(df, test_size=0.2, random_state=3)
train, dev = train_test_split(train, test_size=0.2, random_state=4)
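# 80/20 train/test split, then a further 80/20 split of the train part for a dev set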


config = DeepConnConfig()
model = DeepCoNN(config, load_embedding_weights())
train_model(model, train, dev, config)

review_by_user, review_by_item = get_review_dict('test')
test_dataset = DeepConnDataset(test, review_by_user, review_by_item, config)
test_dataload = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=True, pin_memory=True)

model = torch.load(path+'best_model/best_model').to(device)
mse = val_iter(model, test_dataload)
print('test mse is {}'.format(mse))
Example #18
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Cifar10 Example')
    # Model parameters
    parser.add_argument('--model', type=str, default='VGGBinaryConnect', help='Model name: VGGBinaryConnect, VGGBinaryConnect_STE')
    parser.add_argument('--bnmomentum', type=float, default=0.2, help='BN layer momentum value')

    # Optimization parameters
    parser.add_argument('--optim', type=str, default='BayesBiNN', help='Optimizer: BayesBiNN, STE, Adam')
    parser.add_argument('--val-split', type=float, default=0.1, help='Random validation set ratio')
    parser.add_argument('--criterion', type=str, default='cross-entropy', help='loss function: square-hinge or cross-entropy')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--train-samples', type=int,default=1, metavar='N',
                        help='number of Monte Carlo samples used in BayesBiNN (default: 1), if 0, point estimate using mean')
    parser.add_argument('--test-samples', type=int, default=0, metavar='N',
                        help='number of Monte Carlo samples used in evaluation for BayesBiNN (default: 0)')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train (default: 500)')
    parser.add_argument('--lr', type=float, default= 1e-4, metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--lr-end', type=float, default=1e-16, metavar='LR',
                        help='end learning rate (default: 1e-16)')
    parser.add_argument('--lr-decay', type=float, default=0.9, metavar='LR-decay',
                        help='learning rate decay factor for each epoch (default: 0.9)')
    parser.add_argument('--decay-steps', type=int, default=1, metavar='N',
                        help='LR rate decay steps (default: 1)')   

    parser.add_argument('--momentum', type=float, default=0.0, metavar='M',
                        help='BayesBiNN momentum (default: 0.0)')
    parser.add_argument('--data-augmentation', action='store_true', default=True, help='Enable data augmentation')
    # Logging parameters
    parser.add_argument('--log-interval', type=int, default=10000, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--experiment-id', type=int, default=0, help='Experiment ID for log files (int)')
    # Computation parameters
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    parser.add_argument('--lrschedular', type=str, default='Cosine', help='Mstep,Expo,Cosine')


    parser.add_argument('--trainset_scale', type=int, default=10, metavar='N',
                        help='scale of the training set used in data augmentation')


    parser.add_argument('--lamda', type=float, default= 10, metavar='lamda-init',
                        help='initial mean value of the natural parameter lamda(default: 10)')

    parser.add_argument('--lamda-std', type=float, default=0, metavar='lamda-init',
                        help='initial std value of the natural parameter lamda (default: 0)')

    parser.add_argument('--temperature', type=float, default= 1e-10, metavar='temperature',
                        help='temperature for BayesBiNN')

    parser.add_argument('--bn-affine', type=float, default=0, metavar='bn-affine',
                        help='whether the BN layers have learnable affine parameters, 1: learnable, 0: no (default: 0)')


    args = parser.parse_args()

    if args.model == 'MLPBinaryConnect_STE':
        args.optim = 'STE' # in this case, only STE optimizer is used


    if args.lr_decay > 1:
        raise ValueError('The learning rate decay factor should not be larger than 1!')

    args.use_cuda = not args.no_cuda and torch.cuda.is_available()
   
    ngpus_per_node = torch.cuda.device_count()

    gpu_num = []
    for i in range(ngpus_per_node):
        gpu_num.append(i)

    print("Number of GPUs:%d", ngpus_per_node)

    gpu_devices = ','.join([str(id) for id in gpu_num])
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_devices

    if ngpus_per_node > 0:
        print("Use GPU: {} for training".format(gpu_devices))


    torch.manual_seed(args.seed + args.experiment_id)
    np.random.seed(args.seed + args.experiment_id)
    now = time.strftime("%Y_%m_%d_%H_%M_%S",time.localtime(time.time())) # to avoid overwrite
    args.out_dir = os.path.join('./outputs', 'cifar10_{}_{}_lr{}_{}_id{}'.format(args.model, args.optim,args.lr,now,args.experiment_id))
    os.makedirs(args.out_dir, exist_ok=True)

    config_save_path = os.path.join(args.out_dir, 'configs', 'config_{}.json'.format(args.experiment_id))
    os.makedirs(os.path.dirname(config_save_path), exist_ok=True)
    with open(config_save_path, 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    args.device = torch.device("cuda" if args.use_cuda else "cpu")
    print('Running on', args.device)
    print('===========================')
    for key, val in vars(args).items():
        print('{}: {}'.format(key, val))
    print('===========================\n')


    # Data augmentation for cifar10
    if args.data_augmentation:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    # Defining the dataset
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.use_cuda else {}
    train_dataset = datasets.CIFAR10('./data', train=True, download=True, transform=transform_train)

    if args.val_split > 0 and args.val_split < 1:
        val_dataset = datasets.CIFAR10('./data', train=True, download=True, transform=transform_test)

        num_train = len(train_dataset)
        indices = list(range(num_train))
        split = int(np.floor(args.val_split * num_train))
        np.random.shuffle(indices)

        train_idx, val_idx = indices[split:], indices[:split]
        train_sampler = SubsetRandomSampler(train_idx)
        val_sampler = SubsetRandomSampler(val_idx)

        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs
        )
        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=args.batch_size, sampler=val_sampler, **kwargs
        )
        print('{} train and {} validation datapoints.'.format(len(train_loader.sampler), len(val_loader.sampler)))
    else:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs
        )
        val_loader = None
        print('{} train and {} validation datapoints.'.format(len(train_loader.sampler), 0))

    test_dataset = datasets.CIFAR10('./data', train=False, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.test_batch_size, shuffle=True, **kwargs
    )
    print('{} test datapoints.\n'.format(len(test_loader.sampler)))


    # Defining the model.
    in_channels, out_features = 3, 10
    if args.model == 'VGGBinaryConnect': #
        model = VGGBinaryConnect(in_channels, out_features, eps=1e-5, momentum=args.bnmomentum,batch_affine=(args.bn_affine==1))
    elif args.model == 'VGGBinaryConnect_STE':
        model = VGGBinaryConnect_STE(in_channels, out_features, eps=1e-5, momentum=args.bnmomentum,
                                     batch_affine=(args.bn_affine == 1))

    else:
        raise ValueError('Undefined Network')
    print(model)

    num_parameters = sum([l.nelement() for l in model.parameters()])
    print("Number of Network parameters: {}".format(num_parameters))

    model = torch.nn.DataParallel(model,device_ids=gpu_num)
    
    model = model.to(args.device)

    cudnn.benchmark = True
    # Defining the optimizer
    if args.optim == 'Adam' or args.optim == 'STE':
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

    elif args.optim == 'BayesBiNN':

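        # The training-set size passed to BayesBiNN is scaled by --trainset_scale
        # to account for data augmentation (see the argument's help text above).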
        effective_trainsize = len(train_loader.sampler) * args.trainset_scale

        optimizer = BayesBiNN(model,lamda_init = args.lamda,lamda_std = args.lamda_std,  temperature = args.temperature, train_set_size=effective_trainsize, lr=args.lr, betas=args.momentum, num_samples=args.train_samples)

    # Defining the criterion
    if args.criterion == 'square-hinge':
        criterion = SquaredHingeLoss() # use the squared hinge loss for MNIST dataset
    elif args.criterion == 'cross-entropy':
        criterion = nn.CrossEntropyLoss() # this loss depends on the model output, remember to change the model output
    else:
        raise ValueError('Please select loss criterion in {square-hinge, cross-entropy}')

    start = time.time()

    # Training the model
    results = train_model(args, model, [train_loader, val_loader, test_loader], criterion, optimizer)
    model, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc = results
    save_train_history(args, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc)
    # plot_result(args, train_loss, train_acc, test_loss, test_acc)

    time_total=timeSince(start)

    print('Task completed in {:.0f}m {:.0f}s'.format(
        time_total // 60, time_total % 60))
Example #19
num_epochs = 5

#Feed Forward Net
model = FFNet()

#Convolutional Net
#model = ConvNet()

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

start_time = time.time()

train_acc_history, test_acc_history = \
    utils.train_model(model, criterion, optimizer, train_loader, test_loader, num_epochs)

training_time = time.time() - start_time
training_time = time.gmtime(training_time)

print(
    "Total Training Time:",
    str(training_time.tm_hour) + ":" + str(training_time.tm_min) + ":" +
    str(training_time.tm_sec))

print("Training Set Accuracy:", train_acc_history[-1])
print("Test Set Accuracy:", test_acc_history[-1])

plt.plot(range(len(train_acc_history)),
         train_acc_history,
         label='Training Data')
Ejemplo n.º 20
0
#----------------------------
# Optimizer and loss function
#----------------------------
for learnR in [0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1]:
    print("Learning rate:", learnR)
    # Re-instantiate the network for every learning rate so each run starts from fresh weights
    cnn = model_task2c.PR_CNN()
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(cnn.parameters(), lr=learnR)

    #----------------
    # Train the model
    #----------------
    _, _, _, _, best_model = utils.train_model(cnn, dataloader_train_set,
                                               dataloader_val, nb_epoch,
                                               optimizer, loss_fn)

    #---------------
    # Test the model
    #---------------
    # load test set
    test_set = []
    labels_test = []
    with open('../dataset/mnist_test.csv', 'r') as f:
        reader = csv.reader(f)
        for line in reader:
            labels_test.append(np.array(line[0]).astype(float))
            test_set.append(np.array(line[1:]).astype(float))
Ejemplo n.º 21
0
#  is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            print("\t", name)

# Only the parameters gathered above are passed to the optimizer (all of them unless feature extracting)
optimizer_ft = optim.SGD(params_to_update, lr=base_lr, momentum=0.9)

scheduler = StepLR(optimizer_ft, step_size=step_size, gamma=gamma)

#################################
# Set up training
#################################

# Setup the loss fxn
criterion = nn.CrossEntropyLoss(weight=torch.Tensor(cls_weight).to(device))

# Train and evaluate
model_ft, hist = utils.train_model(model_ft, \
    dataloaders_dict, criterion, optimizer_ft, device, scheduler, logger, output_dir, \
    num_epochs=num_epochs, is_inception=(model_name=="inception"))
Ejemplo n.º 22
0
            sweep_summaries.append(OrderedDict(value_tuple))

for i in range(num_random_hyperpars):
    print('Random hyperpar setting {} of {}'.format(i + 1,
                                                    num_random_hyperpars))
    model_save_path = model_save_path_base + '_' + str(i) + '.ckpt'
    hyperpars = {k: random.choice(v) for k, v in param_grid.items()}

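    # Alternate 8/16 processing (message-passing) steps; separate edge outputs only on every fourth setting (i % 4 == 3)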
    num_processing_steps = 8 if i % 2 == 0 else 16
    hyperpars['num_processing_steps'] = num_processing_steps
    separate_edge_output = i % 4 > 2
    hyperpars['separate_edge_output'] = separate_edge_output

    selected_grid = OrderedDict(sorted(hyperpars.items()))
    hyperpars.update(fixed_params)
    utils.train_model(hyperpars, TRAIN_GRAPHS, TRAIN_TARGET_GRAPHS,
                      EDGE_PERMUTATIONS, train_ids, valid_ids, model_save_path)
    valid_score, valid_mae = utils.validate_model(hyperpars, TRAIN_GRAPHS,
                                                  TRAIN_TARGET_GRAPHS,
                                                  EDGE_PERMUTATIONS, valid_ids,
                                                  model_save_path)
    validation = OrderedDict()
    validation['Score'] = valid_score
    validation['MAE'] = valid_mae
    summary_dict = OrderedDict(
        list(validation.items()) + list(selected_grid.items()))
    sweep_summaries.append(summary_dict)

    sweep_results = pd.DataFrame(sweep_summaries)
    sweep_results.to_csv(sweep_path, index=False)
Ejemplo n.º 23
0
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir
        print(model_dir)
        if 'test' in model_dir and debug and os.path.exists(model_dir):
            import shutil
            shutil.rmtree(model_dir)
        assert not os.path.exists(model_dir)
        os.mkdir(model_dir)

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32,
                                     shape=(None, args.image_size,
                                            args.image_size, 3),
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 136),
                                        name='landmark_batch')
        attribute_batch = tf.placeholder(tf.int32,
                                         shape=(None, 6),
                                         name='attribute_batch')
        euler_angles_gt_batch = tf.placeholder(tf.float32,
                                               shape=(None, 3),
                                               name='euler_angles_gt_batch')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch
        list_ops['attribute_batch'] = attribute_batch
        list_ops['euler_angles_gt_batch'] = euler_angles_gt_batch

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')
        landmarks_pre, euler_angles_pre = create_model(
            image_batch, landmark_batch, phase_train_placeholder, args)

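        # Per-sample weight: sum over the three Euler angles of (1 - cos(angle error))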
        _sum_k = tf.reduce_sum(
            tf.map_fn(lambda x: 1 - tf.cos(abs(x)),
                      euler_angles_gt_batch - euler_angles_pre),
            axis=1)

        # attributes_w_n = tf.to_float(attribute_batch[:, 1:6])
        # mat_ratio = tf.reduce_mean(attributes_w_n, axis=0)
        # # bbb = tf.map_fn(lambda x: 1.0 / x if not x == 0.0 else float(args.batch_size), bbb)
        # mat_ratio = tf.where(tf.equal(mat_ratio, 0.0), (mat_ratio + 1) / float(args.batch_size), 1.0 / mat_ratio)
        # attributes_w_n = tf.reduce_sum(attributes_w_n * mat_ratio, axis=1)

        regularization_loss = tf.add_n(tf.losses.get_regularization_losses())

        # contour_loss = WingLoss(landmark_batch[:, 0:34], landmarks_pre[:, 0:34], 4.0, 0.50)
        # inner_brow_loss = 3*WingLoss(landmark_batch[:, 34:54], landmarks_pre[:, 34:54], 4.0, 0.50)
        # inner_nose_loss = 3*WingLoss(landmark_batch[:, 54:72], landmarks_pre[:, 54:72], 4.0, 0.50)
        # inner_eye_loss = 9*WingLoss(landmark_batch[:, 72:96], landmarks_pre[:, 72:96], 4.0, 0.50)
        # inner_mouth_loss = 15*WingLoss(landmark_batch[:, 96:136], landmarks_pre[:, 96:136], 4.0, 0.50)
        # loss_sum = tf.add_n([contour_loss, inner_brow_loss, inner_nose_loss, inner_eye_loss, inner_mouth_loss])
        # contour_loss = tf.reduce_mean(contour_loss * _sum_k)
        # inner_brow_loss = tf.reduce_mean(inner_brow_loss * _sum_k)
        # inner_nose_loss = tf.reduce_mean(inner_nose_loss * _sum_k)
        # inner_eye_loss = tf.reduce_mean(inner_eye_loss * _sum_k)
        # inner_mouth_loss = tf.reduce_mean(inner_mouth_loss * _sum_k)

        # loss_sum = L2Loss(landmark_batch, landmarks_pre)
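        # Wing loss between predicted and ground-truth landmarks (parameters 4.0, 0.50)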
        loss_sum = WingLoss(landmark_batch, landmarks_pre, 4.0, 0.50)
        # loss_sum = tf.reduce_mean(loss_sum*_sum_k*attributes_w_n)
        loss_sum = tf.reduce_mean(loss_sum * _sum_k)
        loss_sum += regularization_loss

        # tf.summary.scalar("contour_loss", contour_loss)
        # tf.summary.scalar("inner_brow_loss", inner_brow_loss)
        # tf.summary.scalar("inner_nose_loss", inner_nose_loss)
        # tf.summary.scalar("inner_eye_loss", inner_eye_loss)
        # tf.summary.scalar("inner_mouth_loss", inner_mouth_loss)
        tf.summary.scalar("loss", loss_sum)

        save_params = tf.trainable_variables()
        # variables_to_train = [v for v in save_params if v.name.split('/', 2)[1] == 'fc'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv1'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv2'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv3'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv4'
        #                       or v.name.split('/', 1)[0] == 'pool1'
        #                       or v.name.split('/', 1)[0] == 'Flatten'
        #                       or v.name.split('/', 1)[0] == 'pfld_fc1'
        #                       or v.name.split('/', 1)[0] == 'pfld_fc2']
        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args, save_params)

        list_ops['landmarks'] = landmarks_pre
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op
        list_ops['regularization_loss'] = regularization_loss
        list_ops['loss'] = loss_sum
        # list_ops['contour_loss'] = contour_loss
        # list_ops['inner_brow_loss'] = inner_brow_loss
        # list_ops['inner_nose_loss'] = inner_nose_loss
        # list_ops['inner_eye_loss'] = inner_eye_loss
        # list_ops['inner_mouth_loss'] = inner_mouth_loss

        # from tensorflow.contrib.framework.python.framework import checkpoint_utils
        # var_list = checkpoint_utils.list_variables("./pretrained_models/model.ckpt-51")
        # for v in var_list:
        #     print(v)

        # Load only part of the weights: everything except the final fc output layer
        # variables_to_restore = [v for v in save_params if v.name.split('/', 2)[1] != 'fc']
        # restorer = tf.train.Saver(variables_to_restore, max_to_keep=None)

        restorer = tf.train.Saver(save_params, max_to_keep=None)
        saver = tf.train.Saver(save_params, max_to_keep=None)

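        # Limit TensorFlow to 80% of GPU memory instead of allocating it all up front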
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.80)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        with sess.as_default():
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if (not os.path.isdir(pretrained_model)):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    restorer.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    model_path = ckpt.model_checkpoint_path
                    assert (ckpt and model_path)
                    print('Checkpoint file: {}'.format(model_path))
                    restorer.restore(sess, model_path)

            if args.save_image_example:
                save_image_example(sess, list_ops, args)

            merged = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(args.loss_log, sess.graph)
            sess.graph.finalize()  # catch any new graph nodes accidentally added inside the training loop
            print('Running train.')
            for epoch in range(args.max_epoch):
                train(sess, epoch_size, epoch, args.max_epoch, list_ops,
                      merged, summary_writer)
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                test(sess, list_ops, args)
Ejemplo n.º 24
0
data_dir = results.data_dir
structure = results.arch
save_dir = results.save_dir
learning_rate = results.learning_rate
hidden_units = results.hidden_units
epochs = results.epochs
gpu = results.gpu

device = torch.device("cuda:0" if gpu else "cpu")

#Load data
dataloaders, image_datasets = utils.load_data(data_dir)

#Setup model parameters
model, criterion, optimizer = utils.pretrained_model(structure, hidden_units, learning_rate)
model = model.to(device)

#train model
model = utils.train_model(model, criterion, optimizer, dataloaders, device, epochs)

#save model
model.class_to_idx = image_datasets['train'].class_to_idx

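# Bundle resume state (epoch, optimizer, weights) and the class-to-index mapping into one checkpoint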
checkpoint = {'epoch': epochs, 
              'class_to_idx': model.class_to_idx,
              'layers': results.hidden_units,
              'optimizer_state_dict': optimizer.state_dict(), 
              'model_state_dict': model.state_dict()}

torch.save(checkpoint, 'checkpoint2.pth')
Ejemplo n.º 25
0
def train(checkpoint_path="checkpoints/", checkpoint_name=None):
	freeze_layers = False
	input_shape = 299
	# batch_size = 256 # For resnet101 on 2070
	# batch_size = 16 # For resnet101 on 2070
	batch_size = 200
	mean = [0.5, 0.5, 0.5]
	std = [0.5, 0.5, 0.5]
	scale = 299
	use_parallel = False
	use_gpu = True
	epochs = 100

	epoch = 0
	global_step = 0
	# model_conv = torchvision.models.resnet50(pretrained="imagenet").cuda()
	# model_conv = torchvision.models.resnet101(pretrained="imagenet").cuda()
	model_conv = torchvision.models.resnet18(pretrained=True).cuda()  # ImageNet-pretrained weights


	# Stage-1: optionally freeze all the pretrained layers
	# if freeze_layers:
	# 	for i, param in model_conv.named_parameters():
	# 		param.requires_grad = False

	n_class = len(glob(data_dir + "train/*"))

	# ImageNet has 1000 classes, so replace the final fc layer to match the number of classes in our dataset
	num_ftrs = model_conv.fc.in_features
	model_conv.fc = nn.Linear(num_ftrs, n_class).cuda()

	# model_conv.fc = nn.Sequential(nn.Linear(num_ftrs, 512),
#                                # nn.ReLU(),
#                                # nn.Dropout(0.2),
#                                # nn.Linear(512, n_class),
#                                nn.LogSoftmax(dim=1)).cuda()

	# model_conv.fc = nn.Sequential(nn.Linear(num_ftrs, n_class),
	# 						 # nn.ReLU(),
	# 						 # nn.Dropout(0.2),
	# 						 # nn.Linear(512, n_class),
	# 						 nn.LogSoftmax(dim=1)).cuda()


	if checkpoint_name is not None:
		checkpoint = torch.load(checkpoint_path + checkpoint_name)
		model_conv.load_state_dict(checkpoint["model_state_dict"])
		epoch = int(checkpoint["epoch"] + 1)
		global_step = int(checkpoint["global_step"])

	data_transforms = {
			'train': transforms.Compose([
			transforms.CenterCrop(input_shape),
			transforms.Resize(scale),
			transforms.RandomResizedCrop(input_shape),
			transforms.RandomHorizontalFlip(),
	#         transforms.RandomVerticalFlip(),
			transforms.ColorJitter(hue=.05, saturation=.05, brightness=.15, contrast=.05),
			transforms.RandomRotation(degrees=90),
			transforms.ToTensor(),
			transforms.Normalize(mean, std)]),
			'val': transforms.Compose([
			transforms.CenterCrop(input_shape),
			transforms.Resize(scale),
			transforms.ToTensor(),
			transforms.Normalize(mean, std)]),
	}

	image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
									  data_transforms[x]) for x in ['train', 'val']}
	dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
											 shuffle=True, num_workers=8) for x in ['train', 'val']}

	dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
	class_names = image_datasets['train'].classes

	if use_parallel:
		print("[Using all the available GPUs]")
		model_conv = nn.DataParallel(model_conv, device_ids=[0, 1])

	print("[Using CrossEntropyLoss...]")
	criterion = nn.CrossEntropyLoss()

	print("[Using small learning rate with momentum...]")
	optimizer_conv = optim.SGD(list(filter(lambda p: p.requires_grad, model_conv.parameters())), lr=0.001, momentum=0.9)
	if checkpoint_name is not None:
		optimizer_conv.load_state_dict(checkpoint["optimizer_state_dict"])


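	# Apex AMP O1: cast whitelisted ops to FP16 while keeping FP32 master weights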
	model_conv, optimizer_conv = amp.initialize(model_conv, optimizer_conv, opt_level="O1")
	print("[Creating Learning rate scheduler...]")
	exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)

	print("[Training the model begun ....]")
	model_ft = utils.train_model(model_conv, dataloaders, dataset_sizes, criterion, optimizer_conv, exp_lr_scheduler, use_gpu,
					 num_epochs=epochs, checkpoint_path=checkpoint_path, epoch_start=int(epoch), global_step=int(global_step))
Ejemplo n.º 26
0
    model_name = args.model_name
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    epochs = args.epochs
    seed = args.seed
    max_len = args.max_len
    class_names = ['negative', 'neutral', 'positive']
    train_path = f'{args.data_folder}/train.csv'
    validation_path = f'{args.data_folder}/validation.csv'
    test_path = f'{args.data_folder}/test.csv'
    output_folder = args.output_folder

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

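    # Load the tokenizer that matches the pretrained BERT checkpoint given by model_name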
    tokenizer = BertTokenizer.from_pretrained(model_name)
    # CREATE DATA LOADERS
    train_data_loader, df_train = create_data_loader(train_path, tokenizer,
                                                     max_len, batch_size)
    val_data_loader, df_val = create_data_loader(validation_path, tokenizer,
                                                 max_len, batch_size)
    test_data_loader, df_test = create_data_loader(test_path, tokenizer,
                                                   max_len, batch_size)

    # INSTANTIATE MODEL
    model = SentimentClassifier(len(class_names), model_name)
    model = model.to(device)

    train_model(model, train_data_loader, df_train, val_data_loader, df_val,
                epochs, learning_rate, device, output_folder)
Ejemplo n.º 27
0
        #L[sens_arg-1]=-1
        X.append(L)

        if (int(line1[-1]) == 0):
            Y.append(-1)
        else:
            Y.append(1)

X = np.array(X, dtype=float)
Y = np.array(Y, dtype=float)
sensitive[name] = np.array(sens, dtype=float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {
    name: cov
}

gamma = None

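# Positional flags 1, 0: apply the fairness (covariance) constraint, skip the accuracy constraint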
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint,
                   sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)

i = 2
inp = []
while len(inp) < 20:
    inp.append(int(sys.argv[i]))
    i += 1

print np.sign(np.dot(w, inp))
Ejemplo n.º 28
0
import re

from pyspark import SparkContext
from pyspark.sql import SparkSession
from get_data import get_all_memes, save_meme_tags, get_memedroid_data, get_twitter_data, get_imgur_data, get_reddit_data
from utils import train_model, get_popular_tag, get_source_upvotes, upload_blob, upload_to_bucket
from pyspark.ml import PipelineModel
from google.cloud import storage

sc = SparkContext.getOrCreate()
spark = SparkSession.builder.getOrCreate()

number_of_clusters = 7

memes_df = get_all_memes(spark)
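# Cache the memes DataFrame: it is reused for deduplicated training and for the transform below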
memes_df = memes_df.cache()
memes_df_train = memes_df.drop_duplicates(subset=['text'])
model = train_model(memes_df_train, number_of_clusters, 50)

# model = PipelineModel.load("hdfs:///models/model")
X = model.transform(memes_df)
X = X.select('id', 'prediction', 'additional_data', 'source')
X = X.cache()

save_meme_tags(X, number_of_clusters)

cluster_names = []
for cluster_id in range(number_of_clusters):
    words = sc.textFile("hdfs:///tags/all_tags_{0}".format(
        cluster_id)).flatMap(lambda line: line.split(" "))
    wordCounts = words.map(lambda word: (re.sub(
        '[^A-Za-z0-9]+', '', word.lower()), 1)).reduceByKey(lambda a, b: a + b)
    tags = wordCounts.sortBy(lambda atuple: atuple[1],
Ejemplo n.º 29
0
        vocab_size = len(tok_to_ix)
        output_size = len(np.unique(labels_train))

        model = GRU(hidden_size=hidden_size,
                    num_layers=num_layers,
                    vocab_size=vocab_size,
                    output_size=output_size)
        model = model.to(device)
        loss_func = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)

        model = train_model(model=model,
                            loss_func=loss_func,
                            optimizer=optimizer,
                            data_train=data_train,
                            labels_train=labels_train,
                            n_epochs=args.n_epochs,
                            batch_size=args.batch_size,
                            save_path=SAVE_PATH,
                            device=device)
    else:
        """
        File was not run with --train_from_scratch, so simply load the model from its saved path
        """
        model = torch.load(SAVE_PATH)
    """
    Whether we're training or just loading the pretrained model, we finish by
    evaluating the model on the testing set.
    """
    evaluate_model(model=model,
                   tok_to_ix=tok_to_ix,
Ejemplo n.º 30
0
dimension = 14
step_size = 1
batch_size = 32
epoch = 10000
Num_data = 53860
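# Heuristic hidden-layer size: N_samples / (a * (window_size*dimension + dimension)); a fixed 200 is used below instead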
a = 6  # parameter to set the No of hidden units
hidden_unit = int(Num_data / (a * (window_size * dimension + dimension)))
print('number of hidden unit is ' + str(hidden_unit))
hidden_unit = 200
train_data, train_label = utils.load_data_one_step_prediction(
    data_path, step_size=step_size, window_size=window_size, moving_only=False)
train_data, train_label = utils.normalize(train_data, train_label)
print(len(train_data))
print(len(train_label))
print(train_data[0].shape)
print(train_label[0].shape)
model2 = models.onestepModel(train_data[0].shape, hidden_units=hidden_unit)
model = model2.build_model()
model, call_back = utils.train_model(model,
                                     train_data,
                                     train_label,
                                     batch_s=batch_size,
                                     epo=epoch)
data_path = os.getcwd() + '/Data/NY531/test/'
test_data, test_label = utils.load_data_one_step_prediction(
    data_path, step_size=step_size, window_size=window_size, moving_only=False)
print(len(test_data))
print(len(test_label))
test_data, test_label = utils.normalize(test_data, test_label)
utils.save_result_one_step_prediction(model, test_data, test_label, call_back)
Ejemplo n.º 31
0
        #tr_recs = open(tr_rec_name,'r').read().split('\n')[:e0]
        rec = max([float(r) for r in va_recs])

    else:
        e0 = 0
        rec = 0

    if args.totrain:
        print '\nstart training from epoch {}'.format(e0)
        train_model(model,
                    train_x[:size],
                    train_y[:size],
                    valid_x,
                    valid_y,
                    lr0,
                    lrdecay,
                    bs,
                    epochs,
                    0,
                    name,
                    e0,
                    rec,
                    print_every=999999)
    else:
        print '\nno training'

    tr_acc = evaluate_model(model.predict_proba, train_x[:size],
                            train_y[:size])
    print 'train acc: {}'.format(tr_acc)

    va_acc = evaluate_model(model.predict_proba, valid_x, valid_y, n_mc=200)
    print 'valid acc: {}'.format(va_acc)
        conv1x1 = conv2d_bn(x, 512, 1)

        conv3x3 = conv2d_bn(x, 440, 1)
        conv1x3 = conv2d_bn(conv3x3, 512, (1, 3))
        conv3x1 = conv2d_bn(conv3x3, 512, (3, 1))

        conv3x3dbl = conv2d_bn(x, 440, 1)
        conv3x3dbl = conv2d_bn(conv3x3dbl, 440, (3, 3))
        conv1x3dbl = conv2d_bn(conv3x3dbl, 512, (1, 3))
        conv3x1dbl = conv2d_bn(conv3x3dbl, 512, (3, 1))

        pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        pool = conv2d_bn(pool, 512, 1)

        x = concatenate([conv1x1, conv1x3, conv3x1, conv1x3dbl, conv3x1dbl, pool], name='inception3_mix' + str(i))

    x = GlobalAveragePooling2D()(x)

    flattened = Flatten()(x)
    outputs = Dense(nb_classes, activation='softmax')(flattened)

    model = Model(inputs=input, outputs=outputs)

    return model


model = Inceptionv3(nb_classes, input_shape=INPUT_SHAPE)
model.compile(optimizer='RMSprop', loss='categorical_crossentropy', metrics=['accuracy'])

train_model(model)