Example #1
# a settings.json file should be included for logging to comet.
# ----------------------------------------------------------------
if args.debug:
    args.comet = False  # no logging when debugging.
    import ipdb
    ipdb.set_trace()

if args.comet:
    if os.path.isfile("./rlscripts/settings.json"):
        with open('./rlscripts/settings.json') as f:
            data = json.load(f)
        args.comet_apikey = data["api_key"]
        args.comet_username = data["workspace"]
        args.comet_project = data["project_name"]
    experiment = Experiment(api_key=args.comet_apikey,
                            project_name=args.comet_project,
                            workspace=args.comet_username,
                            auto_output_logging="native")
    experiment.set_name(args.namestr)
    experiment.add_tag(args.tag)
    args.experiment = experiment

# Globals
episode_num = 0
last_time = time.time()
order_choices = np.arange(3)
save_name = args.namestr + "_run"
action_sequence = []
state_draw_sequence = []
state_position_sequence = []
decision_time_logs = []
reward_logs = []
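A minimal sketch of the settings.json file the block above assumes; the keys mirror the lookups it performs (api_key, workspace, project_name) and the values are placeholders:

# Hypothetical helper that writes ./rlscripts/settings.json with the keys read above.
# Substitute your own Comet credentials for the placeholder values.
import json
import os

os.makedirs("./rlscripts", exist_ok=True)
with open("./rlscripts/settings.json", "w") as f:
    json.dump({
        "api_key": "YOUR-COMET-API-KEY",
        "workspace": "your-workspace",
        "project_name": "your-project",
    }, f, indent=2)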
Example #2
from comet_ml import Experiment  # used below; missing from this excerpt (comet_ml is conventionally imported before torch)
import os
import sys  # used by sys.stdout.write below
import numpy
import torch
import dataset
import time
import model
import train1 as train
import test1 as test
import warnings

warnings.simplefilter('ignore')
start_time = time.time()

if o.opt.comet:
    experiment = Experiment(api_key="aSb5hPnLFt1wjOyjJfTJy4fkJ",
                            project_name="general",
                            workspace="arjunsbalaji")
else:
    experiment = None
    sys.stdout.write('No comet logging' + '\n')

if o.opt.loadcheckpoint is not None:
    checkpoint = torch.load(o.opt.loadcheckpoint)
else:
    checkpoint = None

data = dataset.OCTDataset(o.opt.dataroot,
                          start_size=o.opt.start_size,
                          cropped_size=o.opt.c_size,
                          transform=o.opt.transforms,
                          input_images=[0, 1, 2])
Example #3
from __future__ import print_function

from comet_ml import Experiment

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.datasets import fetch_20newsgroups
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score


experiment = Experiment(api_key="YOUR-API-KEY", project_name='20 newsgroups project')

# Get dataset and put into train,test lists
categories = ['alt.atheism', 'soc.religion.christian',
              'comp.graphics', 'sci.med']

twenty_train = fetch_20newsgroups(subset='train',categories=categories, shuffle=True, random_state=42)
twenty_test = fetch_20newsgroups(subset='test',categories=categories, shuffle=True, random_state=42)

#log hash of your dataset to Comet.ml
experiment.log_dataset_hash(twenty_train)

# Build training pipeline

text_clf = Pipeline([('vect', CountVectorizer()), # Counts occurrences of each word
                     ('tfidf', TfidfTransformer()), # Normalize the counts based on document length
                     ('clf', SGDClassifier(loss='hinge', penalty='l2', # Call classifier with vector
                                           alpha=1e-3, random_state=42,
                                           max_iter=5, tol=None)),
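The listing is cut off inside the Pipeline definition; assuming it closes as written, a typical continuation (not part of the original) fits the pipeline and logs test accuracy to the experiment:

# Assumed continuation of the truncated listing above: fit the pipeline on the
# training split, predict on the test split, and log accuracy to the Comet experiment.
text_clf.fit(twenty_train.data, twenty_train.target)
predicted = text_clf.predict(twenty_test.data)
experiment.log_metric("accuracy", accuracy_score(twenty_test.target, predicted))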
Example #4
def run_HAC(FLAGS, env, agent):
    experiment = Experiment(api_key="M03EcOc9o9kiG95hws4mq1uqI",
                            project_name="HAC",
                            workspace="antonwiehe")

    # Print task summary
    print_summary(FLAGS, env)

    # Determine training mode.  If not testing and not solely training, interleave training and testing to track progress
    mix_train_test = False
    if not FLAGS.test and not FLAGS.train_only:
        mix_train_test = True

    for batch in range(NUM_BATCH):

        num_episodes = agent.other_params["num_exploration_episodes"]

        # Evaluate policy every TEST_FREQ batches if interleaving training and testing
        if mix_train_test and batch % TEST_FREQ == 0:
            print("\n--- TESTING ---")
            agent.FLAGS.test = True
            num_episodes = num_test_episodes

            # Reset successful episode counter
            successful_episodes = 0

        for episode in range(num_episodes):

            print("\nBatch %d, Episode %d" % (batch, episode))

            # Train for an episode
            success = agent.train(env, episode)

            if success:
                print("Batch %d, Episode %d End Goal Achieved\n" %
                      (batch, episode))

                # Increment successful episode counter if applicable
                if mix_train_test and batch % TEST_FREQ == 0:
                    successful_episodes += 1

        # Save agent
        agent.save_model(episode)

        # Finish evaluating policy if tested prior batch
        if mix_train_test and batch % TEST_FREQ == 0:

            # Log performance
            success_rate = successful_episodes / num_test_episodes * 100
            print("\nTesting Success Rate %.2f%%" % success_rate)
            agent.log_performance(success_rate)
            agent.FLAGS.test = False

            experiment.set_step(batch)
            experiment.log_metric("Success rate", success_rate)
            success_list.append(success_rate)
            with open("successRates.csv", 'w', newline='') as myfile:
                wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
                wr.writerow(success_list)

            if success_rate > 95:
                print("Success rate over 95%!")
                break

            print("\n--- END TESTING ---\n")
Example #5
from comet_ml import Experiment
import matplotlib

experiment = Experiment("m487mKQwNTqFF4z7aZRX3Xv19",
                        project_name="FCN_CS",
                        log_env_gpu=True)
matplotlib.use("Agg")

# import packages
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
# from sklearn.metrics import classification_report
# from imutils import paths
from keras.optimizers import Adam
from dataLoader import Dataloader
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
import os
from Models import UNET, FCN
import tensorflow as tf
from keras import backend as K


def weighted_crossentropy(class_weights):
    weights = class_weights

    def w_cce(y_true, y_pred):
        def _to_tensor(x, dtype):
            """Convert the input `x` to a tensor of type `dtype`.
            # Arguments
Example #6
def train():
    """Train SqueezeSeg model"""
    assert FLAGS.dataset == 'KITTI', \
        'Currently only support KITTI dataset'

    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    with tf.Graph().as_default():

        assert FLAGS.net == 'squeezeSeg', \
            'Selected neural net architecture not supported: {}'.format(FLAGS.net)

        if FLAGS.net == 'squeezeSeg':
            mc = kitti_squeezeSeg_config()
            mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
            model = SqueezeSeg(mc)

        imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)

        # save model size, flops, activations by layers
        with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'),
                  'w') as f:
            f.write('Number of parameter by layer:\n')
            count = 0
            for c in model.model_size_counter:
                f.write('\t{}: {}\n'.format(c[0], c[1]))
                count += c[1]
            f.write('\ttotal: {}\n'.format(count))

            count = 0
            f.write('\nActivation size by layer:\n')
            for c in model.activation_counter:
                f.write('\t{}: {}\n'.format(c[0], c[1]))
                count += c[1]
            f.write('\ttotal: {}\n'.format(count))

            count = 0
            f.write('\nNumber of flops by layer:\n')
            for c in model.flop_counter:
                f.write('\t{}: {}\n'.format(c[0], c[1]))
                count += c[1]
            f.write('\ttotal: {}\n'.format(count))
        f.close()
        print('Model statistics saved to {}.'.format(
            os.path.join(FLAGS.train_dir, 'model_metrics.txt')))

        def enqueue(sess, coord):
            with coord.stop_on_exception():
                while not coord.should_stop():
                    # read batch input
                    lidar_per_batch, lidar_mask_per_batch, label_per_batch,\
                        weight_per_batch = imdb.read_batch()

                    feed_dict = {
                        model.ph_keep_prob: mc.KEEP_PROB,
                        model.ph_lidar_input: lidar_per_batch,
                        model.ph_lidar_mask: lidar_mask_per_batch,
                        model.ph_label: label_per_batch,
                        model.ph_loss_weight: weight_per_batch,
                    }

                    sess.run(model.enqueue_op, feed_dict=feed_dict)

        saver = tf.train.Saver(tf.all_variables())
        summary_op = tf.summary.merge_all()
        init = tf.initialize_all_variables()

        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        coord = tf.train.Coordinator()
        enq_threads = []
        for _ in range(mc.NUM_ENQUEUE_THREAD):
            eqth = threading.Thread(target=enqueue, args=[sess, coord])
            eqth.start()
            enq_threads.append(eqth)

        run_options = tf.RunOptions(timeout_in_ms=60000)

        # Create an experiment with your api key
        experiment = Experiment(api_key="lISr0JWgyUIsox8HYPC3isnTP",
                                project_name="squeezeSeg_1080ti",
                                workspace="asimonov")
        hyper_params = {
            "learning_rate": mc.LEARNING_RATE,
            "steps": FLAGS.max_steps,
            "batch_size": mc.BATCH_SIZE
        }
        experiment.log_multiple_params(hyper_params)
        # some_param = "some value"
        # experiment.log_parameter("param name", some_param)

        try:
            for step in xrange(FLAGS.max_steps):
                start_time = time.time()

                experiment.set_step(step)

                if step % FLAGS.summary_step == 0 or step == FLAGS.max_steps - 1:
                    op_list = [
                        model.lidar_input, model.lidar_mask, model.label,
                        model.train_op, model.loss, model.pred_cls, summary_op
                    ]

                    lidar_per_batch, lidar_mask_per_batch, label_per_batch, \
                        _, loss_value, pred_cls, summary_str = sess.run(op_list,
                                                                        options=run_options)

                    experiment.log_metric("loss", loss_value)

                    label_image = visualize_seg(label_per_batch[:6, :, :], mc)
                    pred_image = visualize_seg(pred_cls[:6, :, :], mc)

                    # Run evaluation on the batch
                    ious, _, _, _ = evaluate_iou(
                        label_per_batch,
                        pred_cls * np.squeeze(lidar_mask_per_batch),
                        mc.NUM_CLASS)

                    feed_dict = {}
                    # Assume that class-0 is the background class
                    for i in range(1, mc.NUM_CLASS):
                        feed_dict[model.iou_summary_placeholders[i]] = ious[i]

                    iou_summary_list = sess.run(model.iou_summary_ops[1:],
                                                feed_dict)

                    # Run visualization
                    viz_op_list = [
                        model.show_label, model.show_depth_img, model.show_pred
                    ]
                    viz_summary_list = sess.run(viz_op_list,
                                                feed_dict={
                                                    model.depth_image_to_show:
                                                    lidar_per_batch[:6, :, :,
                                                                    [4]],
                                                    model.label_to_show:
                                                    label_image,
                                                    model.pred_image_to_show:
                                                    pred_image,
                                                })

                    # Add summaries
                    summary_writer.add_summary(summary_str, step)

                    for sum_str in iou_summary_list:
                        summary_writer.add_summary(sum_str, step)

                    for viz_sum in viz_summary_list:
                        summary_writer.add_summary(viz_sum, step)

                    # force tensorflow to synchronise summaries
                    summary_writer.flush()

                else:
                    _, loss_value = sess.run([model.train_op, model.loss],
                                             options=run_options)

                duration = time.time() - start_time

                assert not np.isnan(loss_value), \
                    'Model diverged. Total loss: {}'.format(loss_value)

                if step % 10 == 0:
                    num_images_per_step = mc.BATCH_SIZE
                    images_per_sec = num_images_per_step / duration
                    sec_per_batch = float(duration)
                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        images_per_sec, sec_per_batch))
                    sys.stdout.flush()

                # Save the model checkpoint periodically.
                if step % FLAGS.checkpoint_step == 0 or step == FLAGS.max_steps - 1:
                    checkpoint_path = os.path.join(FLAGS.train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
        except Exception as e:
            coord.request_stop(e)
        finally:
            coord.request_stop()
            sess.run(model.q.close(cancel_pending_enqueues=True))
            coord.join(enq_threads)
Example #7
from comet_ml import Experiment
experiment = Experiment(api_key="8gFIuv61aMnLn2YmtGHULdr1P",
                        project_name="model_learn",
                        workspace="basarane")

import argparse

parser = argparse.ArgumentParser(description='DQN Training')
parser.add_argument('game', type=str, default='Breakout', help='Gym game name')
parser.add_argument('--env-weightfile',
                    type=str,
                    default=None,
                    help='load environment weights')
parser.add_argument('--env-model',
                    type=str,
                    default=None,
                    help='class name of environment model')
parser.add_argument('--logdir',
                    type=str,
                    default=None,
                    help='Logdir for tensorboard')
parser.add_argument('--max-step', type=int, default=int(1e10), help='max step')
parser.add_argument('--max-episode',
                    type=int,
                    default=int(10),
                    help='max episode')
parser.add_argument('--test-epsilon',
                    type=float,
                    default=0.05,
                    help='epsilon for testing')
parser.add_argument('--load-weightfile',
Example #8
- separate policies - done working
- gradient policy
- use messages which approximate the other value function
- a2c
- curiosity

-- save git commit key as param 
'''

if __name__ == '__main__': 
    if True:
        args = getA()
        pars = vars(args)    
        
        experiment = Experiment(api_key="ubnNI8IwcycXWmKD7eT7YlP4J",
                                auto_output_logging=None,
                                auto_metric_logging=False,
                                disabled=pars['debug'] == '1',
                                project_name="general",
                                workspace="ionmosnoi")
        
        experiment.log_parameters(pars)
        #add_tags(['dql','sep','gather'])
        print(pars)
        pars['results_path'] += pars['name']
        #pars['results_path'] += pars['name']
        env = None

        #temp = env.render_env()
        #env.reset()

        #e = get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy()
        #print(e.shape, np.amax(e), np.amin(e))
        job.setName(pars['name'])
Example #9
from comet_ml import Experiment

# Add the following code anywhere in your machine learning file
exp = Experiment(api_key="VeFWrxnaD0wAfQjDXoLy8gdiL",
                 project_name="3d-vae",
                 workspace="axis-bit")

import argparse
import os
import time

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
from torch.autograd import Variable
from torch.utils import data

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import torch.nn.functional as F
from torch.optim.lr_scheduler import ExponentialLR

import pandas as pd
import numpy as np
from scipy import ndimage
Example #10
train_dl, val_dl, test_dl = get_dataloaders(
    project.data_dir / "train",
    project.data_dir / "val",
    val_transform=val_transform,
    train_transform=train_transform,
    batch_size=params['batch_size'],
    pin_memory=True,
    num_workers=4,
)
# it is always good practice to visualise some of the train and val images to be sure data-aug
# is applied properly
show_dl(train_dl)
show_dl(test_dl)
# define our comet experiment
experiment = Experiment(api_key="YOU_KEY",
                        project_name="dl-pytorch-template",
                        workspace="francescosaveriozuppichini")
experiment.log_parameters(params)
# create our special resnet18
cnn = resnet18(2).to(device)
# print the model summary to show useful information
logging.info(summary(cnn, (3, 224, 244)))
# define custom optimizer and instantiate the trainer `Model`
optimizer = optim.Adam(cnn.parameters(), lr=params['lr'])
model = Model(cnn, optimizer, "cross_entropy",
              batch_metrics=["accuracy"]).to(device)
# usually you want to reduce the lr on plateau and store the best model
callbacks = [
    ReduceLROnPlateau(monitor="val_acc", patience=5, verbose=True),
    ModelCheckpoint(str(project.checkpoint_dir /
                        f"{time.time()}-model.pt"),
Example #11
    # --------------------------
    # -----  Load Options  -----
    # --------------------------
    root = Path(__file__).parent.resolve()
    opts = load_opts(path=root / args.config, default=root / "shared/defaults.yml")
    opts = set_mode("train", opts)
    flats = flatten_opts(opts)
    print_opts(flats)

    # ------------------------------------
    # -----  Start Comet Experiment  -----
    # ------------------------------------
    wsp = args.get("workspace") or opts.comet.workspace
    prn = args.get("project_name") or opts.comet.project_name
    comet_exp = Experiment(workspace=wsp, project_name=prn)
    comet_exp.log_asset(file_data=str(root / args.config), file_name=root / args.config)
    comet_exp.log_parameters(flats)

    # ----------------------------
    # -----  Create loaders  -----
    # ----------------------------
    print("Creating loaders:")
    # ! important to do test first
    val_opt = set_mode("test", opts)
    val_loader = get_loader(
        val_opt, real=True, depth=opts.data.use_depth, no_check=args.no_check
    )
    train_loader = get_loader(
        opts, real=True, depth=opts.data.use_depth, no_check=args.no_check
    )
Example #12
def main():

    opt = parse_option()

    # dataloader
    train_partition = "trainval" if opt.use_trainval else "train"
    if opt.dataset == "miniImageNet":
        train_trans, test_trans = transforms_options[opt.transform]
        train_loader = DataLoader(
            ImageNet(args=opt, partition=train_partition, transform=train_trans),
            batch_size=opt.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=opt.num_workers,
        )
        val_loader = DataLoader(
            ImageNet(args=opt, partition="val", transform=test_trans),
            batch_size=opt.batch_size // 2,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers // 2,
        )
        # meta_testloader = DataLoader(
        #     MetaImageNet(
        #         args=opt,
        #         partition="test",
        #         train_transform=train_trans,
        #         test_transform=test_trans,
        #     ),
        #     batch_size=opt.test_batch_size,
        #     shuffle=False,
        #     drop_last=False,
        #     num_workers=opt.num_workers,
        # )
        # meta_valloader = DataLoader(
        #     MetaImageNet(
        #         args=opt,
        #         partition="val",
        #         train_transform=train_trans,
        #         test_transform=test_trans,
        #     ),
        #     batch_size=opt.test_batch_size,
        #     shuffle=False,
        #     drop_last=False,
        #     num_workers=opt.num_workers,
        # )
        if opt.use_trainval:
            n_cls = 80
        else:
            n_cls = 64
    elif opt.dataset == "tieredImageNet":
        train_trans, test_trans = transforms_options[opt.transform]
        train_loader = DataLoader(
            TieredImageNet(args=opt, partition=train_partition, transform=train_trans),
            batch_size=opt.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=opt.num_workers,
        )
        val_loader = DataLoader(
            TieredImageNet(args=opt, partition="train_phase_val", transform=test_trans),
            batch_size=opt.batch_size // 2,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers // 2,
        )
        meta_testloader = DataLoader(
            MetaTieredImageNet(
                args=opt,
                partition="test",
                train_transform=train_trans,
                test_transform=test_trans,
            ),
            batch_size=opt.test_batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers,
        )
        meta_valloader = DataLoader(
            MetaTieredImageNet(
                args=opt,
                partition="val",
                train_transform=train_trans,
                test_transform=test_trans,
            ),
            batch_size=opt.test_batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers,
        )
        if opt.use_trainval:
            n_cls = 448
        else:
            n_cls = 351
    elif opt.dataset == "CIFAR-FS" or opt.dataset == "FC100":
        train_trans, test_trans = transforms_options["D"]

        train_loader = DataLoader(
            CIFAR100(args=opt, partition=train_partition, transform=train_trans),
            batch_size=opt.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=opt.num_workers,
        )
        val_loader = DataLoader(
            CIFAR100(args=opt, partition="train", transform=test_trans),
            batch_size=opt.batch_size // 2,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers // 2,
        )
        meta_testloader = DataLoader(
            MetaCIFAR100(
                args=opt,
                partition="test",
                train_transform=train_trans,
                test_transform=test_trans,
            ),
            batch_size=opt.test_batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers,
        )
        meta_valloader = DataLoader(
            MetaCIFAR100(
                args=opt,
                partition="val",
                train_transform=train_trans,
                test_transform=test_trans,
            ),
            batch_size=opt.test_batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=opt.num_workers,
        )
        if opt.use_trainval:
            n_cls = 80
        else:
            if opt.dataset == "CIFAR-FS":
                n_cls = 64
            elif opt.dataset == "FC100":
                n_cls = 60
            else:
                raise NotImplementedError(
                    "dataset not supported: {}".format(opt.dataset)
                )
    else:
        raise NotImplementedError(opt.dataset)

    # model
    model = create_model(opt.model, n_cls, opt.dataset, opt.drop_rate, opt.dropblock)

    # optimizer
    if opt.adam:
        optimizer = torch.optim.Adam(
            model.parameters(), lr=opt.learning_rate, weight_decay=0.0005
        )
    else:
        optimizer = optim.SGD(
            model.parameters(),
            lr=opt.learning_rate,
            momentum=opt.momentum,
            weight_decay=opt.weight_decay,
        )

    criterion = nn.CrossEntropyLoss()

    if torch.cuda.is_available():
        if opt.n_gpu > 1:
            model = nn.DataParallel(model)
        model = model.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True

    # tensorboard
    logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
    comet_logger = Experiment(
        api_key=os.environ["COMET_API_KEY"],
        project_name=opt.comet_project_name,
        workspace=opt.comet_workspace,
        disabled=not opt.logcomet,
    )
    comet_logger.set_name(opt.model_name)
    comet_logger.log_parameters(vars(opt))

    # set cosine annealing scheduler
    if opt.cosine:
        eta_min = opt.learning_rate * (opt.lr_decay_rate ** opt.cosine_factor)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, opt.epochs, eta_min, -1
        )

    # routine: supervised pre-training
    for epoch in range(1, opt.epochs + 1):

        if opt.cosine:
            scheduler.step()
        else:
            adjust_learning_rate(epoch, opt, optimizer)
        print("==> training...")

        time1 = time.time()
        with comet_logger.train():
            train_acc, train_loss = train(
                epoch, train_loader, model, criterion, optimizer, opt
            )
            comet_logger.log_metrics(
                {"acc": train_acc.cpu(), "loss_epoch": train_loss}, epoch=epoch
            )
        time2 = time.time()
        print("epoch {}, total time {:.2f}".format(epoch, time2 - time1))

        logger.log_value("train_acc", train_acc, epoch)
        logger.log_value("train_loss", train_loss, epoch)

        with comet_logger.validate():
            test_acc, test_acc_top5, test_loss = validate(
                val_loader, model, criterion, opt
            )
            comet_logger.log_metrics(
                {"acc": test_acc.cpu(), "acc_top5": test_acc_top5.cpu(), "loss": test_loss,},
                epoch=epoch,
            )

        logger.log_value("test_acc", test_acc, epoch)
        logger.log_value("test_acc_top5", test_acc_top5, epoch)
        logger.log_value("test_loss", test_loss, epoch)

        # regular saving
        if epoch % opt.save_freq == 0:
            print("==> Saving...")
            state = {
                "epoch": epoch,
                "model": model.state_dict()
                if opt.n_gpu <= 1
                else model.module.state_dict(),
            }
            save_file = os.path.join(
                opt.save_folder, "ckpt_epoch_{epoch}.pth".format(epoch=epoch)
            )
            torch.save(state, save_file)

    # save the last model
    state = {
        "opt": opt,
        "model": model.state_dict() if opt.n_gpu <= 1 else model.module.state_dict(),
    }
    save_file = os.path.join(opt.save_folder, "{}_last.pth".format(opt.model))
    torch.save(state, save_file)
Example #13
from comet_ml import Experiment  # used below but missing from this excerpt
import tensorflow_probability as tfp
from tensorflow.keras.datasets import mnist
from load_dataset import MNISTSequence
import matplotlib
matplotlib.use('Agg')  # non-interactive backend
from matplotlib import figure  # pylint: disable=g-import-not-at-top
from matplotlib.backends import backend_agg
import numpy as np
tfd = tfp.distributions
try:
    import seaborn as sns  # pylint: disable=g-import-not-at-top
    HAS_SEABORN = True
except ImportError:
    HAS_SEABORN = False

experiment = Experiment()
IMAGE_SHAPE = [28, 28, 1]
NUM_TRAIN_EXAMPLES = 60000
NUM_HELDOUT_EXAMPLES = 10000
NUM_CLASSES = 10

learning_rate = 0.001
num_epochs = 10
batch_size = 128
data_dir = "data/"
save_model_dir = "model/"
num_monte_carlo = 30


def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
    """Save a PNG plot with histograms of weight means and stddevs.
Example #14
from comet_ml import Experiment
experiment = Experiment(api_key="vPCPPZrcrUBitgoQkvzxdsh9k", parse_args=False, project_name='unnamed')
import utils
import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data
from os.path import join, basename, dirname, exists
import os
from glob import glob
import pickle
from PIL import Image
import numpy as np
from matplotlib.pyplot import plot, imshow, colorbar, show, axis, hist, subplot, xlabel, ylabel, title, legend, savefig, figure, close, suptitle, tight_layout, xlim, ylim
import matplotlib.pyplot as plt

transformcompose = transforms.Compose([
  transforms.ToTensor(),
  # transforms.Normalize(datamean, datastd),
])

home = os.environ['HOME']
# dataset = torchvision.datasets.SVHN(join(home, 'datasets/SVHN'), train=True, download=True, transform=transformcompose)
# dataset = torchvision.datasets.CIFAR10(join(home, 'datasets'), tag, download=True, transform=transformcompose)
dataset = torchvision.datasets.FashionMNIST(join(home, 'datasets'), train=True, download=True, transform=transformcompose)

m = np.zeros(3)
s = np.zeros(3)
images, labels = list(zip(*dataset))
for image, label in zip(images, labels):
  image = np.array(image)
Example #15
        'batch_size': [64],
        'dis_lr': [5e-5],
        'dis_lr_decay': [0],
        'adv_lr': [5e-5],
        'adv_lr_decay': [0],
        'dis_kernel_size': [5],
        'gen_kernel_size': [5],
        'dis_dropout': [0],
        'gen_dropout': [0],
    }
    hyper_parameters_lst = get_items(hyper_parameters)

    for dict_ in hyper_parameters_lst:
        print(tf.__version__)
        print(dict_)
        experiment = Experiment(api_key="GytDCgaK0BqzgRHv04J268iS1",
                                project_name="wgan-mnist",
                                workspace="pooya-mohammadi-k")
        model = GAN(experiment=experiment,
                    n_channels=1,
                    img_height=28,
                    img_width=28,
                    data_set=mnist,
                    model_name='mnist_wgan',
                    gen_filters=[128, 64, 32, 1],
                    gen_strides=[2, 2, 1, 1],
                    gen_first_shape=[7, 7, 128],
                    dis_filters=[32, 64, 128, 256],
                    dis_strides=[2, 2, 2, 1],
                    **dict_)
Example #16
    def train(self):
        if not self._disable_comet:
            # logging
            COMET_PROJECT_NAME = 'weibo-stc'
            COMET_WORKSPACE = 'timchen0618'

            self.exp = Experiment(
                project_name=COMET_PROJECT_NAME,
                workspace=COMET_WORKSPACE,
                auto_output_logging='simple',
                auto_metric_logging=None,
                display_summary=False,
            )

            self.exp.add_tag(self.args.task)
            if self.args.task != 'pure_seq2seq':
                if self.args.processed:
                    self.exp.add_tag('processed')
                else:
                    self.exp.add_tag('unprocessed')
            if self.args.sampler_label != 'none':
                self.exp.add_tag(self.args.sampler_label)
            if self._model_type == 'bert':
                self.exp.add_tag('BERT')

            self.exp.set_name(self.args.exp_name)
            self.exp.log_parameters(self.config)
            self.exp.log_parameters(self.config['model'])

        # if finetune, load pretrain
        if self.args.task == 'finetune':
            lr = 5e-7
            state_dict = torch.load(self.args.load_model)['state_dict']
            print('loading model from %s ...' % self.args.load_model)
            self.model.load_state_dict(state_dict)
        else:
            lr = self.config['lr_init']
            if self.args.load_model is not None:
                state_dict = torch.load(self.args.load_model,
                                        map_location='cuda:%d' %
                                        self.args.gpuid)['state_dict']
                print('loading model from %s ...' % self.args.load_model)
                self.model.load_state_dict(state_dict)

        if self.args.pretrain_embedding:
            self.model.load_embedding(self.args.pretrain_embedding)

        # Optimizer and some info for logging.
        if self.config['optimizer'] == 'adam':
            optim = torch.optim.Adam(self.model.parameters(),
                                     lr=lr,
                                     betas=(0.9, 0.98),
                                     eps=1e-9,
                                     weight_decay=0)
        elif self.config['optimizer'] == 'adamw':
            optim = torch.optim.AdamW(self.model.parameters(),
                                      lr=lr,
                                      betas=(0.9, 0.98),
                                      eps=1e-9)
        else:
            raise NotImplementedError

        total_loss = []
        p_gen_list = []
        start = time.time()
        step = self.args.start_step
        print('starting from step %d' % step)

        for epoch in range(self.config['num_epoch']):
            self.model.train()
            train_data = self.data_utils.data_yielder(valid=False)

            for batch in train_data:
                # print('-'*30)
                # Whether do noam learning rate scheduling
                if self.config['noam_decay']:
                    if step % 5 == 1:
                        lr = self.config['lr'] * (
                            1 / (self.config['model']['d_model']**0.5)) * min(
                                (1 / (step)**0.5), (step) *
                                (1 / (self.config['warmup_steps']**1.5)))
                        if self.args.task == 'finetune':
                            lr /= self.config['lr_decay']
                        for param_group in optim.param_groups:
                            param_group['lr'] = lr

                tgt_mask = batch['tgt_mask'].long()
                y = batch['y'].long()

                if self._model_type == 'bert':
                    inp = batch['src']['input_ids'].cuda()
                    out = self.model.forward(inp)
                    pred = tens2np(out.topk(1, dim=-1)[1].squeeze())
                    p_gen_list.append(0.0)
                else:
                    tgt = batch['tgt'].long()
                    src = batch['src'].long()
                    src_mask = batch['src_mask'].long()

                    # Forwarding (with mask or not)
                    if self.config['pos_masking']:
                        out, p_gen = self.model.forward_with_mask(
                            src, tgt, src_mask, tgt_mask, batch['posmask'])
                    elif self.args.task == 'joint_gen' and self.config[
                            'greedy']:
                        out = self.model.forward_with_ss(
                            src, src_mask, tgt, self.config['max_decode_step'],
                            self.data_utils.bos)
                        # print('out', out.size())
                        p_gen = torch.zeros((1, 1))
                    else:
                        out, p_gen = self.model.forward(
                            src, tgt, src_mask, tgt_mask)

                    # Info for printing
                    pred = tens2np(out.topk(1, dim=-1)[1].squeeze())
                    p_gen = p_gen.mean()
                    p_gen_list.append(p_gen.item())

                loss = self.model.loss_compute(out, y, self.data_utils.pad)
                loss.backward()

                optim.step()
                optim.zero_grad()
                total_loss.append(tens2np(loss))

                # print out info
                if step % self.config['print_every_step'] == 0:
                    elapsed = time.time() - start
                    print(
                        "Epoch Step: %d Loss: %f  P_gen:%f Time: %f Lr: %4.6f"
                        % (step, np.mean(total_loss),
                           sum(p_gen_list) / len(p_gen_list), elapsed, lr))

                    if self._model_type == 'bert':
                        source_text = tens2np(inp.long())
                        target_text = tens2np(batch['y'].long())
                    elif self._model_type == 'transformer':
                        source_text = tens2np(batch['src'].long())
                        target_text = tens2np(batch['tgt'].long())

                    print('src:', self.data_utils.id2sent(source_text[0]))
                    print('tgt:', self.data_utils.id2sent(target_text[0]))
                    print('pred:', self.data_utils.id2sent(pred[0]))

                    # If using transformer, we want to see greedy decoding result
                    if self._model_type == 'transformer':
                        if self.config['pos_masking']:
                            greedy_text = self.model.greedy_decode(
                                src.long()[:1], src_mask[:1],
                                self.config['max_len'], self.data_utils.bos,
                                batch['posmask'][:1])
                        else:
                            greedy_text = self.model.greedy_decode(
                                src.long()[:1], src_mask[:1],
                                self.config['max_len'], self.data_utils.bos)
                        greedy_text = tens2np(greedy_text)
                        print('pred_greedy:',
                              self.data_utils.id2sent(greedy_text[0]))

                    # logging statistics
                    if not self._disable_comet:
                        self.exp.log_metric('Train Loss',
                                            np.mean(total_loss),
                                            step=step)
                        self.exp.log_metric('Lr', lr, step=step)
                    print()
                    start = time.time()
                    total_loss = []
                    p_gen_list = []

                # Do validation
                if step % self.config['valid_every_step'] == self.config[
                        'valid_every_step'] - 1:
                    self.validate(step)

                step += 1
Example #17
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

    print('Setting up data...')
    Dataset = get_dataset(opt.dataset, opt.task)
    f = open(opt.data_cfg)
    data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    f.close()
    transforms = T.Compose([T.ToTensor()])
    dataset = Dataset(opt,
                      dataset_root,
                      trainset_paths, (1088, 608),
                      augment=True,
                      transforms=transforms)
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    # Get dataloader

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    experiment = Experiment(
        api_key="SK59eWBf9ldDhEMbsQx7IW9G6",
        project_name="fairmot",
        workspace="noudvdgevel",
        auto_param_logging=False,
        auto_metric_logging=False,
        auto_output_logging=False
    )  #Comet experiment. Active metric logged in base_trainer

    hyper_params = {
        "learning_rate": opt.lr,
        "learning_rate_steps": opt.lr_step,
        "batch_size": opt.batch_size,
        "data": opt.data_cfg,
        "re_id_dim": opt.reid_dim,
        "architecture": opt.arch
    }
    experiment.log_parameters(hyper_params)
    experiment.set_name(opt.exp_id)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, experiment, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            test_opt = opt
            test_opt.load_model = '../exp/mot/' + opt.exp_id + '/model_last.pth'
            with torch.no_grad():
                mean_mAP, mean_R, mean_P = test_det(test_opt,
                                                    batch_size=2,
                                                    print_interval=1)
                tar_at_far = test_emb(test_opt, batch_size=1, print_interval=1)

            test_results = {
                'mAP': mean_mAP,
                'recall': mean_R,
                'precision': mean_P,
                'TPR@FARe-6': tar_at_far[0],
                'TPR@FARe-5': tar_at_far[1],
                'TPR@FARe-4': tar_at_far[2],
                'TPR@FARe-3': tar_at_far[3],
                'TPR@FARe-2': tar_at_far[4],
                'TPR@FARe-1': tar_at_far[5]
            }
            experiment.log_metrics(test_results)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)

        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        if epoch % 5 == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
    logger.close()
Example #18
from comet_ml import Experiment

#create an experiment with your api key
experiment = Experiment(api_key="Dz2W3DAahv0OvSAERUfhA5b7I",
                        project_name='general',
                        auto_param_logging=False)

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping

batch_size = 16
num_classes = 10
epochs = 5
num_nodes = 16
optimizer = 'adam'
activation = 'relu'

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
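The listing is truncated here; a typical continuation (assumed, not from the original) one-hot encodes the labels and trains a small dense network built from the constants defined above, with Comet's Keras integration capturing the metrics during fit():

# Assumed continuation of the truncated listing: build and train a small dense
# network from the constants defined above (num_nodes, activation, optimizer, etc.).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(num_nodes, activation=activation, input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          callbacks=[EarlyStopping(monitor='val_loss', patience=2)])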
Example #19
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    if args.comet_api_key is not None:
        comet_experiment = Experiment(api_key=args.comet_api_key,
                                      project_name=args.comet_project_name, workspace=args.comet_workspace)
        comet_experiment.add_tag(args.experiment_tag)
        comet_experiment.set_name(args.experiment_tag)
        # get the experiment key from comet and replace the one passed through the arguments
        args.experiment_key = comet_experiment.get_key()

    # modify the snapshot path to include the experiment key
    args.snapshot_path = make_dir(os.path.join(args.snapshot_path, args.experiment_key))

    # create the generators
    train_generator, validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model            = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model   = model
        anchor_params    = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            config=args.config
        )

    # print model summary
    print(model.summary())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
Example #20
#Linear metadata model for testing purposes
from comet_ml import Experiment
import tensorflow as tf
from DeepTreeAttention.trees import AttentionModel
from DeepTreeAttention.models import metadata
from DeepTreeAttention.callbacks import callbacks
import pandas as pd

model = AttentionModel(
    config="/home/b.weinstein/DeepTreeAttention/conf/tree_config.yml")
model.create()

#Log config
experiment = Experiment(project_name="neontrees", workspace="bw4sz")
experiment.log_parameters(model.config["train"])
experiment.log_parameters(model.config["evaluation"])
experiment.log_parameters(model.config["predict"])
experiment.add_tag("metadata")

##Train
#Train; see config.yml for the tfrecords path. Classes are weighted in the cross entropy loss.
with experiment.context_manager("metadata"):
    model.read_data(mode="metadata")
    class_weight = model.calc_class_weight()
    model.train(submodel="metadata",
                experiment=experiment,
                class_weight=class_weight)
Example #21
def start_experiment(output_dir,
                     cometml_project="",
                     wandb_project="",
                     run_id=None,
                     note_params="",
                     extra_kwargs=None,
                     overwrite=False):
    """Start a model training experiment. This will create a new output directory
    and setup the experiment management handles
    """
    sys.path.append(os.getcwd())
    if cometml_project:
        logger.info("Using comet.ml")
        if Experiment is None:
            raise ImportError("Comet.ml could not be imported")
        workspace, project_name = cometml_project.split("/")
        cometml_experiment = Experiment(project_name=project_name,
                                        workspace=workspace)
        # TODO - get the experiment id
        # specify output_dir to that directory
    else:
        cometml_experiment = None

    if wandb_project:
        assert "/" in wandb_project
        entity, project = wandb_project.split("/")
        if wandb is None:
            logger.warn("wandb not installed. Not using it")
            wandb_run = None
        else:
            logger.info("Using wandb. Running wandb.init()")
            wandb._set_stage_dir("./")  # Don't prepend wandb to output file
            if run_id is not None:
                wandb.init(project=project,
                           dir=output_dir,
                           entity=entity,
                           reinit=True,
                           resume=run_id)
            else:
                # automatically set the output
                wandb.init(project=project,
                           entity=entity,
                           reinit=True,
                           dir=output_dir)
            wandb_run = wandb.run
            if wandb_run is None:
                logger.warn("Wandb run is None")
            print(wandb_run)
    else:
        wandb_run = None

    # update the output directory
    if run_id is None:
        if wandb_run is not None:
            run_id = os.path.basename(wandb_run.dir)
        elif cometml_experiment is not None:
            run_id = cometml_experiment.id
        else:
            # random run_id
            run_id = str(uuid4())
    output_dir = os.path.join(output_dir, run_id)

    if wandb_run is not None:
        # make sure the output directory is the same
        # wandb_run._dir = os.path.normpath(output_dir)  # This doesn't work
        # assert os.path.normpath(wandb_run.dir) == os.path.normpath(output_dir)
        # TODO - fix this assertion-> the output directories should be the same
        # in order for snakemake to work correctly
        pass
    # -----------------------------

    if os.path.exists(os.path.join(output_dir, 'config.gin')):
        if overwrite:
            logger.info(
                f"config.gin already exists in the output "
                f"directory {output_dir}. Removing the whole directory.")
            shutil.rmtree(output_dir)
        else:
            raise ValueError(f"Output directory {output_dir} shouldn't exist!")
    os.makedirs(output_dir,
                exist_ok=True)  # make the output directory. It shouldn't exist

    # add logging to the file
    add_file_logging(output_dir, logger)

    # write note_params.json
    if note_params:
        logger.info(f"note_params: {note_params}")
        note_params_dict = kv_string2dict(note_params)
    else:
        note_params_dict = dict()
    write_json(note_params_dict,
               os.path.join(output_dir, "note_params.json"),
               sort_keys=True,
               indent=2)

    if cometml_experiment is not None:
        cometml_experiment.log_parameters(note_params_dict)
        cometml_experiment.log_parameters(dict(output_dir=output_dir),
                                          prefix='cli/')

        exp_url = f"https://www.comet.ml/{cometml_experiment.workspace}/{cometml_experiment.project_name}/{cometml_experiment.id}"
        logger.info("Comet.ml url: " + exp_url)
        # write the information about comet.ml experiment
        write_json(
            {
                "url": exp_url,
                "key": cometml_experiment.id,
                "project": cometml_experiment.project_name,
                "workspace": cometml_experiment.workspace
            },
            os.path.join(output_dir, "cometml.json"),
            sort_keys=True,
            indent=2)

    if wandb_run is not None:
        wandb_run.config.update(note_params_dict)
        write_json(
            {
                "url": wandb_run.get_url(),
                "key": wandb_run.id,
                "project": wandb_run.project,
                "path": wandb_run.path,
                "group": wandb_run.group
            },
            os.path.join(output_dir, "wandb.json"),
            sort_keys=True,
            indent=2)
        wandb_run.config.update(
            dict_prefix_key(dict(output_dir=output_dir), prefix='cli/'))

    return cometml_experiment, wandb_run, output_dir
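A hypothetical call illustrating the expected argument formats (the workspace/project split and the note_params parsing come from the function body above; the names and path are placeholders):

# Hypothetical usage; project names and the output path are placeholders.
cometml_experiment, wandb_run, output_dir = start_experiment(
    output_dir="output",
    cometml_project="my-workspace/my-project",   # split into workspace/project_name above
    wandb_project="my-entity/my-project",        # split into entity/project above
    note_params="note=baseline,fold=0",          # parsed by kv_string2dict into note_params.json
)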
Example #22
# import comet_ml at the top of your file
from comet_ml import Experiment

# Adding the following code to our machine learning file
experiment = Experiment(api_key="9gsTl4Wv73PDkYEoX8PUt5RSX",
                        project_name="NLP",
                        workspace="ms-noxolo")

# Run your code and go to https://www.comet.ml/

# Importing modules for data science and visualization
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# NLP Libraries
import re
import string
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.stem import SnowballStemmer
# ML Libraries
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
Example #23
from comet_ml import Experiment
import tensorflow_datasets as tfds
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Embedding, Bidirectional, LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam

BUFFER_SIZE = 10000
BATCH_SIZE = 64
NUM_EPOCHS = 10

experiment = Experiment(project_name='tf2-imdb',
                        workspace='koichiro-mori',
                        auto_param_logging=False)


def load_dataset():
    dataset, info = tfds.load('imdb_reviews/subwords8k',
                              with_info=True,
                              as_supervised=True)
    train_ds, test_ds = dataset['train'], dataset['test']

    # padded_batch() pads each minibatch to the length of its longest sequence
    train_ds = train_ds.shuffle(BUFFER_SIZE).padded_batch(
        BATCH_SIZE, padded_shapes=([None], []))
    test_ds = test_ds.padded_batch(BATCH_SIZE, padded_shapes=([None], []))

    encoder = info.features['text'].encoder

    return train_ds, test_ds, encoder.vocab_size
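The listing ends with load_dataset(); a typical continuation (assumed, not from the original) builds a bidirectional LSTM classifier from the imported layers and trains it on the IMDB data:

# Assumed continuation of the truncated listing: the model and hyperparameters
# below are illustrative, built only from names already imported or defined above.
train_ds, test_ds, vocab_size = load_dataset()

model = Sequential([
    Embedding(vocab_size, 64),
    Bidirectional(LSTM(64)),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy',
              optimizer=Adam(1e-4),
              metrics=['accuracy'])

experiment.log_parameters({'batch_size': BATCH_SIZE, 'num_epochs': NUM_EPOCHS})
model.fit(train_ds, epochs=NUM_EPOCHS, validation_data=test_ds)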

Example #24
oDataSet = DataSet()
base = np.loadtxt("Datasets/XOR.txt", usecols=range(2), delimiter=",")
classes = np.loadtxt("Datasets/XOR.txt", dtype=float, usecols=-1, delimiter=",")

for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()

lb = LabelBinarizer()
lb.fit(oDataSet.labels)

for j in range(20):
    experiment = Experiment(api_key="9F7edG4BHTWFJJetI2XctSUzM",
                            project_name="mest-rn-t6-xor",
                            workspace="lukkascost",
                            )
    experiment.set_name("REALIZACAO_{:02d}".format(j + 1))

    slices = KFold(n_splits=K_FOLD, shuffle=True)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    oData.random_training_test_by_percent(np.unique(classes, return_counts=True)[1], 0.8)
    grid_result = np.zeros((len(GRID_NEURON), len(GRID_B), K_FOLD))
    for g1, g_param in enumerate(GRID_NEURON):
        for g2, g2_param in enumerate(GRID_B):
            k_slice = 0
            for train, test in slices.split(oData.Training_indexes):
                model = Sequential()
                rbflayer = RBFLayer(g_param,
                                    initializer=InitCentersRandom(oDataSet.attributes[oData.Training_indexes[train]]),
                                    betas=g2_param,
Ejemplo n.º 25
0
def main(cfg: DictConfig):
    print('Cassava Leaf Disease Classification')
    cur_dir = hydra.utils.get_original_cwd()
    os.chdir(cur_dir)
    # Config  -------------------------------------------------------------------
    data_dir = './input'
    seed_everything(cfg.data.seed)

    # Comet_ml
    experiment = Experiment(api_key=cfg.comet_ml.api_key,
                            project_name=cfg.comet_ml.project_name,
                            auto_param_logging=False,
                            auto_metric_logging=False)

    # Log Parameters
    experiment.log_parameters(dict(cfg.data))
    experiment.log_parameters(dict(cfg.train))

    # Data Module  ---------------------------------------------------------------
    transform = get_transforms(transform_name=cfg.data.transform,
                               img_size=cfg.data.img_size)
    cv = StratifiedKFold(n_splits=cfg.data.n_splits,
                         shuffle=True,
                         random_state=cfg.data.seed)
    dm = CassavaDataModule(data_dir,
                           cfg,
                           transform,
                           cv,
                           use_merge=True,
                           sample=DEBUG)

    # Model  ----------------------------------------------------------------------
    net = Timm_model(cfg.train.model_type, pretrained=True)

    # Log Model Graph
    experiment.set_model_graph(str(net))

    # Loss fn  ---------------------------------------------------------------------
    df = pd.read_csv('./input/merged.csv')
    weight = df['label'].value_counts().sort_index().tolist()
    weight = [w / len(df) for w in weight]
    weight = torch.tensor(weight).cuda()
    del df

    criterion = get_loss_fn(cfg.train.loss_fn, weight=weight, smoothing=0.05)

    # Optimizer, Scheduler  --------------------------------------------------------
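    # When SAM is enabled it wraps RAdam as its base optimizer; otherwise plain RAdam is used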
    if cfg.train.use_sam:
        base_optimizer = RAdam
        optimizer = SAM(net.parameters(),
                        base_optimizer,
                        lr=cfg.train.lr,
                        weight_decay=cfg.train.weight_decay)
    else:
        optimizer = RAdam(net.parameters(),
                          lr=cfg.train.lr,
                          weight_decay=cfg.train.weight_decay)

    scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
                                               T_max=cfg.train.epoch,
                                               eta_min=0)

    # Lightning Module  -------------------------------------------------------------
    model = CassavaLightningSystem(net,
                                   cfg,
                                   criterion=criterion,
                                   optimizer=optimizer,
                                   scheduler=scheduler,
                                   experiment=experiment)

    # Trainer  -------------------------------------------------------------------------
    trainer = Trainer(
        logger=False,
        max_epochs=cfg.train.epoch,
        gpus=-1,
        amp_backend='apex',
        amp_level='O2',
        num_sanity_val_steps=0,  # Skip Sanity Check
        automatic_optimization=False if cfg.train.use_sam else True,
        # resume_from_checkpoint='./checkpoints/epoch=3-step=14047.ckpt'
    )

    # Train
    trainer.fit(model, datamodule=dm)
Ejemplo n.º 26
0
    #     param.requires_grad = False
    #     if name[5] == str(hyper_params['stage']) and hyper_params['stage'] != 0 :
    #         param.requires_grad = True
    #     elif (name[0] == 'b' or name[0] == 'c') and hyper_params['stage'] == 0 :
    #         param.requires_grad = True

    for name, param in net.named_parameters():
        param.requires_grad = False
        if (name[0] == 'b' or name[0] == 'c' or name[5] == str(0)
                or name[5] == str(1) or name[5] == str(2)):
            param.requires_grad = True

    project_name = 'trad-kd-new-' + hyper_params['model'] + '-' + hyper_params[
        'dataset']
    experiment = Experiment(api_key="1jNZ1sunRoAoI2TyremCNnYLO",
                            project_name=project_name,
                            workspace="akshaykvnit")
    experiment.log_parameters(hyper_params)

    filename = '../saved_models/' + str(
        hyper_params['dataset']) + '/trad_kd/' + str(
            hyper_params['model']) + '_stage' + str(
                hyper_params['stage']) + '/model' + str(
                    hyper_params['repeated']) + '.pt'

    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=hyper_params["learning_rate"])
    total_step = len(data.train_ds) // hyper_params["batch_size"]
    train_loss_list = list()
    val_loss_list = list()
    min_val = 100
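
    # A speculative sketch (not in the original excerpt) of the training loop
    # these bookkeeping variables feed; the epoch count, dataloader attribute
    # and loss function below are assumptions, not the author's actual code.
    import torch.nn.functional as F  # placeholder loss for this sketch
    for epoch in range(hyper_params.get('num_epochs', 100)):
        net.train()
        running_loss = 0.0
        for images, labels in data.train_dl:
            optimizer.zero_grad()
            loss = F.cross_entropy(net(images), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        train_loss_list.append(running_loss / total_step)
        experiment.log_metric('train_loss', train_loss_list[-1], step=epoch)
        # ...evaluate, append to val_loss_list, and overwrite `filename`
        # whenever the validation loss improves on min_val.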
Ejemplo n.º 27
0
client_opt = "SGD"
client_opt_strategy = "reinit"
# image_norm = "tflike"
# TODO: no error was raised for incorrectly named parameters
s_opt_args = common.get_args(server_opt)
config = TorchFederatedLearnerCIFAR100Config(
    BREAK_ROUND=1500,
    CLIENT_LEARNING_RATE=client_lr,
    CLIENT_OPT=client_opt,
    # CLIENT_OPT_ARGS=common.get_args(client_opt),
    CLIENT_OPT_L2=1e-4,
    CLIENT_OPT_STRATEGY=client_opt_strategy,
    SERVER_OPT=server_opt,
    SERVER_OPT_ARGS=s_opt_args,
    SERVER_LEARNING_RATE=server_lr,
    IS_IID_DATA=is_iid,
    BATCH_SIZE=B,
    CLIENT_FRACTION=C,
    N_CLIENTS=NC,
    N_EPOCH_PER_CLIENT=E,
    MAX_ROUNDS=max_rounds,
    IMAGE_NORM="recordwisefull",
    NORM="group",
    INIT="tffed",
    AUG="basicf"
)
config_technical = TorchFederatedLearnerTechnicalConfig(HIST_SAMPLE=0)
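# The experiment name summarizes the server- and client-side optimizer settings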
name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}"
experiment = Experiment(workspace="federated-learning", project_name=project_name)
common.do_training(experiment, name, config, config_technical)
Ejemplo n.º 28
0
from comet_ml import Experiment
from src.models.source_encoder import SourceEncoder
from src.models.target_encoder import TargetEncoder
from src.models.classifier import Classifier
from src.models.domain_discriminator import DomainDiscriminator
from src.models.sdm import SDMG, SDMD
from src.trainers.da import DomainAdversarialTrainer
from src.trainers.sdm import SourceDistributionModelingTrainer
from src.data.damnist import DAMNIST
import torch.utils.data as data
from torchvision import transforms

print('Start Experimentation')
experiment = Experiment(api_key="laHAJPKUmrD2TV2dIaOWFYGkQ",
                        project_name="iada",
                        workspace="yamad07")

source_transform = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, )),
])
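# Target images are resized to 14x28, then padded 7 px top and bottom back to 28x28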
target_transform = transforms.Compose([
    transforms.Resize((14, 28)),
    transforms.Pad((0, 7, 0, 7)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, )),
])
mnist_dataset = DAMNIST(root='./data/',
                        download=True,
                        source_transform=source_transform,
Ejemplo n.º 29
0
from comet_ml import Experiment

experiment = Experiment(api_key="oda8KKpxlDgWmJG5KsYrrhmIV",
                        project_name="consensusnet")

import numpy as np
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPool2D
from keras.callbacks import LearningRateScheduler, EarlyStopping

import sys

module_path = '/home/diplomski-rad/consensus-net/src/python/utils/'
if module_path not in sys.path:
    print('Adding utils module.')
    sys.path.append(module_path)
from args_parsers import parse_train_args


def main(args):
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)

    model_save_path = args.model_save_path

    example_shape = X_train.shape[1:]
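
    # A hypothetical continuation (not in the original excerpt): a small
    # functional-API model built from the imported layers. Layer sizes, the
    # assumption of one-hot labels, and the training settings are guesses.
    inputs = Input(shape=example_shape)
    x = Conv1D(32, kernel_size=3, activation='relu')(inputs)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Flatten()(x)
    outputs = Dense(y_train.shape[1], activation='softmax')(x)
    model = Model(inputs, outputs)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train,
              validation_data=(X_validate, y_validate),
              epochs=20,
              callbacks=[EarlyStopping(patience=3)])
    model.save(model_save_path)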
Ejemplo n.º 30
0
results_list = list()

while NUM_ITER > 0:
    NUM_ITER -= 1

    search_space_state = pd.read_excel(search_space_path)

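    # Pick one not-yet-trained configuration at random from the search-space sheet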
    random_index = random.choice(
        list(search_space_state.loc[search_space_state['Trained'] ==
                                    'No'].index))
    search_space_model = search_space_state.loc[search_space_state['Trained']
                                                == 'No'].loc[random_index]

    comet_experiment = Experiment(
        api_key='A8Lg71j9LtIrsv0deBA0DVGcR',
        project_name=f'{ALGORITHM}-afib',
        workspace="8_dps",
        auto_output_logging='native',
    )
    comet_experiment.set_name(search_space_model['Model'])
    comet_experiment.add_tags([DS, SEGMENTS_LENGTH, ALGORITHM])

    if search_space_model['model_architecture'] == 1:
        model = keras.Sequential([
            Dense(units=int(X_train.shape[1] / 2),
                  input_shape=(X_train.shape[1], ),
                  activation=search_space_model['activation'],
                  kernel_initializer=search_space_model['initializer']),
            Dense(units=1, activation='sigmoid')
        ])

    if search_space_model['model_architecture'] == 2: