Example #1
    def __init__(self,
                 main_folder_path,
                 model,
                 train_df,
                 test_df,
                 num_classes,
                 target_dim,
                 device,
                 is_colab,
                 config,
                 args_text=None):
        self.main_folder_path = main_folder_path
        self.model = model
        self.train_df = train_df
        self.test_df = test_df
        self.device = device
        self.config = config
        self.num_classes = num_classes
        self.target_dim = target_dim
        self.is_colab = is_colab
        self.args_text = args_text
        if "faster" in self.config.model_name:
            # special case of training the conventional model based on Faster R-CNN
            params = [p for p in self.model.parameters() if p.requires_grad]
            self.optimizer = self.config.optimizer_class(
                params, **self.config.optimizer_config)
        else:
            self.optimizer = self.config.optimizer_class(
                self.model.parameters(), **self.config.optimizer_config)
        self.scheduler = self.config.scheduler_class(
            self.optimizer, **self.config.scheduler_config)
        self.model_file_path = self.get_model_file_path(
            self.is_colab,
            prefix=config.model_file_prefix,
            suffix=config.model_file_suffix)
        self.log_file_path = self.get_log_file_path(
            self.is_colab, suffix=config.model_file_suffix)
        self.epoch = 0
        self.visualize = visualize.Visualize(self.main_folder_path,
                                             self.target_dim,
                                             dest_folder='Images')

        # use our dataset and defined transformations
        self.dataset = bus_dataset.BusDataset(self.main_folder_path,
                                              self.train_df, self.num_classes,
                                              self.target_dim,
                                              self.config.model_name, False,
                                              T.get_transform(train=True))
        self.dataset_test = bus_dataset.BusDataset(
            self.main_folder_path, self.test_df, self.num_classes,
            self.target_dim, self.config.model_name, False,
            T.get_transform(train=False))

        # TODO(ofekp): do we need this?
        # split the dataset in train and test set
        # indices = torch.randperm(len(dataset)).tolist()
        # dataset = torch.utils.data.Subset(dataset, indices[:-50])
        # dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

        self.log('Trainer initialized. Device is [{}]'.format(self.device))
Example #2
    def __init__(self, analytics, token):
        self.analytics = analytics

        def on_trigger(operation, trigger, ticker):
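            # forward analytics trigger events to the bot's alert handler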
            self.trigger_alert(operation, trigger, ticker)

        self.analytics.on_trigger = on_trigger
        self.bot = telepot.Bot(token)
        self.me = self.bot.getMe()
        self.bot.notifyOnMessage(self._on_message)
        self.db = shelve.open(const.DATA_DIR + '/bot.shelve.db')
        self.visualize = visualize.Visualize()
        self._set_alert_triggers()
        click.echo("Telegram Bot running as @{}".format(self.me['username']))
Example #3
def main():

    m = map.HomelyHill()

    def play_map(player, game_map):
        g = game.Game(game_map)
        player.play(g)
        return g

    players = [
        player.Human(),
        player.NaturalSelection(),
        player.NaturalEvolutionSGD(),
        player.FiniteDifferences(),
        player.ScipyOptimizer('BFGS'),
        player.ScipyOptimizer('Powell'),
        player.ScipyOptimizer('Nelder-Mead'),
        player.ScipyOptimizer('CG'),
        player.ScipyOptimizer('COBYLA')
    ]
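    # play the same map with every player and collect the finished games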
    games = [play_map(player, m) for player in players]

    v = visualize.Visualize(players, games, m)
    v.show(n_evaluations=1)
Example #4
def train_inverse(args, train, test, pdn):
    if train.shape[1] == 1:
        model = inversed_model.InversedProbabilityDistributionNetwork(pdn, 1)
    elif train.shape[1] == 2:
        model = inversed_model.InversedProbabilityDistributionNetwork(pdn, 2)
    else:
        raise RuntimeError('Invalid dataset.')

    if args.gpu >= 0:
        model.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize, False,
                                                 False)

    stop_trigger = (args.epoch, 'epoch')

    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, stop_trigger, out=args.out)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    trainer.extend(extensions.snapshot(
        filename='inverse_snapshot_epoch_{.updater.epoch}'),
                   trigger=(50, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
    trainer.extend(visualize.Visualize("inverse_{.updater.epoch}.png", pdn,
                                       test, model),
                   trigger=(10, 'epoch'))

    trainer.run()
Example #5
def main():

    if len(sys.argv) < 6:
        usage = "\n Usage: python mainly_class.py classifier_type normalize_data apply_pca_data visualize run_all\
        \n\n\t classifier_type : 1 => Adaboost\
        \n\t classifier_type : 2 => KNN \n\t classifier_type : 3 => SVM \n\t classifier_type : 4 => logistic regression\
        \n\t classifier_type : 5 => Perceptron\n\t classifier_type : 6 => RandomForest\
        \n\t normalize_data: Normalize the training dataset\
        \n\t apply_pca_data: Apply pca on the training dataset\
        \n\t visualize: Visualize some data of the dataset\
        \n\t run_all: Run all models\
        \n\n\t ex : python mainly_class.py 0 0 0 0 1"
        print(usage)
        return

    classifier_type = int(sys.argv[1])
    normalize = int(sys.argv[2])
    pca = int(sys.argv[3])
    vis = int(sys.argv[4])
    run_all = int(sys.argv[5])

    print("Making Dataset...")
    generateur_donnees = md.MakeDataset()
    classes, data, labels, test, test_ids = generateur_donnees.prepare_data()

    if normalize == 1:
        print("Normalizing Dataset...")
        data = generateur_donnees.normalizer(data)

    if pca == 1:
        print("Applying PCA on dataset...")
        data = generateur_donnees.apply_pca(data)

    # Visualizing Data
    if vis == 1:
        visio = vz.Visualize()
        visio.show_correlation(data)

    # Run for each classifier
    if classifier_type != 0:
        print(" Training with the specified classifier...")
        train_model = tm.trainModel(classifieur=classifier_type)
        trainData, testData, trainLabels, testLabels = train_test_split(data, labels, test_size=0.2, random_state=0)
        train_model.entrainement(trainData, trainLabels, testData, testLabels, test, classes, test_ids)

    # Run for all classifiers
    if run_all == 1:
        print(" Training with all classifiers...")
        trainData, testData, trainLabels, testLabels = train_test_split(data, labels, test_size=0.2, random_state=0)
        for classifieur in range(1, 7):
            train_model = tm.trainModel(classifieur=classifieur)
            train_model.entrainement(trainData, trainLabels, testData,
                                     testLabels)
Example #6
import visualize

NO_OF_PACKETS_SENDER1 = 10
NO_OF_PACKETS_SENDER2 = 20
NO_OF_PACKETS_SENDER3 = 15

FPS_OF_SENDER1 = 9
FPS_OF_SENDER2 = 80
FPS_OF_SENDER3 = 34
FPS_OF_NODE1 = 0
FPS_OF_NODE2 = 150

BUFFER_OF_NODE1 = 3
BUFFER_OF_NODE2 = 4

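# build the packet-flow visualization from the sender/node parameters above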
visual = visualize.Visualize(NO_OF_PACKETS_SENDER1, NO_OF_PACKETS_SENDER2,
                             NO_OF_PACKETS_SENDER3, FPS_OF_SENDER1,
                             FPS_OF_SENDER2, FPS_OF_SENDER3, FPS_OF_NODE1,
                             FPS_OF_NODE2, BUFFER_OF_NODE1, BUFFER_OF_NODE2)
visual.start()
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--dim-z',
                        '-z',
                        default=64,
                        type=int,
                        help='dimension of encoded vector')
    parser.add_argument('--masks',
                        '-m',
                        default=4,
                        type=int,
                        help='Number of mask images')
    parser.add_argument('--epoch',
                        '-e',
                        default=20,
                        type=int,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        default=0,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    args = parser.parse_args()

    if args.gpu >= 0:
        chainer.config.autotune = True
        chainer.backends.cuda.get_device_from_id(args.gpu).use()

    model = net.autoencoder.Model1([64, 128, 256, args.dim_z],
                                   [args.dim_z, 256, 128, 64], 3, args.masks)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_cifar10()
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize, False,
                                                 False)
    dump_iter = chainer.iterators.SerialIterator(test, args.batchsize)

    updater = chainer.training.StandardUpdater(train_iter,
                                               optimizer,
                                               device=args.gpu)
    trainer = chainer.training.trainer.Trainer(updater, (args.epoch, 'epoch'),
                                               args.out)

    trainer.extend(
        chainer.training.extensions.Evaluator(test_iter,
                                              model,
                                              device=args.gpu))
    trainer.extend(chainer.training.extensions.FailOnNonNumber())
    trainer.extend(observe_weight, trigger=(1, 'epoch'))
    trainer.extend(chainer.training.extensions.LogReport())
    trainer.extend(
        chainer.training.extensions.PrintReport([
            'epoch',
            'main/mse',
            'validation/main/mse',
            'main/z_loss',
            'validation/main/z_loss',
            'weight_std',
            'elapsed_time',
        ]))
    trainer.extend(visualize.Visualize(dump_iter, model, device=args.gpu),
                   trigger=(1, 'epoch'))

    trainer.run()
Example #8
def main():
    parser = argparse.ArgumentParser(
        description=
        'Learning cumulative distribution function with Monotonic Networks:')
    parser.add_argument(
        '--dataset',
        '-d',
        default='gaussian_1d',
        help='The dataset to use: gaussian_1d, gaussian_mix_2d, '
        'gaussian_half_1d or half_gaussian_2d')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    if args.dataset == 'gaussian_1d':
        train = sampler.gaussian_1d(numpy, 4096)
        test = sampler.gaussian_1d(numpy, 1024)
    elif args.dataset == 'gaussian_mix_2d':
        train = sampler.gaussian_mixture_circle(numpy, 32768)
        test = sampler.gaussian_mixture_circle(numpy, 1024)
    elif args.dataset == 'gaussian_half_1d':
        train = sampler.half_gaussian_1d(numpy, 16384)
        test = sampler.half_gaussian_1d(numpy, 1024)
    elif args.dataset == 'half_gaussian_2d':
        train = sampler.truncated_gaussian_circle(numpy, 32768)
        test = sampler.truncated_gaussian_circle(numpy, 1024)
    else:
        raise RuntimeError('Invalid dataset: {}.'.format(args.dataset))

    if train.shape[1] == 1:
        model = models.ProbabilityDistributionNetwork(1, [16, 16, 16],
                                                      [16, 16], 4)
    elif train.shape[1] == 2:
        model = models.ProbabilityDistributionNetwork(2, [32, 32, 32],
                                                      [32, 32], 8)
    else:
        raise RuntimeError('Invalid dataset.')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize, False,
                                                 False)

    stop_trigger = (args.epoch, 'epoch')

    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, stop_trigger, out=args.out)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
        trigger=(10, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
    trainer.extend(visualize.Visualize(model, test))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
Example #9
    x1 = data['x1']
    y1 = data['y1']

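    # estimate the fundamental matrix with the eight-point algorithm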
    F = eightpoint(pts1, pts2, M)

    np.savez_compressed('./q2_1.npz', F=F, M=M)

    helper.displayEpipolarF(I1, I2, F)

    bestF, Farray_best, points1, points2 = BestFSevenPoints(pts1, pts2, M)

    np.savez_compressed('./q2_2.npz', F=bestF, M=M, pts1=points1, pts2=points2)

    helper.displayEpipolarF(I1, I2, bestF)

    E = essentialMatrix(F, K1, K2)

    Ms = helper.camera2(E)

    M2, M1, C2, C1, Points = findM2_function(K1, K2, Ms, pts1, pts2)

    np.savez_compressed('./q3_3.npz', M2=M2, C2=C2, P=Points)

    helper.epipolarMatchGUI(I1, I2, F)

    np.savez_compressed('./q4_1.npz', F=F, pts1=pts1, pts2=pts2)

    visualize.Visualize(I1, I2, x1, y1, C1, C2, F)

    np.savez_compressed('./q4_2.npz', F=F, C1=C1, C2=C2, M2=M2, M1=M1)
Example #10
def run2(data_loader_test):
    import train
    import visualize
    import argparse
    import yaml
    import json
    import time
    import torch

    args = None
    with open("Args/args_text.yml", 'r') as args_file:
        args_text = args_file.read()
        parser = argparse.ArgumentParser()
        cfg = yaml.safe_load(args_text)
        parser.set_defaults(**cfg)
        args = parser.parse_args([])

    device = 'cuda:0'
    # device = 'cpu'
    main_folder_path = "."

    # # effdet d0
    # pip uninstall torchvision; pip install git+https://github.com/ofekp/vision.git
    # args.model_name = "tf_efficientdet_d0"
    # args.model_file_suffix = "effdet_h5py_rpn"
    # args.model_file_prefix = "effdet/"
    # args.box_threshold = 0.3

    # effdet d2
    args.model_name = "tf_efficientdet_d2"
    args.model_file_suffix = ""
    args.model_file_prefix = "Model/45AP_d2/"
    args.target_dim = 768
    args.box_threshold = 0.4

    # # effdet d2
    # args.model_name = "tf_efficientdet_d2"
    # args.model_file_suffix = "effdet_d2"
    # args.model_file_prefix = "effdet_d2/"
    # args.box_threshold = 0.30

    # faster
    # pip uninstall torchvision; pip install torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
    # args.model_name = "faster"
    # args.model_file_suffix = "faster_sgd"
    # args.model_file_prefix = "faster/"
    # args.box_threshold = None

    # args.box_threshold = None
    # args.model_file_suffix = "faster"
    # args.box_threshold = 0.3
    # args.model_file_suffix = "effdet_checkup"
    # args.data_limit = 2000

    num_classes = 6  # without background class
    train_df, test_df = train.process_data(main_folder_path, num_classes,
                                           args.test_size_pct)
    model = train.get_model_detection_efficientdet(
        args.model_name,
        num_classes,
        args.target_dim,
        freeze_batch_norm=args.freeze_batch_norm_weights)

    train_config = train.TrainConfig(args)
    is_colab = False
    trainer = train.Trainer(main_folder_path,
                            model,
                            train_df,
                            test_df,
                            num_classes,
                            args.target_dim,
                            device,
                            is_colab,
                            config=train_config,
                            args_text=args_text)
    trainer.load_model(device)

    dataset_test = bus_dataset.BusDataset(main_folder_path, test_df,
                                          num_classes, args.target_dim,
                                          args.model_name, False,
                                          T.get_transform(train=False))
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=train_config.batch_size,
        shuffle=False,
        num_workers=train_config.num_workers,
        collate_fn=DetectionFastCollate())
    data_loader_test = PrefetchLoader(data_loader_test)

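    # run the detector over the test loader and move detections to the CPU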
    cpu_device = torch.device("cpu")
    vis = visualize.Visualize('.', args.target_dim)
    for i, (images, targets) in enumerate(data_loader_test):
        trainer.model.eval()
        with torch.no_grad():
            outputs = model(images, None)
            for output in outputs:
                predictions = output['detections'].to(cpu_device)

    num_of_detections = len(torch.where(targets['cls'][0] > -1)[0])
    vis.show_image_data(images[0], targets['cls'][0, :num_of_detections].int(),
                        None, targets['bbox'][0, :num_of_detections,
                                              [1, 0, 3, 2]])

    vis = visualize.Visualize(main_folder_path, categories_df,
                              args.target_dim)
    img_idx = 1
    trainer.model.eval()
    vis.show_prediction_on_img(trainer.model,
                               trainer.dataset_test,
                               test_df,
                               img_idx,
                               train.is_colab,
                               show_groud_truth=False,
                               box_threshold=args.box_threshold,
                               split_segments=True)

    dataset_test = imat_dataset.IMATDataset(main_folder_path, test_df,
                                            num_classes, args.target_dim,
                                            args.model_name, False,
                                            T.get_transform(train=False))

    for img_idx in range(100, 150):
        #     visualize.show_prediction_on_img(trainer.model, trainer.dataset, train_df, img_idx, train.is_colab, show_groud_truth=True, box_threshold=args.box_threshold, split_segments=True)
        vis.show_prediction_on_img(trainer.model,
                                   dataset_test,
                                   test_df,
                                   img_idx,
                                   train.is_colab,
                                   show_groud_truth=True,
                                   box_threshold=0.3,
                                   split_segments=True)