class VisualizeTest(unittest.TestCase):
    def setUp(self):
        self.data_file = "data/data.json"
        self.output_file = "output/output.png"
        self.data = {1: 360, 3: 320, 4: 175, 6: 260, 8: 250, 10: 300}

        self._writeDataFile()
        self._deleteOutputFile()
        self.visualizer = Visualizer(self.data_file, self.output_file)

    def _writeDataFile(self):
        import json
        with open(self.data_file, 'w') as f:
            f.write(json.dumps(self.data))

    def _deleteOutputFile(self):
        import os
        try:
            os.remove(self.output_file)
        except OSError:
            pass

    def test_load(self):
        self.visualizer.load()
        self.assertEqual(self.visualizer.data, self.data)

    def test_savefig(self):
        import os
        self.visualizer.savefig()
        assert os.path.exists(self.visualizer.opfile)
        assert os.path.getsize(self.visualizer.opfile)
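
The tests in this example pin down a small interface: Visualizer(data_file, output_file), a load() that parses the JSON data file into .data, and a savefig() that writes a figure to .opfile. A minimal sketch that would satisfy them is shown below; it is an assumption, not the project's actual class, and the choice of a matplotlib line plot is arbitrary. Note that JSON stringifies integer keys, so load() has to convert them back for test_load to pass.

import json

import matplotlib
matplotlib.use('Agg')  # render without a display
import matplotlib.pyplot as plt


class Visualizer:
    """Hypothetical minimal implementation satisfying the tests above."""

    def __init__(self, data_file, output_file):
        self.data_file = data_file
        self.opfile = output_file
        self.data = None

    def load(self):
        with open(self.data_file) as f:
            raw = json.load(f)
        # JSON turns the integer keys into strings; convert them back
        # so the loaded dict compares equal to the one the test wrote.
        self.data = {int(k): v for k, v in raw.items()}

    def savefig(self):
        if self.data is None:
            self.load()
        xs = sorted(self.data)
        plt.plot(xs, [self.data[x] for x in xs])
        plt.savefig(self.opfile)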
Example #2
    def __init__(self, name_list, num_classes=400, modality='RGB', **kwargs):
        self.__dict__.update(kwargs)
        self.num_classes = num_classes
        self.modality = modality
        self.name_list = name_list
        # early-stopping counter
        self.count_early_stop = 0
        # best top-1 precision so far
        self.best_prec1 = 0
        # starting epoch
        self.start_epoch = 0

        if self.log_visualize != '':
            self.visualizer = Visualizer(logdir=self.log_visualize)

        self.checkDataFolder()

        self.loading_model()

        self.train_loader, self.val_loader = self.loading_data()

        # run
        self.processing()
        if self.random:
            print('random pick images')
Example #3
    def __init__(self):
        self.waypoints = []
        self.profile = None
        self.path = None
        self.robot = Robot(2, 15, 10)
        self.visualize = Visualizer(self.path, offset=self.robot.width / 2.)
        cmd.Cmd.__init__(self)
Example #5
    def __init__(self):
        self.running_loss = 0.0
        self.epochs = 20
        self.current_epoch = 0
        self.epoch_start_time = None
        self.model = None
        self.optimizer = None
        self.scheduler = None
        self.loss_fn = None
        self.vis = Visualizer()
Example #6
def train(**kwargs):

    # Set attributes from kwargs
    for k, v in kwargs.items():
        setattr(opt, k, v)
    if opt.vis:
        visualizer = Visualizer()

    # Data
    data, word2ix, ix2word = get_data(opt)
    data = t.from_numpy(data)
    dataloader = DataLoader(data, batch_size=opt.batch_size, shuffle=True)

    # Model
    model = LyricsModel(len(word2ix), opt.embedding_dim, opt.latent_dim)
    if opt.model_path:
        model.load_state_dict(t.load(opt.model_path, map_location="cpu"))

    # Define optimizer and loss
    optimizer = Adam(model.parameters(), lr=opt.lr)
    criterion = nn.CrossEntropyLoss()
    loss_meter = meter.AverageValueMeter()

    if opt.use_gpu:
        model.cuda()
        criterion.cuda()

    #================================================#
    #               Start Training                   #
    #================================================#

    for epoch in tqdm.tqdm(range(opt.num_epoch)):

        for (ii, data) in enumerate(dataloader):
            # Prepare data
            data = data.long().transpose(1, 0).contiguous()
            if opt.use_gpu: data = data.cuda()
            inputs, targets = Variable(data[:-1, :]), Variable(data[1:, :])
            outputs, hidden = model(inputs)

            # Initialize and backward
            optimizer.zero_grad()
            loss = criterion(outputs, targets.view(-1))
            loss.backward()
            optimizer.step()

            loss_meter.add(loss.item())

            if (1 + ii) % opt.print_every == 0:
                print("Current Loss: %d" % loss.item())
                if opt.vis:
                    visualizer.plot('loss', loss_meter.value()[0])
        if (epoch + 1) % 20 == 0:
            t.save(model.state_dict(), 'checkpoints/%s.pth' % epoch)
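
The visualizer.plot('loss', value) call above suggests a thin wrapper around a plotting backend. A hedged sketch follows, assuming Visdom (a common choice in PyTorch example code of this style); only the method names plot and images are taken from the calls in these examples, everything else is an assumption. The same wrapper shape also covers the vis.images(...) calls in the GAN examples later in this list.

import numpy as np
import visdom


class Visualizer:
    """Hypothetical sketch: named, appendable Visdom line plots and image grids."""

    def __init__(self, env='default'):
        self.vis = visdom.Visdom(env=env)
        self.step = {}  # per-window x-axis counters

    def plot(self, name, y):
        x = self.step.get(name, 0)
        self.vis.line(Y=np.array([y]), X=np.array([x]), win=name,
                      opts=dict(title=name),
                      update=None if x == 0 else 'append')
        self.step[name] = x + 1

    def images(self, tensor, win='images'):
        # Expects an (N, C, H, W) array with values in [0, 1].
        self.vis.images(tensor, win=win, opts=dict(title=win))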
Example #7
def testLoop(hexa, lnk, viz):
    if viz:
        viz = Visualizer(hexa)

    hexa.setWalking(1., 0., 0.)
    while True:
        hexa.tick()
        if lnk:
            lnk.tick()
        if viz:
            viz.tick()
Example #8
def controlLoop(hexa, lnk, viz):
    if viz:
        viz = Visualizer(hexa)

    with ControlSource(hexa) as control:
        while True:
            control.tick()
            hexa.tick()
            lnk.tick()
            if viz:
                viz.tick()
Example #9
    def __init__(self, args, model):
        self.args = args
        self.model = model

        # Initialize the spatial/color transform for the equivariance loss.
        self.tps = RandTPS(args.input_size[1], args.input_size[0],
                           batch_size=args.batch_size,
                           sigma=args.tps_sigma,
                           border_padding=args.eqv_border_padding,
                           random_mirror=args.eqv_random_mirror,
                           random_scale=(args.random_scale_low,
                                         args.random_scale_high),
                           mode=args.tps_mode).cuda(args.gpu)

        # Color transform.
        self.cj_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ColorJitter(
                brightness=0.3, contrast=0.3, saturation=0.2, hue=0.2),
            transforms.ToTensor(), ])

        # KL divergence loss for equivariance
        self.kl = nn.KLDivLoss().cuda(args.gpu)

        # Bilinear upsampling for the loss.
        self.interp = nn.Upsample(
            size=(args.input_size[1], args.input_size[0]), mode='bilinear', align_corners=True)

        # Initialize feature extractor and part basis for the semantic consistency loss.
        self.zoo_feat_net = FeatureExtraction(
            feature_extraction_cnn=args.ref_net, normalization=args.ref_norm, last_layer=args.ref_layer)
        self.zoo_feat_net.eval()

        self.part_basis_generator = PartBasisGenerator(self.zoo_feat_net.out_dim,
                                                       args.num_parts, normalize=args.ref_norm)
        self.part_basis_generator.cuda(args.gpu)
        self.part_basis_generator.train()

        if args.restore_part_basis != '':
            self.part_basis_generator.load_state_dict(
                {'w': torch.load(args.restore_part_basis)})

        # Initialize optimizers.
        self.optimizer_seg = optim.SGD(self.model.optim_parameters(args),
                                       lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
        self.optimizer_seg.zero_grad()

        self.optimizer_sc = optim.SGD(self.part_basis_generator.parameters(
        ), lr=args.learning_rate_w, momentum=args.momentum, weight_decay=args.weight_decay)
        self.optimizer_sc.zero_grad()

        # Visualizer
        self.viz = Visualizer(args)
Example #10
def main():

    # create random training samples for 3 categories in range 0-3, 2-5, 4-7
    np.random.seed(5)
    X1 = [
        3 * np.random.random_sample(75) + 0,
        3 * np.random.random_sample(75) + 0
    ]
    X2 = [
        3 * np.random.random_sample(75) + 2,
        3 * np.random.random_sample(75) + 2
    ]
    X3 = [
        3 * np.random.random_sample(75) + 4,
        3 * np.random.random_sample(75) + 4
    ]
    X_train = np.hstack((X1, X2, X3)).T

    # create testing random samples in range 0-7
    X_test = np.array(
        [7 * np.random.random_sample(30), 7 * np.random.random_sample(30)])

    # create labels for training data
    y1 = [1 for _ in range(75)]
    y2 = [2 for _ in range(75)]
    y3 = [3 for _ in range(75)]
    y_train = np.hstack((y1, y2, y3))

    # plot data in different colors
    plt.scatter(X1[0], X1[1], c='r', marker='s', label='X1')
    plt.scatter(X2[0], X2[1], c='b', marker='x', label='X2')
    plt.scatter(X3[0], X3[1], c='lightgreen', marker='o', label='X3')
    plt.scatter(X_test[0], X_test[1], c='black', marker='^', label='test set')
    plt.legend(loc='upper left')
    plt.show()

    # create classifier
    knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
    knn.fit(X_train, y_train)

    # prepare test set and predict labels
    X_test = X_test.T
    y_test = knn.predict(X_test)

    # combine train and test data
    X_combined = np.vstack((X_train, X_test))
    y_combined = np.hstack((y_train, y_test))
    Visualizer.plot_decision_regions(X_combined,
                                     y_combined,
                                     classifier=knn,
                                     test_idx=range(225, 255))
    plt.legend(loc='upper left')
    plt.show()
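
Visualizer.plot_decision_regions is called here as a static helper. A sketch of the classic implementation of this plot follows (the resolution parameter and styling are assumptions): it predicts the classifier on a dense grid to shade one region per class, scatters the samples, and circles the points indexed by test_idx.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap


class Visualizer:
    """Hypothetical sketch of the decision-region helper used above."""

    @staticmethod
    def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
        markers = ('s', 'x', 'o', '^', 'v')
        colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
        cmap = ListedColormap(colors[:len(np.unique(y))])

        # Shade the decision surface: predict on a dense grid spanning the data.
        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                               np.arange(x2_min, x2_max, resolution))
        Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
        Z = Z.reshape(xx1.shape)
        plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)

        # Scatter the samples, one marker/color per class.
        for idx, cl in enumerate(np.unique(y)):
            plt.scatter(X[y == cl, 0], X[y == cl, 1], alpha=0.8,
                        c=colors[idx], marker=markers[idx], label=cl)

        # Circle the test samples.
        if test_idx is not None:
            plt.scatter(X[test_idx, 0], X[test_idx, 1], facecolors='none',
                        edgecolors='black', s=80, label='test set')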
Example #11
    def __init__(self, maze, **settings):
        self.maze = maze
        self.ants = []

        for name, value in settings.items():
            setattr(self, name, value)

        if self.visualize:
            self.visualizer = Visualizer(maze)
            self.visualizer.save('0_initial.png')

        if self.multiprocessing:
            self.pool = multiprocessing.Pool()
Example #12
def k_means_clustering(data, featureWeightMap, show=False):
    SimilarityCalc = similarityCalculator.SimilarityCalculator(featureWeightMap)
    attribute_clusters = _compute_k_means_clusters(data, SimilarityCalc.simiarity_according_to_attributes, 5)
    attribute_and_friendship_clusters = _compute_k_means_clusters(data, SimilarityCalc.simiarity_according_to_attributes_and_friendship, 10)
    weighted_attribute_and_friendship_clusters = _compute_k_means_clusters(data, SimilarityCalc.similarity_weighted_attributes_friendship, 3.5)
    
    if show:
        visualizer = Visualizer()
        for personID in data.persons.getOriginalPersons():
            visualizer.visualizeClusters( attribute_clusters[personID] )
            visualizer.visualizeClusters( attribute_and_friendship_clusters[personID] )

    return attribute_clusters, attribute_and_friendship_clusters, weighted_attribute_and_friendship_clusters
Example #13
def run_test(args):
    """
    Function for testing a model.
    :param args: command line arguments
    """

    # Create the plots directory if it doesn't exist
    if not os.path.exists(args.plots_dir):
        os.mkdir(args.plots_dir)

    # Create objects for testing and visualization
    visualizer = Visualizer(args.font_dir, args.plots_dir)
    tester = Tester(args)

    # Test a model that predicts values for both valence and arousal
    if tester.dimension == 'both':
        tester.load_model_2d()
        tester.test_2d()

    # Test models that separately predict values for valence and arousal, respectively
    else:
        tester.load_model_1d()
        tester.test_1d()

    if tester.dimension == 'both':
        title = '2D Model'
    else:
        title = '1D Models'

    # Visualize quadrant predictions
    visualizer.plot_quadrant_predictions(tester.valence_dict, tester.arousal_dict, tester.quadrants_dict, title)
    # Visualize valence predictions
    visualizer.plot_valence_predictions(tester.valence_dict, title)
    # Visualize arousal predictions
    visualizer.plot_arousal_predictions(tester.arousal_dict, title)
Example #14
def infer(content_path, style_path, model_path, testsize, outdir):

    # Dataset definition
    dataset = CSDataset(c_path=content_path, s_path=style_path, mode="test")
    collator = ImageCollate(test=True)

    # Model definition
    model = Model()
    model.load_state_dict(torch.load(model_path))
    model.cuda()
    model.eval()

    # Visualizer definition
    visualizer = Visualizer()

    dataloader = DataLoader(dataset,
                            batch_size=testsize,
                            shuffle=True,
                            collate_fn=collator,
                            drop_last=True)
    progress_bar = tqdm(dataloader)

    for index, data in enumerate(progress_bar):
        c, s = data

        with torch.no_grad():
            _, _, _, _, y = model(c, s)

        y = y.detach().cpu().numpy()
        c = c.detach().cpu().numpy()
        s = s.detach().cpu().numpy()

        visualizer(c, s, y, outdir, index, testsize)
Example #15
    def __init__(
        self,
        config,
        outdir,
        modeldir,
        data_path,
        sketch_path,
    ):

        self.train_config = config["train"]
        self.data_config = config["dataset"]
        model_config = config["model"]
        self.loss_config = config["loss"]

        self.outdir = outdir
        self.modeldir = modeldir

        self.dataset = IllustDataset(
            data_path, sketch_path, self.data_config["line_method"],
            self.data_config["extension"], self.data_config["train_size"],
            self.data_config["valid_size"], self.data_config["color_space"],
            self.data_config["line_space"])
        print(self.dataset)

        gen = Generator(layers=model_config["generator"]["num_layers"],
                        attn_type=model_config["generator"]["attn_type"])
        self.gen, self.gen_opt = self._setting_model_optim(
            gen, model_config["generator"])

        dis = Discriminator()
        self.dis, self.dis_opt = self._setting_model_optim(
            dis, model_config["discriminator"])

        self.lossfunc = StyleAdaINLossCalculator()
        self.visualizer = Visualizer(self.data_config["color_space"])
Example #16
File: shiyan.py  Project: hduyuanfu/GAN
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    if opt.vis:
        from visualize import Visualizer
        vis = Visualizer(opt.env)

    # Data preprocessing
    transform = transforms.Compose([
        transforms.Resize(opt.image_size),      # resize; opt.image_size defaults to 96
        transforms.CenterCrop(opt.image_size),  # center-crop to opt.image_size
        transforms.ToTensor(),                  # convert to a tensor with values in [0, 1]
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # normalize to [-1, 1]
    ])
    dataset = datasets.ImageFolder(opt.data_path, transform=transform)  # class labels come from folder names, e.g. faces
    dataloader = torch.utils.data.DataLoader(dataset,  # batch the images; defaults: 256 per batch, 4 worker processes
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=opt.num_workers,
                                             drop_last=True  # drop the last incomplete batch
                                             )


    # Networks: gnet is the generator, dnet the discriminator
    gnet, dnet = GNet(opt), DNet(opt)
    map_location = lambda storage, loc: storage
    if opt.dnet_path:
        dnet.load_state_dict(torch.load(opt.dnet_path, map_location=map_location))
    if opt.gnet_path:
        gnet.load_state_dict(torch.load(opt.gnet_path, map_location=map_location))
Example #17
def train(**kwargs):
    vis = Visualizer('GAN')  # visualization
    for epoch in range(num_epoch):
        for i, (img, _) in tqdm.tqdm(enumerate(dataloader)):
            real_img = img.cuda()

            # train D
            D_optimizer.zero_grad()

            # push D to classify real_img as 1
            output = D(real_img)
            D_loss_real = criterion(output, true_labels)
            D_loss_real.backward()

            # push D to classify fake_img as 0
            noises.data.copy_(torch.randn(batch_size, z_dimension, 1, 1))
            fake_img = G(noises).detach()  # generate fake images, detached from G's graph
            output = D(fake_img)
            D_loss_fake = criterion(output, fake_labels)
            D_loss_fake.backward()
            D_optimizer.step()

            D_loss = D_loss_real + D_loss_fake
            errorD_meter.add(D_loss.item())

            if i % 5 == 0:  # train G every 5 batches
                G_optimizer.zero_grad()
                noises.data.copy_(torch.randn(batch_size, z_dimension, 1, 1))
                fake_img = G(noises)
                output = D(fake_img)
                G_loss = criterion(output, true_labels)
                G_loss.backward()
                G_optimizer.step()
                errorG_meter.add(G_loss.item())

        if (epoch + 1) % 10 == 0:  # save model every 10 epochs

            if os.path.exists('/tmp/debugGAN'):
                ipdb.set_trace()
            fix_fake_imgs = G(fix_noises)
            vis.images(fix_fake_imgs.detach().cpu().numpy()[:64] * 0.5 + 0.5,
                       win='fake image')
            vis.images(real_img.data.cpu().numpy()[:64] * 0.5 + 0.5,
                       win='real image')
            vis.plot('errorD', errorD_meter.value()[0])
            vis.plot('errorG', errorG_meter.value()[0])

            torchvision.utils.save_image(fix_fake_imgs.data[:64],
                                         '%s/%s.png' % ('imgs', epoch),
                                         normalize=True,
                                         range=(-1, 1))
            torch.save(D.state_dict(), 'checkpoints/D_%s.pth' % epoch)
            torch.save(G.state_dict(), 'checkpoints/G_%s.pth' % epoch)
            errorD_meter.reset()
            errorG_meter.reset()
Example #18
def do(data, starting_panel):
    robot = Robot(data, starting_panel)
    while not robot.brain.on_fire:
        robot.step()
    boundaries = Visualizer.boundaries(robot.painted.keys())
    if starting_panel == 1:
        viz = Visualizer(boundaries, 10)
        point_size = 5
    else:
        viz = Visualizer(boundaries, 100)
        point_size = 50
    for p, color in robot.painted.items():
        if color:
            viz.draw_square(p, WHITE, point_size)
    viz.show()
    return len(robot.painted)
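
This example and example #24 later in the list share a grid-drawing interface: a static boundaries() helper, a constructor taking a bounding box and a pixel scale, and draw_square/draw_point/draw_line/show. A Pillow-based sketch of that interface follows; the exact signatures and the flip_vertical default are assumptions reconstructed from the calls in the two examples.

from PIL import Image, ImageDraw


class Visualizer:
    """Hypothetical sketch of the grid visualizer used in examples #18 and #24."""

    def __init__(self, boundaries, scale, flip_vertical=True):
        self.min_x, self.min_y, self.max_x, self.max_y = boundaries
        self.scale = scale
        self.flip = flip_vertical
        size = ((self.max_x - self.min_x + 1) * scale,
                (self.max_y - self.min_y + 1) * scale)
        self.image = Image.new('RGB', size, 'black')
        self.draw = ImageDraw.Draw(self.image)

    @staticmethod
    def boundaries(points):
        # Bounding box of a collection of (x, y) points.
        xs = [p[0] for p in points]
        ys = [p[1] for p in points]
        return min(xs), min(ys), max(xs), max(ys)

    def _pixel(self, p):
        # Map a grid coordinate to the top-left pixel of its cell.
        x = (p[0] - self.min_x) * self.scale
        y = (p[1] - self.min_y) * self.scale
        if self.flip:
            y = self.image.height - self.scale - y
        return x, y

    def draw_square(self, p, color, size):
        x, y = self._pixel(p)
        self.draw.rectangle([x, y, x + size, y + size], fill=color)

    def draw_point(self, p, color, radius):
        x, y = self._pixel(p)
        self.draw.ellipse([x - radius, y - radius, x + radius, y + radius], fill=color)

    def draw_line(self, endpoints, color, width):
        self.draw.line([self._pixel(p) for p in endpoints], fill=color, width=width)

    def show(self):
        self.image.show()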
Example #19
def evaluate(experiment_dir, args):
    """
    Evaluate the model stored in the given directory. It loads the latest available checkpoint and iterates over
    the test set.
    Args:
        experiment_dir: The model directory.
        args: Commandline arguments.
    """
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9,
                                allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        test_model, test_data, config, means, vars, stand = create_and_restore_test_model(
            sess, experiment_dir, args)

        print("Evaluating test set ...")
        eval_result = evaluate_model(sess, test_model, test_data, means, vars,
                                     stand)

        if args.export:
            # Export the results into a csv file that can be submitted.
            fname = os.path.join(
                experiment_dir,
                "predictions_in{}_out{}.csv".format(config['source_seq_len'],
                                                    config['target_seq_len']))
            export_results(eval_result, fname)

            # Export a zip file containing the code that generated the results.
            code_files = glob.glob('./*.py', recursive=False)
            export_code(code_files, os.path.join(experiment_dir, 'code.zip'))

        if args.visualize:
            # Visualize the seed sequence and the prediction for some random samples in the test set.
            fk_engine = SMPLForwardKinematics()
            visualizer = Visualizer(fk_engine)
            n_samples_viz = 10
            rng = np.random.RandomState(42)
            idxs = rng.randint(0, len(eval_result), size=n_samples_viz)
            sample_keys = [list(sorted(eval_result.keys()))[i] for i in idxs]
            for k in sample_keys:
                visualizer.visualize(eval_result[k][1],
                                     eval_result[k][0],
                                     title=k)
Example #20
def run_train(args):
    """
    Function to train a model.
    :param args: command line arguments
    """
    # Create directories for font, models and plots if they do not exist
    if not os.path.exists(args.font_dir):
        os.mkdir(args.font_dir)
    if not os.path.exists(args.models_dir):
        os.mkdir(args.models_dir)
    if not os.path.exists(args.plots_dir):
        os.mkdir(args.plots_dir)

    # Create objects for training and visualization
    visualizer = Visualizer(args.font_dir, args.plots_dir)
    trainer = Trainer(args)

    for epoch in range(trainer.num_epochs):

        # Train and validate a model to predict values for both valence and arousal
        if trainer.dimension == 'both':
            trainer.train_2d()
            trainer.validate_2d()

        # Train and validate a model to predict values for valence or arousal, according to `dimension`
        else:
            trainer.train_1d()
            trainer.validate_1d()

        # Display epoch every `log_interval`
        if (epoch + 1) % trainer.log_interval == 0 or (epoch + 1) == trainer.num_epochs:
            print_epoch(epoch + 1, trainer.train_dict, trainer.test_dict, trainer.dimension)

        # Update the learning rate every `decay_interval`
        if (epoch + 1) % args.decay_interval == 0:
            trainer.update_learning_rate()

    # Visualize train and validation losses
    visualizer.plot_losses(trainer.train_dict, trainer.test_dict, trainer.dimension)
    # Save the trained model
    trainer.save_model()
Example #21
def stitch_with_stitched(result, imageB, info, thresh, vertical=False):
    '''
    stitch the prediction on a stitched image with a raw image.
    '''
    visual = Visualizer()
    print(result[0].shape)

    final_image = make_overlap(result[0], imageB, vertical=vertical)

    prediction_B = get_prediction(imageB, info, display=False)

    if not vertical:
        masks_A, boxes_A = remove_predictions_on_edge(result[1],
                                                      result[2],
                                                      result[0].shape,
                                                      edge='right')
    else:
        masks_A, boxes_A = remove_predictions_on_edge(result[1],
                                                      result[2],
                                                      result[0].shape,
                                                      edge='bottom')

    masks_kept, boxes_kept = remove_overlapped_masks(
        prediction_B.get_field("mask"),
        prediction_B.bbox,
        thresh,
        final_image.shape,
        vertical=vertical)

    stitched_masks, stitched_boxes = visual.display_stitched_instances(
        final_image[:, :, ::-1],
        boxes_A,
        masks_A,
        boxes_kept,
        masks_kept,
        show_mask=False)
    plt.clf()
    stitched_masks = convert_to_torch(stitched_masks)
    stitched_boxes = convert_to_torch(stitched_boxes)

    return [final_image, stitched_masks, stitched_boxes]
Example #22
    def __init__(self, config, outdir, modeldir, data_path, sketch_path,
                 ss_path):

        self.train_config = config["train"]
        self.data_config = config["dataset"]
        model_config = config["model"]
        self.loss_config = config["loss"]

        self.outdir = outdir
        self.modeldir = modeldir

        self.dataset = IllustDataset(
            data_path, sketch_path, ss_path, self.data_config["line_method"],
            self.data_config["extension"], self.data_config["train_size"],
            self.data_config["valid_size"], self.data_config["color_space"],
            self.data_config["line_space"])
        print(self.dataset)

        gen = Generator(model_config["generator"]["in_ch"],
                        num_layers=model_config["generator"]["num_layers"],
                        attn_type=model_config["generator"]["attn_type"],
                        guide=model_config["generator"]["guide"])
        self.gen, self.gen_opt = self._setting_model_optim(
            gen, model_config["generator"])
        self.guide = model_config["generator"]["guide"]

        i_dis = Discriminator(model_config["image_dis"]["in_ch"],
                              model_config["image_dis"]["multi"])
        self.i_dis, self.i_dis_opt = self._setting_model_optim(
            i_dis, model_config["image_dis"])

        s_dis = Discriminator(model_config["surface_dis"]["in_ch"],
                              model_config["surface_dis"]["multi"])
        self.s_dis, self.s_dis_opt = self._setting_model_optim(
            s_dis, model_config["surface_dis"])

        t_dis = Discriminator(model_config["texture_dis"]["in_ch"],
                              model_config["texture_dis"]["multi"])
        self.t_dis, self.t_dis_opt = self._setting_model_optim(
            t_dis, model_config["texture_dis"])

        self.guided_filter = GuidedFilter(r=5, eps=2e-1)
        self.guided_filter.cuda()

        self.out_guided_filter = GuidedFilter(r=1, eps=1e-2)
        self.out_guided_filter.cuda()

        self.vgg = Vgg19(requires_grad=False)
        self.vgg.cuda()
        self.vgg.eval()

        self.lossfunc = WhiteBoxLossCalculator()
        self.visualizer = Visualizer(self.data_config["color_space"])
Example #23
def preprocess_data(args):
    """
    Function to process the data and create data sets.
    :param args: command line arguments
    """

    # Create directories for font and plots if they do not exist
    if not os.path.exists(args.font_dir):
        os.mkdir(args.font_dir)
    if not os.path.exists(args.plots_dir):
        os.mkdir(args.plots_dir)

    # Create objects for preprocessing and visualization
    data_preprocessor = DataPreprocessor(args)
    visualizer = Visualizer(args.font_dir, args.plots_dir)

    # Get information from DEAM files
    data_preprocessor.get_data_info()
    # Get waveforms from audio
    data_preprocessor.get_waveforms()
    # Augment dataset
    data_preprocessor.augment_quadrants()
    # Create sets for train and test
    data_preprocessor.make_train_test_sets()

    # Visualize data distribution in quadrants
    visualizer.visualize_data_distribution(data_preprocessor.annotations, data_preprocessor.quadrants)
    # Visualize data distribution on each valence and arousal dimensions
    visualizer.visualize_dimensions_distribution(data_preprocessor.annotations)
Example #24
def do_part_2(station, data):
    viz = Visualizer((0, 0, 36, 36), 20, flip_vertical=False)
    print(f"\nThe asteroid can see this:")
    asteroids = init_asteroids(data)
    asteroids_by_degrees = list()
    for a in asteroids_by_angle(station, asteroids):
        asteroids_by_degrees.append((rad_to_deg(a[0]), *a))
    # for a in sorted(asteroids_by_degrees):
    #     print(a)

    sets_by_angle = defaultdict(list)
    for a in sorted(asteroids_by_degrees):
        sets_by_angle[a[0]].append(a[3])

    for k, v in sets_by_angle.items():
        if len(v) > 0:
            print(k, len(v), v)
            for a in v:
                if a.xy == (0, 2):
                    print('^^^^^^^^')

    asteroids_shot = 0
    last_asteroid_shot = None
    while asteroids_shot < 200:
        for k, v in sets_by_angle.items():
            last_asteroid_shot = v[0]
            sets_by_angle[k] = v[1:]
            asteroids_shot += 1
            viz.draw_line((station.xy, last_asteroid_shot.xy), COLORS[1], 2)
            viz.draw_point(last_asteroid_shot.xy, COLORS[2], 4)
            print(
                f"Boom, shot asteroid nr. {asteroids_shot}: {last_asteroid_shot}"
            )
            if asteroids_shot == 200:
                code = last_asteroid_shot.x * 100 + last_asteroid_shot.y
                print(f"200th asteroid shot is: {last_asteroid_shot}: {code}")
                break
    viz.draw_point(station.xy, COLORS[0], 10)
    viz.show()
    return 0
Example #25
    def __init__(
        self,
        config,
        outdir,
        modeldir,
        data_path,
        sketch_path,
    ):

        self.train_config = config["train"]
        self.data_config = config["dataset"]
        model_config = config["model"]
        self.loss_config = config["loss"]

        self.outdir = outdir
        self.modeldir = modeldir
        self.mask = self.train_config["mask"]

        self.dataset = IllustDataset(data_path, sketch_path,
                                     self.data_config["extension"],
                                     self.data_config["train_size"],
                                     self.data_config["valid_size"],
                                     self.data_config["color_space"],
                                     self.data_config["line_space"])
        print(self.dataset)

        if self.mask:
            in_ch = 6
        else:
            in_ch = 3

        loc_gen = LocalEnhancer(
            in_ch=in_ch,
            num_layers=model_config["local_enhancer"]["num_layers"])
        self.loc_gen, self.loc_gen_opt = self._setting_model_optim(
            loc_gen, model_config["local_enhancer"])

        glo_gen = GlobalGenerator(in_ch=in_ch)
        self.glo_gen, self.glo_gen_opt = self._setting_model_optim(
            glo_gen, model_config["global_generator"])

        dis = Discriminator(model_config["discriminator"]["in_ch"],
                            model_config["discriminator"]["multi"])
        self.dis, self.dis_opt = self._setting_model_optim(
            dis, model_config["discriminator"])

        self.vgg = Vgg19(requires_grad=False)
        self.vgg.cuda()
        self.vgg.eval()

        self.lossfunc = Pix2pixHDCalculator()
        self.visualizer = Visualizer(self.data_config["color_space"])
Example #26
    def __init__(self, config, outdir, modeldir, data_path, sketch_path,
                 ss_path):

        self.train_config = config["train"]
        self.data_config = config["dataset"]
        model_config = config["model"]
        self.loss_config = config["loss"]

        self.outdir = outdir
        self.modeldir = modeldir

        self.dataset = IllustDataset(
            data_path, sketch_path, ss_path, self.data_config["line_method"],
            self.data_config["extension"], self.data_config["train_size"],
            self.data_config["valid_size"], self.data_config["color_space"],
            self.data_config["line_space"])
        print(self.dataset)

        gen = Generator(model_config["generator"]["in_ch"],
                        base=model_config["generator"]["base"],
                        num_layers=model_config["generator"]["num_layers"],
                        up_layers=model_config["generator"]["up_layers"],
                        guide=model_config["generator"]["guide"],
                        resnext=model_config["generator"]["resnext"],
                        encoder_type=model_config["generator"]["encoder_type"])
        self.gen, self.gen_opt = self._setting_model_optim(
            gen, model_config["generator"])
        self.guide = model_config["generator"]["guide"]

        dis = Discriminator(model_config["discriminator"]["in_ch"],
                            model_config["discriminator"]["multi"],
                            base=model_config["discriminator"]["base"],
                            sn=model_config["discriminator"]["sn"],
                            resnext=model_config["discriminator"]["resnext"],
                            patch=model_config["discriminator"]["patch"])
        self.dis, self.dis_opt = self._setting_model_optim(
            dis, model_config["discriminator"])

        self.vgg = Vgg19(requires_grad=False, layer="four")
        self.vgg.cuda()
        self.vgg.eval()

        self.out_filter = GuidedFilter(r=1, eps=1e-2)
        self.out_filter.cuda()

        self.lossfunc = LossCalculator()
        self.visualizer = Visualizer(self.data_config["color_space"])

        self.scheduler_gen = torch.optim.lr_scheduler.ExponentialLR(
            self.gen_opt, self.train_config["gamma"])
        self.scheduler_dis = torch.optim.lr_scheduler.ExponentialLR(
            self.dis_opt, self.train_config["gamma"])
Example #27
def stitch(imageA, imageB, info, thresh=100, vertical=False):
    '''
    get the prediction of imageA and imageB and stitch the prediction together
    '''
    result = make_overlap(imageA, imageB, vertical=vertical)

    prediction_A = get_prediction(imageA, info, display=False)
    prediction_B = get_prediction(imageB, info, display=False)

    if not vertical:
        masks_A, boxes_A = remove_predictions_on_edge(
            prediction_A.get_field("mask"),
            prediction_A.bbox,
            imageA.shape,
            edge='right')
    else:
        masks_A, boxes_A = remove_predictions_on_edge(
            prediction_A.get_field("mask"),
            prediction_A.bbox,
            imageA.shape,
            edge='bottom')

    masks_kept, boxes_kept = remove_overlapped_masks(
        prediction_B.get_field("mask"),
        prediction_B.bbox,
        thresh,
        result.shape,
        vertical=vertical)

    visual = Visualizer()
    stitched_masks, stitched_boxes = visual.display_stitched_instances(
        result[:, :, ::-1],
        boxes_A,
        masks_A,
        boxes_kept,
        masks_kept,
        show_mask=False)
    plt.clf()
    return result, stitched_masks, stitched_boxes
Example #29
def stitch_two_stitched(imageA, imageB, thresh, vertical=True):
    '''
    stitch the stitched images together from the other direction.
    '''
    visual = Visualizer()

    final_image = make_overlap(imageA[0], imageB[0], vertical=vertical)

    if not vertical:
        masks_A, boxes_A = remove_predictions_on_edge(imageA[1],
                                                      imageA[2],
                                                      imageA[0].shape,
                                                      edge='right')
    else:
        masks_A, boxes_A = remove_predictions_on_edge(imageA[1],
                                                      imageA[2],
                                                      imageA[0].shape,
                                                      edge='bottom')

    masks_kept, boxes_kept = remove_overlapped_masks(imageB[1],
                                                     imageB[2],
                                                     thresh,
                                                     final_image.shape,
                                                     vertical=vertical)
    #print(len(masks_kept))
    stitched_masks, stitched_boxes = visual.display_stitched_instances(
        final_image[:, :, ::-1],
        boxes_A,
        masks_A,
        boxes_kept,
        masks_kept,
        show_mask=False)
    plt.clf()
    stitched_masks = convert_to_torch(stitched_masks)
    stitched_boxes = convert_to_torch(stitched_boxes)

    return [final_image, stitched_masks, stitched_boxes]
Example #30
def main(args):
    
    # check cuda availabilty
    if torch.cuda.is_available(): 
        device = 'cuda'
        os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_idx
    
    else:
        device = 'cpu'

    # for data loader
    train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('../data', train=True, download=True, 
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                    ])),
                batch_size=args.batch_size, shuffle=True)
    
    test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('../data', train=False, download=True, 
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                    ])),
                batch_size=args.batch_size)
    
    print('magnitude function : ', args.magnitude, '\tangular function : ', args.angular)
    
    # model
    model = DCNet(magnitude=args.magnitude, angular=args.angular, device=device).to(device)
    
    # TODO: inspect the model parameters
    # optimizer
    optimizer = optim.SGD(model.parameters(), lr=2e-3, momentum=0.9)

    # visualize
    visualizer = Visualizer(lr=100)
    
    # train
    for epoch in range(1, args.epochs +1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader, visualizer)

    # save
    torch.save(model.state_dict(), "mnist.pt")
Example #31
    def __init__(self,
                 config,
                 outdir,
                 outdir_fix,
                 modeldir,
                 data_path,
                 sketch_path=None):

        self.train_config = config["train"]
        self.data_config = config["dataset"]
        model_config = config["model"]
        self.loss_config = config["loss"]

        self.outdir = outdir
        self.outdir_fix = outdir_fix
        self.modeldir = modeldir

        self.dataset = DanbooruFacesDataset(data_path, sketch_path,
                                            self.data_config["extension"],
                                            self.data_config["train_size"],
                                            self.data_config["valid_size"],
                                            self.data_config["color_space"],
                                            self.data_config["line_space"])

        print(self.dataset)

        gen = Generator(
            model_config["generator"]["in_ch"],
            latent_dim=model_config["generator"]["l_dim"],
            num_layers=model_config["generator"]["num_layers"],
        )
        self.gen, self.gen_opt = self._setting_model_optim(
            gen, model_config["generator"])

        dis = Discriminator(
            model_config["discriminator"]["in_ch"],
            multi_pattern=model_config["discriminator"]["multi"])
        self.dis, self.dis_opt = self._setting_model_optim(
            dis, model_config["discriminator"])
        enc = LatentEncoder(latent_dim=model_config["encoder"]["l_dim"])
        self.enc, self.enc_opt = self._setting_model_optim(
            enc, model_config["encoder"])

        self.lossfunc = DiverseColorizeLossCalculator()
        self.visualizer = Visualizer(self.data_config["color_space"])
Example #32
def multi_stitching_second_direction(stitched, overlap=100, vertical=True):
    '''
    stitch the stitched images together from the other direction.
    stitched: a list of vertically or horizontally stitched images.
    '''
    vis = Visualizer()

    result_info = stitch_two_stitched(stitched[0],
                                      stitched[1],
                                      overlap,
                                      vertical=vertical)
    for k in range(2, len(stitched)):
        print("Number: ", k)
        result_info = stitch_two_stitched(result_info,
                                          stitched[k],
                                          overlap,
                                          vertical=vertical)
    return result_info
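
The loop in multi_stitching_second_direction is a left fold over the list of stitched strips. Assuming the same stitch_two_stitched signature, it could equivalently be written with functools.reduce:

from functools import reduce

# Fold the remaining strips onto the first one, stitching pairwise.
result_info = reduce(
    lambda acc, strip: stitch_two_stitched(acc, strip, overlap, vertical=vertical),
    stitched[1:],
    stitched[0],
)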
Example #33
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    device=t.device('cuda') if opt.gpu else t.device('cpu')
    if opt.vis:
        from visualize import Visualizer
        vis = Visualizer(opt.env)

    # Data
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = tv.datasets.ImageFolder(opt.data_path, transform=transforms)
    dataloader = t.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=opt.num_workers,
                                         drop_last=True
                                         )

    # Networks
    netg, netd = NetG(opt), NetD(opt)
    map_location = lambda storage, loc: storage
    if opt.netd_path:
        netd.load_state_dict(t.load(opt.netd_path, map_location=map_location))
    if opt.netg_path:
        netg.load_state_dict(t.load(opt.netg_path, map_location=map_location))
    netd.to(device)
    netg.to(device)


    # Define the optimizers and the loss
    optimizer_g = t.optim.Adam(netg.parameters(), opt.lr1, betas=(opt.beta1, 0.999))
    optimizer_d = t.optim.Adam(netd.parameters(), opt.lr2, betas=(opt.beta1, 0.999))
    criterion = t.nn.BCELoss().to(device)

    # Real images get label 1, fake images label 0
    # noises is the input to the generator
    true_labels = t.ones(opt.batch_size).to(device)
    fake_labels = t.zeros(opt.batch_size).to(device)
    fix_noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)
    noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    errord_meter = AverageValueMeter()
    errorg_meter = AverageValueMeter()


    epochs = range(opt.max_epoch)
    for epoch in iter(epochs):
        for ii, (img, _) in tqdm.tqdm(enumerate(dataloader)):
            real_img = img.to(device)

            if ii % opt.d_every == 0:
                # Train the discriminator
                optimizer_d.zero_grad()
                ## classify real images as real
                output = netd(real_img)
                error_d_real = criterion(output, true_labels)
                error_d_real.backward()

                ## classify fake images as fake
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises).detach()  # generate fake images from the noise
                output = netd(fake_img)
                error_d_fake = criterion(output, fake_labels)
                error_d_fake.backward()
                optimizer_d.step()

                error_d = error_d_fake + error_d_real

                errord_meter.add(error_d.item())

            if ii % opt.g_every == 0:
                # Train the generator
                optimizer_g.zero_grad()
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises)
                output = netd(fake_img)
                error_g = criterion(output, true_labels)
                error_g.backward()
                optimizer_g.step()
                errorg_meter.add(error_g.item())

            if opt.vis and ii % opt.plot_every == opt.plot_every - 1:
                ## Visualization
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()
                fix_fake_imgs = netg(fix_noises)
                vis.images(fix_fake_imgs.detach().cpu().numpy()[:64] * 0.5 + 0.5, win='fixfake')
                vis.images(real_img.data.cpu().numpy()[:64] * 0.5 + 0.5, win='real')
                vis.plot('errord', errord_meter.value()[0])
                vis.plot('errorg', errorg_meter.value()[0])

        if (epoch+1) % opt.save_every == 0:
            # Save the models and sample images
            tv.utils.save_image(fix_fake_imgs.data[:64], '%s/%s.png' % (opt.save_path, epoch), normalize=True,
                                range=(-1, 1))
            t.save(netd.state_dict(), 'checkpoints/netd_%s.pth' % epoch)
            t.save(netg.state_dict(), 'checkpoints/netg_%s.pth' % epoch)
            errord_meter.reset()
            errorg_meter.reset()
Example #34
            print(stat_testing_values, '=')

            for model, name in zip(models, model_names):

                clf = model
                clf.fit(stat_training_attrs, stat_training_values)
                stat_pred = clf.predict(stat_training_attrs)
                print(stat_pred)
                diff = []
                for trueValue, predValue in zip(stat_testing_values, stat_pred):
                    diff.append(abs(trueValue - predValue))
                print('Errors for model', name, ':', sum(diff), 'on value set', value_set)

    # Visualize data
    if args.v:
        visualizer = Visualizer()
        visualizer.visualize(data, EDGE_FUNCS[args.edge], split=args.split,
                save=args.save, show=args.show)

    if args.p:
        # Select prediction method
        if args.p == 'kmeans':
            print('Using k-means clustering metric.')
            attribute_clusters, attribute_and_friendship_clusters, weighted_attribute_and_friendship_clusters = k_means_clustering(data, featureWeightMap, args.show)
            
            attribute_clusters = _convert_kmeans_format(attribute_clusters)
            attribute_and_friendship_clusters = _convert_kmeans_format(attribute_and_friendship_clusters)
            weighted_attribute_and_friendship_clusters = _convert_kmeans_format(weighted_attribute_and_friendship_clusters)
            
            real_training_data = 'real_training_data.csv'
            kmeans_attrs = 'kmeans_attrs.csv'
Example #35
class ACO(object):
    '''
    Perform ACO on the maze.
    '''

    iterations = 15

    evaporation = 0.1

    # Initialize Q to high value
    Q = 10000

    # update Q using the minimum path length  as value.
    update_Q = False

    ant_count = 10

    # Number of steps an ant may wander before it is terminated for that
    # iteration.
    ant_max_steps = 10000
    update_max_steps = False

    # Whether or not to optimize the trails of the ants after they found the end.
    optimize_ants = True

    visualize = True
    quiet = False

    multiprocessing = True

    do_reconnaissance = 4000

    maze_elimination = True

    def __init__(self, maze, **settings):
        self.maze = maze
        self.ants = []

        for name, value in settings.items():
            setattr(self, name, value)

        if self.visualize:
            self.visualizer = Visualizer(maze)
            self.visualizer.save('0_initial.png')

        if self.multiprocessing:
            self.pool = multiprocessing.Pool()

    def delta_matrix(self, ant):
        delta_tau = np.zeros((self.maze.height, self.maze.width))

        unique_positions = list(set(ant.position_list))
        delta_tau_k = self.Q / len(unique_positions)

        for x, y in unique_positions:
            delta_tau[y][x] += delta_tau_k

        return delta_tau

    def reconnaissance(self, iterations=1):
        maze = self.maze
        if self.do_reconnaissance < 1:
            return maze

        print('performing reconnaissance with %d ants for %d steps in %d iterations' % (
            self.ant_count, self.do_reconnaissance, iterations
        ))

        disabled = set()
        start_time = time.time()
        for iteration in range(iterations):
            ants = []
            for i in range(self.ant_count):
                ants.append(Ant(maze, maze.start))

            results = self.pool.map_async(
                ant_loop_apply, zip(ants, [self.do_reconnaissance] * self.ant_count)
            ).get(999999)

            for ant in results:
                for disable in ant.disable_positions:
                    maze.disable_at(disable)
                    disabled.add(disable)

        print('Reconnaissance done, %d cells disabled in %0.2fs' % (
            len(disabled),
            time.time() - start_time
        ))
        return maze

    def run(self):
        if not self.quiet:
            print('starting ACO with %d ants for %d iterations' % (
                self.ant_count, self.iterations
            ))
        maze = self.maze

        self.iteration_best_trail = []

        # initialize ants
        for k in range(self.ant_count):
            self.ants.append(Ant(maze, maze.start))

        global_best = iteration_best = None
        for i in range(self.iterations):
            if not self.quiet:
                print('\nIteration: %d, Q: %d, max_steps: %d' % (i, self.Q, self.ant_max_steps))

            if self.multiprocessing:
                # Make ants do their steps.
                self.ants = self.pool.map_async(
                    ant_loop_apply, zip(self.ants, [self.ant_max_steps] * self.ant_count)
                ).get(9999999)
            else:
                print('Stepping...')
                for ant in self.ants:
                    i = 0
                    while not ant.done and len(ant.trail) < self.ant_max_steps:
                        ant.step()

                        i += 1
                        if i % 1000 == 1 and self.visualize:
                            self.visualizer.update('stepping: %d' % i)

                    if not ant.done:
                        print('moving to next ant, this one is stuck at', ant.position)

            done_ants = [a for a in self.ants if a is not None and a.done]

            if not self.quiet:
                print('%d out of %d ants finished within %d steps.' % (
                    len(done_ants),
                    self.ant_count,
                    self.ant_max_steps
                ))

            if self.optimize_ants:
                # optimize the trails for these ants
                opts = []
                for ant in done_ants:
                    opts.append(ant.optimize_trail(quiet=self.quiet))
                if not self.quiet:
                    print('Optimisation reduced trail length by an average of', mean(opts))

            # disable the dead ends found by the ant
            if self.maze_elimination:
                for ant in self.ants:
                    if ant is not None:
                        for p in ant.disable_positions:
                            self.maze.disable_at(p)

            # select the best ant:
            if len(done_ants) > 0:
                iteration_best = min(done_ants)

                # if global_best becomes invalid, forget it.
                # if global_best is not None:
                #     global_best.maze = self.maze
                #     if not global_best.is_valid():
                #         global_best = None
                #         if not self.quiet:
                #             print 'Forgot global best!'

                if global_best is None:
                    global_best = iteration_best.clone()
                else:
                    global_best = min([iteration_best, global_best]).clone()

            # update pheromone in the maze, for unique positions
            deltas = np.zeros((self.maze.height, self.maze.width))
            if global_best is not None:
                deltas = self.delta_matrix(global_best)

            if iteration_best is not None and global_best is not iteration_best:
                deltas += self.delta_matrix(iteration_best)

            # only update if iteration returned something.
            if iteration_best is not None:
                self.iteration_best_trail.append(len(iteration_best.trail))
            else:
                self.iteration_best_trail.append(None)

            maze.update_tau(delta_tau=deltas, evaporation=self.evaporation)

            # update ant_max_steps to the max value of this iteration
            if len(done_ants) > 3:
                if self.update_max_steps:
                    try:
                        self.ant_max_steps = min(
                            self.ant_max_steps,
                            max(len(x.trail) for x in done_ants if len(x.trail) < self.ant_max_steps)
                        )
                    except ValueError:
                        # no finished ant had a trail shorter than ant_max_steps
                        pass
                if self.update_Q:
                    self.Q = min(min(len(x.trail) for x in self.ants), self.Q)

            if not self.quiet:
                if iteration_best is not None and global_best is not None:
                    print('Best ant: %d, iteration best: %d' % (
                        len(global_best.trail),
                        len(iteration_best.trail)
                    ))
                else:
                    print('None of the ants finished stepping')

            # reset ants
            for ant in self.ants:
                ant.reset(maze)

            if self.visualize:
                self.visualizer.update('Pheromone level iteration %d' % i)
                self.visualizer.save('%dth_iteration.png' % i)

        if self.multiprocessing:
            self.interrupt()

        self.global_best = global_best
        return global_best

    def interrupt(self):
        if self.multiprocessing:
            self.pool.close()
            self.pool.join()

    def get_first_iteration_with_best_trail(self):
        trail_length = len(self.global_best.trail)

        for i, val in enumerate(self.iteration_best_trail):
            if val == trail_length:
                return i
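
For context, a hedged usage sketch of the ACO class; Maze and Ant are this project's own classes, and the maze loader shown here is an assumption, not an API the example confirms.

# Hypothetical usage; Maze.from_file and the maze file format are assumptions.
maze = Maze.from_file('hard_maze.txt')

aco = ACO(maze,
          iterations=10,
          ant_count=20,
          visualize=False,        # skip the PNG snapshots
          multiprocessing=False)  # step the ants in-process

best = aco.run()
if best is not None:
    print('best trail length:', len(best.trail))
    print('first iteration reaching it:', aco.get_first_iteration_with_best_trail())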