Example #1
 def show_pairs(self):
     number_of_pairs = len(self)
     indexes = random.sample(range(number_of_pairs), k=4)
     pairs = [self[index] for index in indexes]
     lrs = [lr for _, lr in pairs]
     hrs = [hr for hr, _ in pairs]
     show_images(lrs + hrs, 2, 4)
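A note on the shared helper: every example on this page calls some project-local show_images, and its signature varies from project to project. For reference, here is a minimal sketch of the (images, rows, cols) grid form used above; it assumes matplotlib and array-like or PIL images, while the real helpers add extras such as titles, colormaps, normalization, and saving.

import matplotlib.pyplot as plt
import numpy as np

def show_images(imgs, rows, cols, scale=2):
    # Lay the images out on a rows x cols grid with the axes hidden.
    _, axes = plt.subplots(rows, cols, figsize=(cols * scale, rows * scale))
    for ax, img in zip(np.asarray(axes).reshape(-1), imgs):
        ax.imshow(img)
        ax.axis('off')
    plt.show()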
Example #2
 def show_samples(self):
     # get some random training images
     samples = next(iter(self.train_loader))
     print("Dimensions:")
     print("bg         :", list(samples["bg"].shape)[1:])
     print("fg_bg      :", list(samples["fg_bg"].shape)[1:])
     print("fg_bg_mask :", list(samples["fg_bg_mask"].shape)[1:])
     print("fg_bg_depth:", list(samples["fg_bg_depth"].shape)[1:], "\n")
     num_img = 4
     images = []
     keys = ("bg", "fg_bg", "fg_bg_mask", "fg_bg_depth")
     for i in range(num_img):
         for k in keys:
             # if k in ("bg", "fg_bg"):
             images.append(
                 denormalize(samples[k][i],
                             mean=self.stats[k]["mean"],
                             std=self.stats[k]["std"]))
             # else:
             # 	images.append(samples[k][i])
     show_images(images,
                 cols=num_img,
                 figsize=(6, 6),
                 show=True,
                 titles=keys)
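Example #2 also leans on a project-specific denormalize helper. A minimal sketch of what it presumably does, namely inverting a torchvision-style Normalize on a CxHxW tensor; the mean/std keyword names come from the call above, the rest is an assumption.

import torch

def denormalize(tensor, mean, std):
    # Undo (x - mean) / std channel by channel on a CxHxW tensor.
    mean = torch.as_tensor(mean, dtype=tensor.dtype).view(-1, 1, 1)
    std = torch.as_tensor(std, dtype=tensor.dtype).view(-1, 1, 1)
    return tensor * std + mean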
Example #3
    def find_blocks(self, debug=False, debug_small=False):
        hue = self.crop_hsv[:, :, 0]
        sat = self.crop_hsv[:, :, 1]
        val = self.crop_hsv[:, :, 2]

        blocks_mask = logical_and(sat > 0.2, val > 0.3)
        blocks_mask = erosion(blocks_mask, self.mini_selem)
        blocks_mask = dilation(blocks_mask, self.selem)

        def seek_color(hue_start, hue_end):
            mask = logical_and(blocks_mask,
                               logical_and(hue > hue_start, hue < hue_end))
            mask = erosion(mask, self.mini_selem)
            return mask

        masks = {
            name: seek_color(hue_start, hue_end)
            for name, (hue_start, hue_end) in self.COLOR_BLOCKS.items()
        }

        if debug:
            show_images([
                ('all', blocks_mask, 'gray'),
            ] + [(name, mask, make_colormap(name))
                 for name, mask in masks.items()])

        labels = {name: label(mask) for name, mask in masks.items()}
        return labels
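The debug branch assumes a make_colormap helper that turns a block-color name into something matplotlib accepts. A hypothetical sketch, assuming the COLOR_BLOCKS keys are valid matplotlib color names:

from matplotlib.colors import LinearSegmentedColormap

def make_colormap(name):
    # Hypothetical helper: a two-tone colormap fading from black to the color.
    return LinearSegmentedColormap.from_list(name, ['black', name])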
Example #4
def train(discriminator, generator, show_every=250, num_epochs=20):
    start_t = time.time()
    iter_count = 0
    discriminator.add_optimizer()
    generator.add_optimizer()
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            discriminator.optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator.forward(
                2 * (real_data.view(batch_size, -1) - 0.5)).type(
                    torch.FloatTensor)

            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)
            logits_fake = discriminator.forward(fake_images.detach().view(
                batch_size, -1))

            d_total_error = discriminator.loss(logits_real, logits_fake)
            d_total_error.backward()
            discriminator.optimizer.step()

            generator.optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)

            gen_logits_fake = discriminator.forward(
                fake_images.view(batch_size, -1))
            g_loss = generator.loss(gen_logits_fake)
            g_loss.backward()
            generator.optimizer.step()

            if (iter_count % show_every == 0):
                checkpt_t = time.time()
                print("time : {:.2f} sec".format(checkpt_t - start_t))
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                print("real logits average ", torch.mean(logits_real).data)
                print("average output generator : ",
                      torch.mean(fake_images).data)
                print("fake logits average ", torch.mean(gen_logits_fake).data)
                imgs = fake_images[:16].data.numpy()
                show_images(imgs,
                            iter_num=iter_count,
                            save=True,
                            show=False,
                            model=generator.label)
            iter_count += 1
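The loop above assumes a sample_noise(batch_size, dim) helper. A common convention in this style of GAN code is uniform noise in [-1, 1]; a minimal sketch under that assumption:

import torch

def sample_noise(batch_size, dim):
    # Uniform noise in [-1, 1] with shape (batch_size, dim).
    return 2 * torch.rand(batch_size, dim) - 1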
Example #5
 def show_sample(self, split="train"):
     assert split in ["train", "val", "test"], f"Invalid split: {split}"
     if hasattr(self, f"{split}_data"):
         loader = getattr(self, f"{split}_loader")()
         print(f"No. of batches in {split}: ", len(loader))
         x, y, z = next(iter(loader))
         show_images(torch.cat((x, y, z)))
     else:
         print(f"Split {split} not found")
Example #6
    def train(self,
              x_train,
              batch_size,
              epochs,
              run_folder,
              print_every_n_batches=50,
              using_generator=False):
        """
        this function is used to train the dataset
        :param x_train: the input image
        :param batch_size: the batch size
        :param epochs: the total epochs
        :param run_folder: the folder to save image
        :param print_every_n_batches: the batches number to print
        :param using_generator: whether use generator or not
        """

        start_time = time.time()
        init_epoch = self.epoch

        for epoch in range(self.epoch, self.epoch + epochs):

            d = self.train_discriminator(x_train, batch_size, using_generator)
            g = self.train_generator(batch_size)

            self.d_losses.append(d)
            self.g_losses.append(g)

            if epoch % print_every_n_batches == 0:
                print("Epoch: %d" % epoch)
                print("Discriminator loss: (%.3f)(Real %.3f, Fake %.3f)" %
                      (d[0], d[1], d[2]))
                print("Discriminator accuracy: (%.3f)(Real %.3f, Fake %.3f)" %
                      (d[3], d[4], d[5]))
                print("Generator loss: %.3f,  Generator accuracy: %.3f" %
                      (g[0], g[1]))

                image_path = os.path.join(run_folder,
                                          "images/sample_%d.png" % self.epoch)
                self.sample_images(batch_size, image_path)
                self.model.save_weights(
                    os.path.join(run_folder,
                                 'weights/weights-%d.h5' % (epoch)))
                self.model.save_weights(
                    os.path.join(run_folder, 'weights/weights.h5'))
                self.save_model(run_folder)

                # Print some performance statistics.
                end_time = time.time()
                current_epoch = epoch - init_epoch
                time_taken = end_time - start_time
                print("Current epoch: %i,  time since start: %.1f sec" %
                      (current_epoch, time_taken))

                show_images(image_path)
                # Generate and show some predictions.
            self.epoch += 1
Example #7
def apply(img, aug, n=4):
    # Convert to float32: first because the augs need float inputs to do
    # their transforms, and second because astype makes a copy -- some augs
    # gain speed by mutating the input in place instead of allocating output.
    X = [aug(img.astype('float32')) for _ in range(n * n)]
    # Some augs do not guarantee valid pixel values, so clip once;
    # imshow requires float images to lie in [0, 1].
    Y = nd.stack(*X).clip(0, 255) / 255
    utils.show_images(Y, n, n, figsize=(8, 8))
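A possible call site for apply, assuming MXNet is installed; the image path is hypothetical.

from mxnet import image

img = image.imread('cat.jpg')  # hypothetical HxWx3 NDArray
apply(img, image.HorizontalFlipAug(0.5))  # 4x4 grid of random flips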
Example #8
def test_readdata():
    logging.info('Reading the dataset ...')
    train_imgs = ImageFolder(os.path.join(data_dir, 'train'))
    test_imgs = ImageFolder(os.path.join(data_dir, 'test'))

    # Inspect a few images
    hotdogs = [train_imgs[i][0] for i in range(8)]
    not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
    show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4)
    plt.show()
Example #9
def get_train_valid_loader(
    train_data,
    valid_data,
    batch_size=4,
    valid_size=0.1,
    show_sample=False,
    num_workers=NUM_WORKERS,
    pin_memory=False,
    shuffle=True,
    seed=SEED,
):
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert (valid_size >= 0) and (valid_size <= 1), error_msg

    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    if shuffle:
        np.random.seed(seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_dataset = torch.utils.data.Subset(train_data, train_idx)
    valid_dataset = torch.utils.data.Subset(valid_data, valid_idx)

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    valid_loader = DataLoader(
        valid_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    print("Training Batches: ", len(train_loader))
    print("Validation Batches: ", len(valid_loader))

    # visualize some images
    if show_sample:
        x, y, z = next(iter(train_loader))
        show_images(torch.cat((x, y, z)))
        x, y, z = next(iter(valid_loader))
        show_images(torch.cat((x, y, z)))

    return train_loader, valid_loader
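A usage sketch for the factory above. One design point to note: train_data and valid_data are expected to be index-aligned datasets over the same samples (typically with train-time and eval-time transforms, respectively), since both are sliced with indices drawn from len(train_data); shuffle also only shuffles the split once rather than reshuffling batches each epoch.

# Hypothetical datasets; any pair of index-aligned torch Datasets works.
train_loader, valid_loader = get_train_valid_loader(
    train_data, valid_data, batch_size=8, valid_size=0.2)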
Example #10
def main():

    graph = tf.Graph()
    with graph.as_default():
        with gfile.FastGFile(utils.PATH_TO_MERGED_GRAPH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

    # input tensor
    image_input = graph.get_tensor_by_name('image_input:0')

    # output tensors
    tensors = [
        "class/final_result", "detect/detection_classes",
        "detect/num_detections", "detect/detection_scores",
        "detect/detection_boxes"
    ]
    tensor_dict = {out: '%s:0' % out for out in tensors}
    tensor_dict = {
        out: graph.get_tensor_by_name(name)
        for out, name in tensor_dict.items()
    }

    # examples
    examples_number = 32
    examples = pd.read_csv(utils.LABELLING_OUTPUT_PATH)
    examples = examples.sample(frac=1).reset_index(drop=True)
    examples = examples.iloc[:examples_number]
    examples['image'] = utils.get_images(examples['path_to_image'])

    # display
    utils.show_images(examples, utils.MAPPER_CLASSIFICATION)

    with tf.Session(graph=graph) as sess:

        for _, row in examples.iterrows():
            image = utils.transform_image(row['image'])
            result = sess.run(tensor_dict, {image_input: image})
            result = {key: value[0] for key, value in result.items()}

            detections = utils.construct_detection_df(result)
            classification: np.ndarray = result['class/final_result']
            classification = utils.decode_classification(classification)
            print(detections)
            print(classification)

            image = utils.visualize_boxes_and_labels(row['image'], detections)
            plt.imshow(image)
            plt.show()
Example #11
    def __init__(
        self,
        data_path,
        batch_size=64,
        img_resolution=128,
        split_option=1,
        test_size=0.3,
        kshot=5,
        force_reload_data=False,
    ):

        SPLIT_BY_LABEL = 1
        NORMAL_SPLIT = 2

        self.img_resolution = img_resolution
        self.batch_size = batch_size
        self.force_reload_data = force_reload_data
        self.loaddata(data_path)

        self.init_labels = np.copy(self.labels)

        # keep only identities that have more than one image
        self.x, self.labels = self.filter_one_image(self.x, self.labels)

        self.x = utils.norm(self.x)

        self.x, self.x_test, self.labels, self.labels_test = (
            utils.split_by_label(self.x, self.labels, test_size=test_size)
            if split_option == SPLIT_BY_LABEL else train_test_split(
                self.x, self.labels, test_size=test_size))

        (
            self.x_test,
            self.x_support,
            self.labels_test,
            self.labels_support,
        ) = self.support_split(self.x_test, self.labels_test, kshot=kshot)

        # convert string labels to numerical labels
        _, self.y = np.unique(self.labels, return_inverse=True)
        _, self.y_test = np.unique(self.labels_test, return_inverse=True)
        _, self.y_support = np.unique(self.labels_support, return_inverse=True)

        self.classes = np.unique(self.y)
        self.per_class_ids = {}
        ids = np.arange(len(self.x))
        for c in self.classes[:3]:
            self.per_class_ids[c] = ids[self.y == c]
            utils.show_images(self.x[self.per_class_ids[c]][:10], True, False)
Example #12
def attack(**kwargs):
    parsing.set_log_level(kwargs['log_level'])

    if kwargs['deterministic']:
        if kwargs['seed'] is None:
            logger.warning('Determinism is enabled, but no seed has been provided.')

        utils.enable_determinism()

    logger.debug('Running attack command with kwargs %s.', kwargs)

    if kwargs['cpu_threads'] is not None:
        torch.set_num_threads(kwargs['cpu_threads'])

    if kwargs['seed'] is not None:
        utils.set_seed(kwargs['seed'])

    model = parsing.parse_model(kwargs['domain'], kwargs['architecture'],
                                kwargs['state_dict_path'], False,
                                kwargs['masked_relu'], False, load_weights=True)
    model.eval()
    model.to(kwargs['device'])

    dataset = parsing.parse_dataset(kwargs['domain'], kwargs['dataset'],
                                    dataset_edges=(kwargs['start'], kwargs['stop']))
    dataloader = torch.utils.data.DataLoader(
        dataset, kwargs['batch_size'], shuffle=False)

    attack_config = utils.read_attack_config_file(kwargs['attack_config_file'])

    attack_pool = parsing.parse_attack_pool(
        kwargs['attacks'], kwargs['domain'], kwargs['p'], kwargs['attack_type'], model, attack_config, kwargs['device'], seed=kwargs['seed'])

    p = kwargs['p']

    if kwargs['blind_trust']:
        logger.warning(
            'Blind trust is activated. This means that the success of the attack will NOT be checked.')

    adversarial_dataset = tests.attack_test(model, attack_pool, dataloader, p, kwargs['misclassification_policy'],
                                            kwargs['device'], attack_config, kwargs, dataset.start, dataset.stop,
                                            None, blind_trust=kwargs['blind_trust'])
    adversarial_dataset.print_stats()

    if kwargs['save_to'] is not None:
        utils.save_zip(adversarial_dataset, kwargs['save_to'])

    if kwargs['show'] is not None:
        utils.show_images(adversarial_dataset.genuines,
                          adversarial_dataset.adversarials, limit=kwargs['show'], model=model)
Example #13
    def show_sample(self,
                    content_img,
                    style_img,
                    concate=True,
                    denorm=True,
                    deprocess=True):
        gen_img = self.generate(content_img, style_img)

        if concate:
            return utils.show_images(
                np.concatenate([content_img, style_img, gen_img]), denorm,
                deprocess)

        if denorm:
            content_img = utils.de_norm(content_img)
            style_img = utils.de_norm(style_img)
            gen_img = utils.de_norm(gen_img)
        if deprocess:
            content_img = utils.deprocess(content_img)
            style_img = utils.deprocess(style_img)
            gen_img = utils.deprocess(gen_img)

        cv2_imshow(content_img[0])
        cv2_imshow(style_img[0])
        cv2_imshow(gen_img[0])
Example #14
    def show_imgs(self, img):
        if len(img.shape) == 4:
            return utils.show_images(img, self.normalize, self.preprocessing)

        if self.normalize:
            img = utils.de_norm(img)
        if self.preprocessing:
            img = utils.deprocess(img)

        cv2_imshow(img)
Example #15
    def test(self, classes_num=None):
        if classes_num is None:
            classes_num = self.params.show_classes_num
        img_size = self.params.img_size
        img_channel = self.params.img_channel
        
        img_list = []
        for i in range(classes_num):
            show_class = self.params.show_classes[i]
            img = self.dloader.img_grouped[show_class[0]][show_class[1]]
            img_list.append(img)
        img = torch.stack(img_list)
        img = img.to(device=device_CPU, dtype=dtype)

        imgr = img.view(-1, img_channel, img_size, img_size)
        imgo = self.model(imgr.to(device=device, dtype=dtype))
        imgo = imgo.view(-1, img_channel * img_size * img_size).detach().to(
            device=device_CPU, dtype=dtype)

        # print(img.shape, imgo.shape, torch.cat((img, imgo)).shape)
        utils.show_images(torch.cat((img, imgo)), self.params)
Example #16
def main(preview_data=False):

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, y_train = prepare_data(x_train, y_train)
    x_test, y_test = prepare_data(x_test, y_test)

    multiplier = np.random.uniform(0, 1, x_train.shape[0])
    noise = np.random.normal(0.2, 0.2, x_train.shape)
    x_train += multiplier[:, None, None, None] * noise

    # with Stopwatch('Preparing data'):
    #     generator = ImageDataGenerator(
    #          rotation_range=30,
    #          zoom_range=0,
    #          width_shift_range=4,
    #          height_shift_range=4,
    #          shear_range=10,
    #          brightness_range=None,
    #          fill_mode='nearest',
    #          data_format='channels_last')
    #     data = generator.flow(x_train, y_train, batch_size=len(x_train))
    #     x_gen, y_gen = next(data)
    #     x_train = np.concatenate((x_train, x_gen), axis=0)
    #     y_train = np.concatenate((y_train, y_gen), axis=0)

    if preview_data:
        show_images(x_train)

    model = None
    with tf.device('/gpu:0'):

        config = tf.ConfigProto(log_device_placement=False)
        sess = tf.Session(config=config)
        K.set_session(sess)

        model = learn(x_train, y_train, x_test, y_test)
        test(model, x_test, y_test)
        #optimize(x_train, y_train)

        if model and preview_data:
            show_failures(model, x_test, y_test)
Example #17
def test_harris(path):
    img = load_image(path, is_float=True)
    corners_ocv = cv2.cornerHarris(img, 5, 3, 0.04, cv2.BORDER_CONSTANT)
    # corners_orig[corners_orig < 0.0] = 0.0

    model = km.Model(
        name='harris_fcn',
        model=create_harris_model(
            kernel_size=(5, 5),
            k=0.04,
            t=0.0,
            with_nms=False  # testing mode
        )
    )

    corners_fcn = model.predict(img)
    corners_adf = km.adf(corners_fcn, corners_ocv, 3)
    print(corners_adf.max())

    f, axes = plt.subplots(2, 2)
    show_images(f, axes, [img, corners_fcn, corners_ocv, corners_adf])
Example #18
def show_bad_cases(test_fname, param):
    # load test data
    X_test, y_test = utils.load_data(test_fname)
    X_test_normed = np.array(
        [utils.pre_process(X_test[i]) for i in range(len(X_test))],
        dtype=np.float32)

    n_data, n_rows, n_cols, n_channels = X_test.shape
    param._n_rows = n_rows
    param._n_cols = n_cols
    param._n_channels = n_channels

    # load model
    n_classes = int(np.max(y_test) + 1)
    tf.reset_default_graph()
    net = network.TrafficSignNet(n_classes, param)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    saver.restore(sess, param._model_fname)

    # test
    preds_test = sess.run(net._preds, {
        net._X: X_test_normed,
        net._is_training: False
    })
    test_accuracy = utils.classification_accuracy(y_test, preds_test)
    print('test accuracy: ', test_accuracy)
    sess.close()
    X_test_normed = None

    # show test images that are not correctly classified
    err_indices = np.where(preds_test != y_test)[0]
    utils.show_images(X_test,
                      y_test,
                      err_indices,
                      n_cols=5,
                      num_images=200,
                      preds=preds_test)
Example #19
def main(preview_data=False):

    x, y = generate_images(nm_samples=70000, side_pixels=28)
    if preview_data:
        show_images(x[:100], [str(i) for i in y[:100]], shape=(28, 28))

    x_train = x[0:-10000]
    y_train = y[0:-10000]
    x_test = x[-10000:]
    y_test = y[-10000:]

    model = None
    with tf.device('/GPU:0'):

        config = tf.ConfigProto(log_device_placement=False)
        sess = tf.Session(config=config)
        K.set_session(sess)

        model = learn(x_train, y_train, x_test, y_test)

        PredictionServer.model = model
        server.test(HandlerClass=PredictionServer)
Example #20
 def show_switch_latent(self, show_range=None):
     if show_range is None:
         show_range = self.params.show_classes_num
     s_latent = []
     z_latent = []
     img_lists = []
     real_lists = []
     ground_truth_list = []
     s_enc = self.s_enc
     z_enc = self.z_enc
     sz_dec = self.sz_dec
     img_size = self.params.img_size
     img_channel = self.params.img_channel
     show_classes = self.params.show_classes

     # the first image in the grid is a black one
     img_lists.append(torch.zeros(img_channel * img_size * img_size).to(device=device, dtype=dtype))

     for classi in range(show_range):
         show_class = show_classes[classi]
         img = self.dloader.img_grouped[show_class[0]][show_class[1]].view(1, img_channel, img_size, img_size)
         real_lists.append(self.dloader.img_grouped[show_class[0]][show_class[1]])
         img = img.to(device=device, dtype=dtype)

         ground_truth_list.append(img.view(img_channel * img_size * img_size))
         s_latent.append(s_enc(img))
         z_latent.append(z_enc(img))
     img_lists.extend(ground_truth_list)

     for row in range(show_range):
         img_lists.append(ground_truth_list[row])
         for col in range(show_range):
             latent = torch.cat((s_latent[col], z_latent[row]), dim=1)
             recon = sz_dec(latent)
             img_lists.append(recon.view(img_channel * img_size * img_size))

     utils.show_images(torch.stack(img_lists).detach().cpu().numpy(), self.params)
     utils.show_images(torch.stack(real_lists).detach().cpu().numpy(), self.params)
Example #21
    def show_interpolated(self, inter_step=4, tuples=None):
        if tuples is None:
            tuples = self.params.interpolated_tuples
        img_size = self.params.img_size
        img_channel = self.params.img_channel
        
        inter_img1 = self.dloader.img_grouped[tuples[0][0]][tuples[0][1]].view(1, img_channel, img_size, img_size)
        inter_img2 = self.dloader.img_grouped[tuples[1][0]][tuples[1][1]].view(1, img_channel, img_size, img_size)
        inter_img1 = inter_img1.to(device=device, dtype=dtype)
        inter_img2 = inter_img2.to(device=device, dtype=dtype)
        
        s_enc = self.s_enc
        z_enc = self.z_enc
        sz_dec = self.sz_dec

        s_lat1 = s_enc(inter_img1)
        z_lat1 = z_enc(inter_img1)
        s_lat2 = s_enc(inter_img2)
        z_lat2 = z_enc(inter_img2)

        weights = np.linspace(0., 1., inter_step)
        weights = torch.tensor(weights).to(device=device, dtype=dtype)

        #print(z_lat1,z_lat2)

        img_lists = []
        for row_w in weights:
            for col_w in weights:
                s_latent = (1 - row_w) * s_lat1 + row_w * s_lat2
                z_latent = (1 - col_w) * z_lat1 + col_w * z_lat2
                latent = torch.cat((s_latent, z_latent), dim=1)
                recon = sz_dec(latent)
                img_lists.append(recon.view(img_channel * img_size * img_size))

        utils.show_images(torch.stack(img_lists).detach().cpu().numpy(), self.params)
Example #22

def get_data(batch_size, train_augs, test_augs=None):
    cifar10_train = gluon.data.vision.CIFAR10(
        train=True, transform=get_transform(train_augs))
    cifar10_test = gluon.data.vision.CIFAR10(
        train=False, transform=get_transform(test_augs))
    train_data = utils.DataLoader(cifar10_train, batch_size, shuffle=True)
    test_data = utils.DataLoader(cifar10_test, batch_size, shuffle=True)
    return (train_data, test_data)


train_data, _ = get_data(36, train_augs)
for imgs, _ in train_data:
    break
utils.show_images(imgs.transpose((0, 2, 3, 1)), 6, 6)

from mxnet import init


def train(train_augs, test_augs, learning_rate=.1):
    batch_size = 128
    num_epochs = 10
    ctx = mx.cpu()
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    train_data, test_data = get_data(batch_size, train_augs, test_augs)
    net = utils.resnet18(10)
    net.initialize(init=init.Xavier())
    net.hybridize()
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': learning_rate})
Example #23
#     batches = 0
#     for X_batch, Y_batch in izip(image_datagen.flow(data,shuffle=False,batch_size=1,seed=seed),mask_datagen.flow(labels,shuffle=False,batch_size=1,seed=seed)):
#         print X_batch.shape, Y_batch.shape
#         x = combine_channels([X_batch[0]])[0]
#         y = Y_batch[0].reshape((img_rows,img_cols))
#         print x.shape, y.shape
#         # show_images([x,y])
#         #
#         # raise
#         loss = model.train(X_batch, Y_batch)
#         batches += 1
#         if batches >= len(X_train) / 32:
#             # we need to break the loop by hand because
#             # the generator loops indefinitely
#             break




save_model(model,"aug_dunet")

from sklearn.utils import shuffle
inds = shuffle(range(len(data)))[:5]
preds = model.predict(data[inds],verbose=1)
print "preds orig:\n",preds
print "orig shape",preds.shape
data_dash = combine_channels(data[inds])
preds = preds.reshape((-1,img_rows,img_cols))
show_images(data_dash,preds/255)

Example #24
def ben():
    """Video streaming generator function."""
    transform = transforms.Compose(
        [transforms.Resize((160, 160)),
         transforms.ToTensor()])

    # Parameters
    usn_number = temp__
    usn_number = usn_number.strip()
    usn_number = usn_number.upper()
    print(usn_number)
    frame_rate = 16
    prev = 0
    image_size = 600
    threshold = 0.80
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    bbx_color = (0, 255, 0)
    wait_time = 10  # For face scan
    time_to_adjust = 10  # Before book scan begins

    current_person = None
    # Init MTCNN object
    reader = easyocr.Reader(
        ['en'])  # need to run only once to load model into memory
    mtcnn = MTCNN(image_size=image_size,
                  keep_all=True,
                  device=device,
                  post_process=True)
    model = InceptionResnetV1(pretrained='vggface2', classify=False).eval()
    # Real time data from webcam
    frames = []
    boxes = []
    face_results = []
    # Load stored face data related to respective card number
    faces = []
    usn_nums = []
    face_file = None
    try:
        dataset_dir = os.path.join('flask-opencv-streaming-master', 'Dataset')
        for usn_ in os.listdir(dataset_dir):
            face_file = open(os.path.join(dataset_dir, usn_), 'rb')
            if face_file is not None:
                face = pickle.load(face_file)
                faces.append(face)
                usn_nums.append(usn_)
    except FileNotFoundError:
        print("Face data doesn't exist for this USN.")
        exit()
    # Infinite Face Detection Loop
    v_cap = cv2.VideoCapture(0)
    v_cap.set(cv2.CAP_PROP_FRAME_WIDTH, image_size)
    v_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, image_size)
    flag = False
    start = time.time()
    while True:
        time_elapsed = time.time() - prev
        break_time = time.time() - start
        if break_time > wait_time:
            break
        ret, frame = v_cap.read()
        if time_elapsed > 1. / frame_rate:  # Collect frames every 1/frame_rate of a second
            prev = time.time()
            frame_ = Image.fromarray(frame)
            frames.append(frame_)
            batch_boxes, prob, landmark = mtcnn.detect(frames, landmarks=True)
            frames_duplicate = frames.copy()
            boxes.append(batch_boxes)
            boxes_duplicate = boxes.copy()
            # show imgs with bbxs
            img, result = show_images(frames_duplicate, boxes_duplicate,
                                      bbx_color, transform, threshold, model,
                                      faces, usn_nums, usn_number)
            face_results.append(result)
            cv2.imshow("Detection", img)
            frames = []
            boxes = []
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    v_cap.release()
    cv2.destroyAllWindows()
    accuracy = (sum(face_results) / len(face_results)) * 100
    print('Percentage match ' + '{:.2f}'.format(accuracy))
    if accuracy > 60:
        print('Authorization Successful')
        print('Happy Learning')
    else:
        print('Authorization Unsuccessful')
        return render_template('index.html')
    temp = 'y'
    if temp != 'y':
        print('No books borrowed')

    scan_books(reader, temp, image_size, time_to_adjust, usn_number)
Example #25
    60000).map(lambda x: tf.image.random_crop(
        tf.pad((tf.image.resize(tf.expand_dims(x, -1),
                                (32, 32))), [[1, 1], [1, 1], [0, 0]],
               mode='SYMMETRIC'), [32, 32, 1])).batch(256).prefetch(
                   tf.data.experimental.AUTOTUNE)

noiseDim = 32
N = 180  # Number of images to generate

gan = DCGAN(noiseDim, (32, 32, 1), 1 / 4, lr=0.0002)
gan.summary()

# Train DC GAN
gan.train(train_dataset,
          300,
          initial_epoch=0,
          save_gen_freq=1,
          save_examples_dir='mnist/cutG/',
          checkpoint_freq=2,
          checkpoint_dir='mnist/saves/')
# Plot Generator and Discriminator loss history
gan.plotLossHistory()

# Generate images
imgs = gan.generator(tf.random.normal((N, noiseDim))).numpy()
imgs = imgs * 127.5 + 127.5
show_images(imgs)

# Measure FID
print(f"FID: {gan.FID(train_dataset)}")
Example #26
import sys
sys.path.append("..")
import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if torch.cuda.is_available():
    PATH = '/GPUFS/sysu_zhenghch_1/yuange/pytorch/dataset'
else:
    PATH = 'D:/Datasets'

train_imgs = ImageFolder(os.path.join(PATH, 'hotdog/train'))
test_imgs = ImageFolder(os.path.join(PATH, 'hotdog/test'))

hotdogs = [train_imgs[i][0] for i in range(8)]
not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
utils.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4)

# When using a pretrained model, inputs must get the same preprocessing as in pretraining
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
train_augs = transforms.Compose([
    transforms.RandomResizedCrop(size=224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(), normalize
])

test_augs = transforms.Compose([
    transforms.Resize(size=256),
    transforms.CenterCrop(size=224),
    transforms.ToTensor(), normalize
])
Example #27
from PIL import Image

import sys

sys.path.append("..")
import utils

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Download the dataset
if torch.cuda.is_available():
    PATH = '/GPUFS/sysu_zhenghch_1/yuange/pytorch/dataset/CIFAR10'
else:
    PATH = 'D:/Datasets/CIFAR10'
all_images = torchvision.datasets.CIFAR10(train=True, root=PATH, download=True)
utils.show_images([all_images[i][0] for i in range(32)], 4, 8, scale=0.8)

# Image augmentation
flip_aug = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor()  # convert to a PyTorch-usable format
])

no_aug = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])

# Load the data
num_workers = 0 if sys.platform.startswith('win') else 4


def load_cifar10(is_train, augs, batch_size, root=PATH):
    dataset = torchvision.datasets.CIFAR10(root=PATH,
Example #28
File: Main.py Project: clebov/Ai
scale_size = 64 # We resize the images to 64x64 for training
NOISE_DIM = 100
NUM_EPOCHS = 50
learning_rate = 0.0002
celeba_root = 'celeb'

celeba_train = ImageFolder(root=celeba_root, transform=transforms.Compose([
    transforms.Resize(scale_size),
    transforms.ToTensor(),
]))


celeba_loader_train = DataLoader(celeba_train, batch_size=batch_size, drop_last=True)

imgs = next(iter(celeba_loader_train))[0].numpy().squeeze()
show_images(imgs, color=True)

# =============================================================================
print("Vanilla GAN")
D = Discriminator().to(device)
G = Generator(noise_dim=NOISE_DIM).to(device)
D_optimizer = torch.optim.Adam(D.parameters(), lr=learning_rate,
                               betas=(0.5, 0.999))
G_optimizer = torch.optim.Adam(G.parameters(), lr=learning_rate,
                               betas=(0.5, 0.999))
# original gan
train(D, G, D_optimizer, G_optimizer, discriminator_loss, generator_loss,
      num_epochs=NUM_EPOCHS, show_every=250,
      train_loader=celeba_loader_train, device=device)
# =============================================================================
torch.cuda.empty_cache()
Example #29
def main():
    with open(os.path.join('features', 'images_features_3716_images.txt'),
              'rb') as f:
        data = pickle.load(f)
        images = [x + '.jpg' for x in data.keys()]
        X = np.vstack(list(data.values()))

        pca = PCA(n_components=2, whiten=True).fit(X)
        X = pca.transform(X)
        print('PCA transform done')
        # #############################################################################
        # Generate sample data
        # centers = [[1, 1], [-1, -1], [1, -1]]
        # X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
        #                             random_state=0)
        #
        # print(X)

        # #############################################################################
        # Compute Affinity Propagation
        af = AffinityPropagation().fit(X)
        cluster_centers_indices = af.cluster_centers_indices_
        labels = af.labels_
        print('Done clustering')

        n_clusters_ = len(cluster_centers_indices)
        print('Total clusters', n_clusters_)
        input('Press Enter to Continue')
        #
        # print('Estimated number of clusters: %d' % n_clusters_)
        # print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
        # print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
        # print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
        # print("Adjusted Rand Index: %0.3f"
        #       % metrics.adjusted_rand_score(labels_true, labels))
        # print("Adjusted Mutual Information: %0.3f"
        #       % metrics.adjusted_mutual_info_score(labels_true, labels))
        # print("Silhouette Coefficient: %0.3f"
        #       % metrics.silhouette_score(X, labels, metric='sqeuclidean'))

        # #############################################################################
        # Plot result
        if not os.path.exists(os.path.join('Results', DIR)):
            os.mkdir(os.path.join('Results', DIR))
        else:
            [
                os.unlink(os.path.join('Results', DIR, cf))
                for cf in os.listdir(os.path.join('Results', DIR))
            ]

        print('Making images clusters...')
        images_clusters = defaultdict(list)
        for i, img in enumerate(images):
            images_clusters[labels[i]].append(img)
            # path = os.path.join('affinity_cluster_images', str(labels[images.index(img)]))
            # if not os.path.exists(path):
            #     os.mkdir(path)
            # copy(os.path.join('images', img), path)

        print('forming cluster images...')
        for clus in images_clusters.keys():
            print('Cluster', clus)
            print('No of images', len(images_clusters[clus]))
            c_images = [
                cv2.resize(cv2.imread(os.path.join('data', 'OCTimages', c)),
                           (700, 700)) for c in images_clusters[clus]
            ]
            cols = len(c_images) // 2 if len(c_images) // 2 > 0 else 1
            show_images(c_images[:15], DIR=DIR, cluster=clus, cols=cols)
            print('Saved', clus)
Example #30
def apply(img, aug, n=3):
    X = [aug(img.astype('float32')) for _ in range(n * n)]
    Y = nd.stack(*X).clip(0, 255) / 255
    utils.show_images(Y, n, n, figsize=(256, 256))