Code Example #1
    def sample_images(self, epoch):
        r, c = 10, 10
        ones = np.ones((r * c, 1))
        enoise = np.random.normal(
            0.0, 1.0, size=[r * c, self.latent_dim - self.num_classes])
        enoiseImage = np.random.uniform(
            0.0, 1.0, size=[r * c, self.img_rows, self.img_cols, 1])
        sampled_labels = np.random.uniform(0, 1, (r * c, self.num_classes))
        sampled_labels = np.around(sampled_labels)

        noise = np.concatenate((sampled_labels, enoise), axis=-1)
        gen_imgs = self.generator.predict([ones, enoiseImage, noise])
        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5
        utils.write_image(self.writer,
                          'Generated Image',
                          gen_imgs[:10],
                          step=epoch)
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, :])
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig(self.images_path + "/%d.png" % (epoch))
        plt.close()
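The snippet assumes a project-local utils.write_image(writer, tag, images, step) that logs a batch of images to a summary writer. A minimal sketch of such a helper, assuming a TensorFlow 2 summary writer (the logging backend is not shown in the original):

import tensorflow as tf


def write_image(writer, tag, images, step):
    # Assumed helper: log a batch of [N, H, W, C] images scaled to [0, 1]
    # to TensorBoard under `tag` at the given step.
    with writer.as_default():
        tf.summary.image(tag, images, step=step, max_outputs=len(images))
    writer.flush()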
Code Example #2
    def _execute(self, *args):
        """
        Execute all phases on the image.

        :return: The final image step.
        """
        for p in self.__phases[self.__starting_step:self.__ending_step]:
            r = run_worker(p, self.__image_steps, config=self._args)
            self.__image_steps.append(r)

            if self.__altered_path:
                if self._args.get('folder_altered'):
                    path = self._args['folder_altered']
                else:
                    path = self.__altered_path

                write_image(r, os.path.join(path, "{}.png".format(p.__name__)))

                Conf.log.debug("{} Step Image Of {} Execution".format(
                    os.path.join(path, "{}.png".format(p.__name__)),
                    camel_case_to_str(p.__name__),
                ))

        write_image(self.__image_steps[-1], self.__output_path)
        Conf.log.info("{} Created".format(self.__output_path))
        Conf.log.debug("{} Result Image Of {} Execution".format(
            self.__output_path, camel_case_to_str(self.__class__.__name__)))

        return self.__image_steps[-1]
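run_worker, write_image, and camel_case_to_str come from the project's own modules. Judging only by how the log messages read, camel_case_to_str could be sketched as follows (an assumption, not the project's actual implementation):

import re


def camel_case_to_str(name):
    # Insert a space before each interior capital: "ColorTransfer" -> "Color Transfer".
    return re.sub(r'(?<!^)(?=[A-Z])', ' ', name)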
Code Example #3
    def minimize_with_adam(self, optimizer):
        if self.verbose:
            print('\nMINIMIZING LOSS USING: ADAM OPTIMIZER')
        train_op = optimizer.minimize(self.total_loss)
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        self.sess.run(self.net['input'].assign(self.init_img))

        if self.write_iterations_adam:
            out_dir = os.path.join(self.img_output_dir, self.img_name,
                                   timestr.get_time())
            maybe_make_directory(out_dir)

        for iterations in range(self.max_iterations):
            self.sess.run(train_op)

            # write image at every iteration
            if self.write_iterations_adam:
                img_path = os.path.join(
                    out_dir, self.img_name + str(iterations) + '.png')
                output_img = self.sess.run(self.net['input'])
                write_image(img_path, output_img)
            if iterations % self.print_iterations == 0 and self.verbose:
                curr_loss = self.total_loss.eval()
                print("At iterate {}\tf= {}".format(iterations, curr_loss[0]))
Code Example #4
File: main.py  Project: josephsv96/DistortionRemover
def main():
    config = load_config()
    working_dir, output_path = create_output_folder()
    img_file_list = list(working_dir.glob('**/*.bmp'))

    for img_file in tqdm(img_file_list):
        src = read_image(img_file, config)
        dst = DistortionRemover(src, config).image_undist()
        out_file = str(output_path / (img_file.stem + "_undist.bmp"))
        write_image(dst, out_file)

    return True
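read_image and write_image are project helpers that are not shown. Assuming they are thin wrappers around OpenCV (the config-driven flag below is hypothetical), they could look like this:

import cv2


def read_image(img_file, config):
    # Hypothetical wrapper: `config` might select grayscale vs. color loading.
    flag = cv2.IMREAD_GRAYSCALE if config.get('grayscale') else cv2.IMREAD_COLOR
    return cv2.imread(str(img_file), flag)


def write_image(image, out_file):
    # Thin wrapper over OpenCV's writer.
    cv2.imwrite(out_file, image)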
Code Example #5
File: yolo.py  Project: Karsten12/Detect
def postprocess(frame, outs, show_frame=False, save_image=False):
    """ Remove the bounding boxes with low confidence using non-maxima suppression
    
    Arguments:
        frame {numpy.ndarray} -- The image containing the detected object
        outs {list} -- The output of the yolov3 neural net for this frame
    
    Keyword Arguments:
        show_frame {bool} -- Whether to show/display frames (default: {False})
        save_image {bool} -- Whether to save images of detected objects (default: {False})
    """
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]

    # Scan through all the bounding boxes output from the network and keep only the
    # ones with high confidence scores. Assign the box's class label as the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # non maximum suppression to eliminate redundant overlapping boxes with lower confidences
    indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    for i in indices:
        # older OpenCV versions return each index wrapped in a list
        i = i[0]
        # Skip classes that aren't people (class id 2 in this project's class list)
        if classIds[i] != 2:
            continue
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        if save_image:
            class_name = classes[classIds[i]]
            dimensions = (top, top + height, left, left + width)
            utils.write_image(frame, "output/yolo", class_name, dimensions)
        if show_frame:
            drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
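utils.write_image(frame, "output/yolo", class_name, dimensions) saves a crop of each detection, with dimensions packed as (top, bottom, left, right). A sketch under that reading (the file-naming scheme is an assumption):

import os
import time

import cv2


def write_image(frame, out_dir, class_name, dimensions):
    # Assumed helper: crop the detection out of the frame and save it
    # under out_dir with the class name and a timestamp in the file name.
    top, bottom, left, right = dimensions
    crop = frame[max(top, 0):bottom, max(left, 0):right]
    os.makedirs(out_dir, exist_ok=True)
    filename = '{}_{}.png'.format(class_name, int(time.time() * 1000))
    cv2.imwrite(os.path.join(out_dir, filename), crop)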
Code Example #6
File: style.py  Project: xfarxod/deep-style-transfer
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_file',
                        type=str,
                        default='data/vg-30.pb',
                        help='Pretrained model file to run')

    parser.add_argument('--input',
                        type=str,
                        default='data/sf.jpg',
                        help='Input image to process')
    parser.add_argument('--output',
                        type=str,
                        default="output.png",
                        help='Output image file')

    args = parser.parse_args()
    logging.basicConfig(stream=sys.stdout,
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=logging.INFO,
                        datefmt='%I:%M:%S')

    with open(args.model_file, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def)
        graph = tf.get_default_graph()

    with tf.Session(config=tf.ConfigProto(
            intra_op_parallelism_threads=4)) as session:
        graph_info = session.graph

        logging.info("Initializing graph")
        session.run(tf.initialize_all_variables())

        model_name = os.path.split(args.model_file)[-1][:-3]
        image = graph.get_tensor_by_name("import/%s/image_in:0" % model_name)
        out = graph.get_tensor_by_name("import/%s/output:0" % model_name)

        shape = image.get_shape().as_list()
        target = [
            utils.load_image(args.input, image_h=shape[1], image_w=shape[2])
        ]
        logging.info("Processing image")
        start_time = datetime.now()
        processed = session.run(out, feed_dict={image: target})
        logging.info("Processing took %f" %
                     ((datetime.now() - start_time).total_seconds()))
        utils.write_image(args.output, processed)
        logging.info("Done")
Code Example #7
def main():
    config = load_config()
    working_dir, output_path = create_output_folder()
    img_file_list = list(working_dir.glob('**/*.bmp'))

    img_num = len(img_file_list)    # for progress reporting

    for i, img_file in enumerate(img_file_list):
        src = read_image(img_file, config)
        dst = DistortionRemover(src, config).image_undist()
        out_file = str(output_path / (img_file.stem + "_undist.bmp"))
        write_image(dst, out_file)
        print("Processed:" + str(i+1) + "/" + str(img_num))

    return True
Code Example #8
def run_image_test(input_dir, output_dir):
    """ Performs lane detection on the test images """
    # Create an output dir if necessary
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Read the input_files into a list
    filenames = os.listdir(input_dir)

    for img_file in filenames:
        # open image
        input_img = utils.read_image(os.path.join(input_dir, img_file))
        # process image
        output_img = lane_detection_pipeline(input_img)
        # save the output image
        utils.write_image(os.path.join(output_dir, img_file), output_img, BGR=True)
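The BGR=True flag suggests the pipeline works in RGB while OpenCV writes BGR. One plausible reading of the contract (an assumption, since the helper is not shown):

import cv2


def write_image(path, image, BGR=False):
    # Assumed contract: the pipeline produces RGB images, and BGR=True
    # requests the RGB -> BGR conversion that cv2.imwrite expects.
    if BGR:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(path, image)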
Code Example #9
def draw(model_name, x_te, y_te):
    model = load_model('model/%s.h5' % model_name)
    output_dir = 'output/' + model_name

    imgs_pred = model.predict(x_te[:50])
    imgs_pred = np.argmax(imgs_pred, axis=3)

    y_te = np.reshape(y_te, [-1, 72, 72])
    print(y_te.shape)

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    for idx, img_pred in enumerate(imgs_pred):
        write_image('%s/valid.%03d.png' % (output_dir, idx), img_pred)
        write_image('%s/true.%03d.png' % (output_dir, idx), y_te[idx])
Code Example #10
    def _setup(self, *args):
        self.__phases = select_phases(self._args)
        self.__input_path = self._args['input']
        self.__output_path = self._args['output']
        self.__temp_input_paths = []
        self.__temp_output_paths = []
        self.__tmp_dir = tempfile.mkdtemp()
        self.__fps = 25.0

        Conf.log.debug("Temporay dir is {}".format(self.__tmp_dir))

        try:
            video = cv2.VideoCapture(self.__input_path)
            self.__fps = video.get(cv2.CAP_PROP_FPS)
        except Exception:
            Conf.log.debug(
                "Error trying to get frame-rate from gif. Default: 25")

        if self.__fps <= 0:
            self.__fps = 25.0

        imgs = imageio.get_reader(self.__input_path)


        for i, im in enumerate(imgs):
            frame_input_path = os.path.join(self.__tmp_dir,
                                            "input_{}.png".format(i))
            frame_output_path = os.path.join(self.__tmp_dir,
                                             "output_{}.png".format(i))

            self.__temp_input_paths.append(frame_input_path)
            self.__temp_output_paths.append(frame_output_path)

            write_image(cv2.cvtColor(im, cv2.COLOR_RGB2BGR), frame_input_path)

        Conf.log.info("GIF have {} frames to process @ {}fps".format(
            len(self.__temp_input_paths), self.__fps))

        self._args['input'] = self.__temp_input_paths
        self._args['output'] = self.__temp_output_paths
Code Example #11
def sobel(r):
    image = read_image(r)
    ACCESS_LOG("Response shape: {}".format(image.shape))
    # despite the function name, this applies Canny edge detection
    image = cv.Canny(image, 300, 200)
    # image = sobel_each(image)
    output = write_image(image, format="png")

    return output
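In this service-style code, read_image(r) decodes an HTTP response into an array and write_image(image, format=...) encodes the result back into bytes (Code Example #20 returns r.content directly, which supports this reading). A self-contained sketch of the pair, assuming a requests-like response object:

import cv2 as cv
import numpy as np


def read_image(r):
    # Assumed helper: decode the HTTP response body into an image array.
    data = np.frombuffer(r.content, dtype=np.uint8)
    return cv.imdecode(data, cv.IMREAD_UNCHANGED)


def write_image(image, format="png"):
    # Assumed helper: encode the image into bytes for the HTTP response.
    ok, buf = cv.imencode('.' + format, image)
    if not ok:
        raise ValueError('image encoding failed')
    return buf.tobytes()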
Code Example #12
 def run_epoch(self,
               session,
               train_op,
               train_writer,
               batch_gen=None,
               num_iterations=NUM_ITERATIONS,
               output_dir="output",
               write_image=False):
     epoch_size = num_iterations
     start_time = time.time()
     image_skip = 1 if epoch_size < 5 else epoch_size // 5
     summary_skip = 1 if epoch_size < 25 else epoch_size // 25
     for step in xrange(epoch_size):
         if self.model_name == MULTISCALE:
             feed = self.add_noise_to_feed({})
         else:
             feed = {}
         batch = batch_gen.get_batch()
         feed[self.image] = batch
         if self.is_training:
             ops = [
                 train_op, self.loss, self.merged, self.image_summary,
                 self.input_summary, self.generator.out, self.global_step
             ]
             _, loss, summary, image_summary, input_summary, last_out, global_step = session.run(
                 ops, feed_dict=feed)
             if write_image and step % image_skip == 0:
                 utils.write_image(
                     '%s/images/valid_%d.png' % (output_dir, step), last_out)
             if train_writer is not None:
                 if step % summary_skip == 0:
                     train_writer.add_summary(summary, global_step)
                     train_writer.flush()
                 if step % image_skip == 0:
                     train_writer.add_summary(input_summary)
                     train_writer.flush()
                     train_writer.add_summary(image_summary)
                     train_writer.flush()
         else:
             ops = self.generator.out
             last_out = session.run(ops, feed_dict=feed)
             loss = summary = image_summary = input_summary = global_step = None
     return loss, summary, image_summary, last_out, global_step
Code Example #13
    def validate(self, glasses=False, male=False):
        noise = np.random.normal(0, 1, (10, self.latent_dim))

        if glasses or male:
            for j in range(10):
                noise = np.random.normal(0, 1, (10, self.latent_dim))
                fig, axs = plt.subplots(2, 10)
                label = np.array([[0, 0, 0, 0, 0] for _ in range(10)])
                img_default = 0.5 * self.generator.predict([noise, label]) + 0.5
                for i in range(10):
                    axs[0, i].imshow(img_default[i])
                    axs[0, i].axis('off')
                if glasses:
                    label = np.array([[0, 1, 0, 0, 0] for _ in range(10)])
                    img_condition = 0.5 * self.generator.predict(
                        [noise, label]) + 0.5
                elif male:
                    label = np.array([[0, 0, 1, 0, 0] for _ in range(10)])
                    img_condition = 0.5 * self.generator.predict(
                        [noise, label]) + 0.5
                for i in range(10):
                    axs[1, i].imshow(img_condition[i])
                    axs[1, i].axis('off')
                fig.savefig('../images_condition/validate{}{}.png'.format(
                    '_glasses' if glasses else '_male', j))
            return

        fig, axs = plt.subplots(4, 8)

        for i in range(2**5):
            label_str = "{:05b}".format(i)
            print(label_str)
            label = np.array(
                [[int(label_str[j]) for j in range(len(label_str))]
                 for _ in range(10)])
            imgs = 0.5 * self.generator.predict([noise, label]) + 0.5
            utils.write_image(self.writer, 'Image: {}'.format(label_str), imgs)
            axs[i // (2**3), i % (2**3)].imshow(imgs[0])
            axs[i // (2**3), i % (2**3)].axis('off')
        fig.savefig('../images_condition/validate{}{}.png'.format(
            '_glasses' if glasses else '', '_male' if male else ''))
        plt.close()
Code Example #14
File: visualizer.py  Project: arassadin/dogsVScats
def visualize(model, layer_name):
    print 'Model loaded.'
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    for filter_index in sample(range(0, layer_dict[layer_name].nb_filter), 10):
        layer_output = layer_dict[layer_name].output
        loss = K.mean(layer_output[:, filter_index, :, :])
        grads = K.gradients(loss, model.layers[0].input)[0]
        grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
        iterate = K.function(
            [model.layers[0].input, K.learning_phase()], [loss, grads])

        input_img_data = np.asarray([read_image('visimage.jpg')])

        for _ in xrange(100):
            loss_value, grads_value = iterate([input_img_data, 0])
            input_img_data += grads_value * 3

        img = deprocess_image(input_img_data[0])
        write_image(img, '../activations/out{}.jpg'.format(filter_index))
Code Example #15
 def sample_images(self, epoch):
     r, c = 10, 10
     noise = np.random.normal(0, 1, (r * c, self.latent_dim))
     sampled_labels = np.random.uniform(0, 1, (r * c, self.num_classes))
     sampled_labels = np.around(sampled_labels)
     gen_imgs = self.generator.predict([noise, sampled_labels])
     # Rescale images 0 - 1
     gen_imgs = 0.5 * gen_imgs + 0.5
     utils.write_image(self.writer,
                       'Generated Image',
                       gen_imgs[:10],
                       step=epoch)
     fig, axs = plt.subplots(r, c)
     cnt = 0
     for i in range(r):
         for j in range(c):
             axs[i, j].imshow(gen_imgs[cnt, :, :, :])
             axs[i, j].axis('off')
             cnt += 1
     fig.savefig(self.images_path + "/%d.png" % (epoch))
     plt.close()
Code Example #16
    def test_forward_merge_one_to_active(self, relPath, baseFmt, block):
        """
        Forward Merge One to Active Layer

        Create image chain: BASE---S1---S2
        Start VM
        Merge S1 >> S2
        Final image chain:  BASE---S2
        """
        base_file = utils.create_image('BASE', fmt=baseFmt, block=block)
        utils.write_image(base_file, 0, 3072, 1)
        s1_file = utils.create_image('S1', 'BASE', relative=relPath,
                                     backingFmt=baseFmt, block=block)
        utils.write_image(s1_file, 1024, 2048, 2)
        s2_file = utils.create_image('S2', 'S1', relative=relPath,
                                     block=block)
        utils.write_image(s2_file, 2048, 1024, 3)

        dom = utils.create_vm('livemerge-test', 'S2', block=block)
        try:
            dom.blockRebase(s2_file, base_file, 0, 0)
            flags = libvirt.VIR_DOMAIN_BLOCK_JOB_TYPE_PULL
            self.assertTrue(utils.wait_block_job(dom, s2_file, flags))
        finally:
            dom.destroy()

        self.assertTrue(utils.verify_image(s2_file, 0, 1024, 1))
        self.assertTrue(utils.verify_image(s2_file, 1024, 1024, 2))
        self.assertTrue(utils.verify_image(s2_file, 2048, 1024, 3))
        self.assertTrue(utils.verify_backing_file(base_file, None))
        self.assertTrue(utils.verify_backing_file(s2_file, 'BASE',
                                                  relative=relPath,
                                                  block=block))
        self.assertTrue(utils.verify_image_format(s1_file, 'qcow2'))
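In this storage test, utils.write_image(path, offset, length, pattern) writes a known byte pattern into a disk image so verify_image can check it after the merge. A sketch built on qemu-io's pattern write (the actual utility used by the suite is an assumption):

import subprocess


def write_image(path, offset, length, pattern):
    # Write `length` bytes of the repeating byte value `pattern` at
    # `offset` inside the disk image, using qemu-io's pattern write.
    subprocess.check_call([
        'qemu-io', '-c',
        'write -P {} {} {}'.format(pattern, offset, length),
        path,
    ])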
Code Example #17
File: gif.py  Project: zero2723/dreampower
    def _setup(self, *args):
        self.__phases = select_phases(self._args)
        self.__input_path = self._args['input']
        self.__output_path = self._args['output']
        self.__tmp_dir = None
        self.__temp_input_paths = []
        self.__temp_output_paths = []
        self.__tmp_dir = tempfile.mkdtemp()
        Conf.log.debug("Temporay dir is {}".format(self.__tmp_dir))
        imgs = imageio.mimread(self.__input_path)
        Conf.log.info("GIF have {} Frames To Process".format(len(imgs)))
        self.__temp_input_paths = [
            os.path.join(self.__tmp_dir, "intput_{}.png".format(i))
            for i in range(len(imgs))
        ]
        self._args['input'] = self.__temp_input_paths
        self.__temp_output_paths = [
            os.path.join(self.__tmp_dir, "output_{}.png".format(i))
            for i in range(len(imgs))
        ]
        self._args['output'] = self.__temp_output_paths

        for im, path in zip(imgs, self.__temp_input_paths):
            write_image(cv2.cvtColor(im, cv2.COLOR_RGB2BGR), path)
Code Example #18
def correlation(x, y, tilematrix, layer1, layer2, date1, date2):
    s = time.time()
    image1 = get_range(x, y, tilematrix, "500m", layer1, date1, date2)
    image2 = get_range(x, y, tilematrix, "250m", layer2, date1, date2)
    e = time.time()

    ACCESS_LOG("Both requests took: {}s".format(e - s))
    ACCESS_LOG("image1 shape {}, image2 shape {}".format(
        image1.shape, image2.shape))

    n = 8
    increment = image1[0].shape[1] // n

    data1 = skimage.measure.block_reduce(image1, (1, n, n, 1), np.average)
    data2 = skimage.measure.block_reduce(image2, (1, n, n, 1), np.average)

    # data1 = np.zeros((n, n, image1[0].shape[-1], image1.shape[0]))
    # for i in range(n):
    #     for j in range(n):
    #         data1[i, j] = image1[:, increment * i : increment * (i + 1), increment * j : increment * (j + 1), 0:3].mean(axis=(1, 2)).T

    # data2 = np.zeros((n, n, image2[0].shape[-1], image2.shape[0]))
    # for i in range(n):
    #     for j in range(n):
    #         data2[i, j] = image2[:, increment * i : increment * (i + 1), increment * j : increment * (j + 1), 0:3].mean(axis=(1, 2)).T

    output = np.zeros(
        (image1[0].shape[0], image1[0].shape[1], image1[0].shape[-1]))

    s = time.time()
    for i in range(n):
        for j in range(n):
            for k in range(output.shape[2]):
                output[increment * i:increment * (i + 1),
                       increment * j:increment * (j + 1),
                       k] = np.corrcoef(data1[i, j, k], data2[i, j, k])[1, 0]

    e = time.time()
    ACCESS_LOG("output shape: {}. Correlation took {}s".format(
        output.shape, e - s))

    return write_image(output, format="jpeg")
Code Example #19
File: image.py  Project: ishandutta2007/dreampower
    def _execute(self, *args):
        """
        Execute all phases on the image.

        :return: The final image step.
        """
        # todo: refactor me, please!
        # with this we force the auto-resize for dreamtime, but it is far from ideal
        if self.__starting_step == 5:
            r = run_worker(self.__phases[0], self.__image_steps, config=self._args)
            self.__image_steps.append(r)

        for step, p in enumerate(self.__phases[self.__starting_step:self.__ending_step]):
            r = run_worker(p, self.__image_steps, config=self._args)
            self.__image_steps.append(r)

            # todo: refactor me, please!
            if self._args.get('export_step'):
                if self._args.get('export_step') == (step-1):
                    step_path = self._args.get('export_step_path') or os.path.abspath(os.path.join(self.__output_path, '..', 'export.png'))

                    write_image(r, step_path)

                    Conf.log.debug("Export Step Image Of {} Execution: {}".format(
                        camel_case_to_str(p.__name__),
                        step_path
                    ))

            if self.__altered_path:
                if self._args.get('folder_altered'):
                    path = self._args['folder_altered']
                else:
                    path = self.__altered_path

                write_image(r, os.path.join(path, "{}.png".format(p.__name__)))

                Conf.log.debug("{} Step Image Of {} Execution".format(
                    os.path.join(path, "{}.png".format(p.__name__)),
                    camel_case_to_str(p.__name__),
                ))

        write_image(self.__image_steps[-1], self.__output_path)
        Conf.log.info("{} Created".format(self.__output_path))
        Conf.log.debug("{} Result Image Of {} Execution"
                       .format(self.__output_path, camel_case_to_str(self.__class__.__name__)))

        return self.__image_steps[-1]
Code Example #20
def downsample(r, n):
    image = read_image(r)

    ACCESS_LOG("Response shape: {}".format(image.shape))

    if n == 0:
        return r.content

    increment = image.shape[0] // n
    for i in range(n):
        for j in range(n):
            image[increment * i:increment * (i + 1),
                  increment * j:increment * (j + 1),
                  0:3] = image[increment * i:increment * (i + 1),
                               increment * j:increment * (j + 1),
                               0:3].mean(axis=(0, 1))

    if image.shape[2] == 4:
        image[:, :, 3] = 255

    output = write_image(image, format="png")

    return output
Code Example #21
        train_step(X)

        with summary_writer.as_default():
            tf.summary.scalar('style loss',
                              style_loss_metric.result(),
                              step=step_counter)
            tf.summary.scalar('content loss',
                              content_loss_metric.result(),
                              step=step_counter)
            tf.summary.scalar('tv loss',
                              tv_loss_metric.result(),
                              step=step_counter)

        if step_counter % auto_save_step == 0:
            test_output = transfer_model(test_img)
            write_image(
                os.path.join(model_path,
                             '{}.jpg'.format(step_counter // auto_save_step)),
                test_output[0] / 255.0)

        bar.update(step_counter)

        step_counter += 1
        if step_counter > step:
            break

    bar.finish()

    transfer_model.save_weights(os.path.join(model_path, 'weights.h5'))
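The caller passes test_output[0] / 255.0, so this write_image(path, img) apparently expects float pixels in [0, 1]. A sketch under that assumption (the RGB -> BGR conversion presumes the model works in RGB):

import numpy as np
import cv2


def write_image(path, img):
    # Assumed helper: scale [0, 1] floats back to 8-bit and save.
    img = np.clip(np.asarray(img) * 255.0, 0, 255).astype(np.uint8)
    cv2.imwrite(path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))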
Code Example #22
                                            window_shape, scaler,
                                            heat_threshold)

        print('Loading ' + input_file + ' as a ' +
              feature_parameters['cspace'] + ' image')
        img = read_image(input_file, feature_parameters['cspace'])
        output_to_file = output_file and len(output_file)

        print('Detecting vehicles')
        boxes = vehicle_detector(img, show_plots=False)
        print(boxes)
        output = draw_boxes(rgb(img, feature_parameters['cspace']), boxes)

        if output_to_file:
            print('Writing output to ' + output_file)
            write_image(output_file, output, 'RGB')
        else:
            plt.figure()
            plt.title(input_file)
            plt.imshow(output)
            plt.show()
    elif file_extension in ['mp4']:
        # process video
        vehicle_detector = Vehicle_pipeline(classifier,
                                            feature_parameters,
                                            window_shape,
                                            scaler,
                                            heat_threshold,
                                            alpha=0.125)

        def frame_handler(frame):
Code Example #23
        for j in range(labels.shape[1]):
            if labels[i][j] == 0:
                labels[i][j] = centers[0] / 255.0
            else:
                labels[i][j] = centers[1] / 255.0

    return labels


if __name__ == "__main__":
    img = utils.read_image('lenna.png')
    k = 2

    start_time = time.time()
    centers, labels, sumdistance = kmeans(img, k)
    result = visualize(centers, labels)
    end_time = time.time()

    running_time = end_time - start_time
    print(running_time)

    centers = list(centers)
    with open('results/task1.json', "w") as jsonFile:
        jsonFile.write(
            json.dumps({
                "centers": centers,
                "distance": sumdistance,
                "time": running_time
            }))
    utils.write_image(result, 'results/task1_result.jpg')
Code Example #24
        loss_recon_warp = l1_loss(field_recon, field_p2c) * args.w_recon_field

        random_z = torch.randn(img_p.size(0), args.warp_dim, 1, 1).cuda()
        _, field_gen = warper.decode_f(embedding, random_z, scale=args.scale)
        img_warp_gen = F.grid_sample(img_p, field_gen, align_corners=True)
        loss_tv = tv_loss(img_warp_gen) * args.w_tv

        loss_total = loss_recon_p + loss_recon_warp + loss_tv
        loss_total.backward()
        opt.step()

        # output log
        if (step + 1) % args.snapshot_log == 0:
            end = time.time()
            print(
                'Step: {} ({:.0f}%) time:{} loss_rec_p:{:.4f} loss_rec_warp:{:.4f} loss_tv:{:.4f}'
                .format(step + 1, 100.0 * step / args.iteration,
                        int(end - start), loss_recon_p, loss_recon_warp,
                        loss_tv))
            # input photo, input caricature, image_warp_p2c, image_warp_generated
            vis = torch.stack(
                (img_p, img_c,
                 F.grid_sample(img_p, field_p2c,
                               align_corners=True), img_warp_gen),
                dim=1)
            write_image(step, image_dir, vis)

        # save checkpoint
        if (step + 1) % args.snapshot_save == 0:
            warper.save(checkpoint_dir, step)
Code Example #25
File: 01_shonenjump.py  Project: JohnKhor/tankobon
def shonenjump():
    # create image directory
    IMG_DIR = 'img'
    create_dir(IMG_DIR)

    rensai_soup = get_soup(get_content(RENSAI_URL))
    archives_soup = get_soup(get_content(ARCHIVES_URL))

    # store series information: name, abbreviated name and whether it is still ongoing
    all_series = []

    # create icon directory
    ICONS_DIR = os.path.join(IMG_DIR, 'icons')
    create_dir(ICONS_DIR)
    
    for soup in [rensai_soup, archives_soup]:
        # ongoing series?
        ongoing = soup is rensai_soup

        section = soup.find('section', class_='serialSeries')

        for li in section.find_all('li'):
            # series name in japanese
            name_jp = li.div.text if li.div else li.p.text
            name_jp = name_jp[1:name_jp.find('』')]
            
            link_tag = li.a

            # abbreviated name
            abbr = link_tag['href'].rsplit('/', 1)[1][:-5]

            # download icon
            img_src = link_tag.img['src']
            img_url = BASE_URL + img_src
            file_path = os.path.join(ICONS_DIR, abbr + '.' + img_src.rsplit('.', 1)[1])
            print(f'Downloading {file_path}...')
            write_image(img_url, file_path)
            
            # add series
            series = { 'name': name_jp, 'abbr': abbr, 'ongoing': ongoing }
            all_series.append(series)

    # save series information
    save_json("data.json", all_series)

    for series in all_series:
        # create directory for this series
        series_dir = os.path.join(IMG_DIR, series['abbr'])
        create_dir(series_dir)
            
        current_list_url = LIST_URL + series['abbr'] + '.html'

        while current_list_url:
            list_soup = get_soup(get_content(current_list_url))
            ul = list_soup.find('ul', class_='comicsList')
            
            # skip series that haven't released any volume yet
            if ul.li is None:
                break
            
            for dl in ul.select('li dl'):
                # skip current volume if it isn't released yet
                if '発売予定' in str(dl.p):
                    continue

                # download cover
                img_src = dl.img['src']
                img_url = BASE_URL + img_src
                file_path = os.path.join(series_dir, img_src.rsplit('/', 1)[1])
                print(f'Downloading {file_path}...')
                write_image(img_url, file_path)

            # get url for next list of covers
            next_list_url_tag = list_soup.find('span', class_='current_page').next_sibling.next_sibling
            if next_list_url_tag is None:
                break
            else:
                current_list_url = BASE_URL + next_list_url_tag['href']
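Here write_image(img_url, file_path) is a downloader rather than an encoder. A minimal sketch using requests (the HTTP library is an assumption):

import requests


def write_image(img_url, file_path):
    # Assumed helper: fetch the image over HTTP and save it in binary mode.
    resp = requests.get(img_url)
    resp.raise_for_status()
    with open(file_path, 'wb') as f:
        f.write(resp.content)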
Code Example #26
            # flipping the image
            frame = cv2.flip(frame, 1)
            # get key event
            key = key_action()

            # draw a [224x224] rectangle into the middle of the frame
            cv2.rectangle(frame, (0 + 88, 0 + 8), (224 + 88, 224 + 8),
                          (0, 0, 0), 2)
            # cv2.rectangle(frame,(0+880,0+80),(448+880,448+80),(0,0,0),2)

            image = frame[0 + 8:224 + 8, 0 + 88:224 + 88, :]
            if key == 'space':
                # write the image without overlay
                # extract the [224x224] rectangle out of it
                #image = frame[0+8: 224+8, 0+88: 224+88, :]
                write_image(out_folder, image)

            if key == 'p':
                last_detected = datetime.now()
            elif (datetime.now() - last_detected).total_seconds() < 5:

                # write the predictions on the frame
                # find the predictions
                predictions = predict_frame(image, model, classes)
                # print(predictions)
                cv2.putText(frame, predictions, (10, 300),
                            cv2.FONT_HERSHEY_PLAIN, .6, (0, 0, 0), 1,
                            cv2.LINE_AA)

            # if key == 's':
            #     last_detected = datetime.now()
Code Example #27
def main(filenames, name, gpu, model_loc, result_loc):
    os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpu
    cuda = Cuda(0)

    net_in = ['rgb']
    net_out = ['albedo', 'shading', 'segmentation']
    normalize = True
    net = DirectIntrinsicsSN(3, ['color', 'color', 'class'])

    # The following points to where the weight file is kept. Keep it above
    # the experiment folder; the experiment folder is derived from the
    # experiment name.
    output_path_root = model_loc
    output_path = os.path.join(output_path_root, 'experiment', name)

    # Change the following to the place where the image should be written
    results_path = os.path.join(result_loc, name)
    #os.makedirs(results_path, exist_ok=True)

    if not network.load_weights(output_path, net):
        # We load the model here. You can modify the os.path.join arguments to
        # reflect to your desired folder structure. We use an extra folder for
        # the checkpoints to keep it organized.
        net.load_state_dict(
            torch.load(
                os.path.join(output_path, 'checkpoints', 'final.checkpoint')))
    if cuda.enabled:
        net.cuda(device=cuda.device)

    net.eval()

    if os.path.isdir(filenames):
        print('Found directory. Scanning for png images...')
        files = glob.glob(os.path.join(filenames, '*.png'))
        print('Done! Found %d files' % len(files))
        for filename in tqdm(files):
            im = Image.open(filename)
            in_ = im.resize((352, 480), Image.ANTIALIAS)
            in_ = np.array(in_, dtype=np.int64)
            in_ = in_.astype(np.float32)

            in_[np.isnan(in_)] = 0
            in_ = in_.transpose((2, 0, 1))
            if normalize:
                in_ = (in_ * 255 / np.max(in_)).astype('uint8')
                in_ = (in_ / 255.0).astype(np.float32)

            in_ = np.expand_dims(in_, axis=0)
            rgb = torch.from_numpy(in_)
            if cuda.enabled:
                rgb = Variable(rgb).cuda(device=cuda.device)
            else:
                rgb = Variable(rgb)
            albedo_out, shading_out, segmentation_out = net(rgb)
            filename = filename.split('/')[-1]
            write_image(results_path,
                        albedo_out.detach().cpu().numpy(), filename, 'albedo')
            write_image(results_path,
                        shading_out.detach().cpu().numpy(), filename,
                        'shading')
            _, segmentation_pred = torch.max(segmentation_out.data, 1)
            write_image(results_path,
                        Garden.color_labels(segmentation_pred.cpu().numpy()),
                        filename, 'segmentation')
    else:
        print('Target is a single image.')
        filename = filenames
        im = Image.open(filename)
        in_ = im.resize((480, 352), Image.ANTIALIAS)
        in_ = np.array(in_, dtype=np.int64)
        in_ = in_.astype(np.float32)
        print(in_.shape)

        in_[np.isnan(in_)] = 0
        in_ = in_.transpose((2, 0, 1))
        if normalize:
            in_ = (in_ * 255 / np.max(in_)).astype('uint8')
            in_ = (in_ / 255.0).astype(np.float32)

        in_ = np.expand_dims(in_, axis=0)
        rgb = torch.from_numpy(in_)
        if cuda.enabled:
            rgb = Variable(rgb).cuda(device=cuda.device)
        else:
            rgb = Variable(rgb)
        print('Data loaded! Beginning inference')
        a = time.time()
        albedo_out, shading_out, segmentation_out = net(rgb)
        print('Inference completed in %.2f seconds' % (time.time() - a))
        print('Writing results')
        write_image(results_path,
                    albedo_out.detach().cpu().numpy(), filename, 'albedo')
        write_image(results_path,
                    shading_out.detach().cpu().numpy(), filename, 'shading')
        _, segmentation_pred = torch.max(segmentation_out.data, 1)
        write_image(results_path,
                    Garden.color_labels(segmentation_pred.cpu().numpy()),
                    filename, 'segmentation')
    print('Done! Shutting down...')
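write_image(results_path, data, filename, suffix) receives a batched network output plus the source file name and an output kind. A sketch of how such a helper could normalize and save it (the layout handling and the "<stem>_<suffix>.png" naming are assumptions):

import os

import numpy as np
from PIL import Image


def write_image(results_path, data, filename, suffix):
    # Assumed helper: accept a [1, C, H, W] or [H, W(, C)] array, rescale
    # it to 8-bit, and save it as "<original stem>_<suffix>.png".
    img = np.asarray(data)
    if img.ndim == 4:
        img = img[0]
    if img.ndim == 3 and img.shape[0] in (1, 3):
        img = img.transpose(1, 2, 0)  # CHW -> HWC
    img = np.squeeze(img).astype(np.float32)
    img = (255 * (img - img.min()) / (np.ptp(img) + 1e-8)).astype(np.uint8)
    os.makedirs(results_path, exist_ok=True)
    stem = os.path.splitext(os.path.basename(filename))[0]
    Image.fromarray(img).save(
        os.path.join(results_path, '{}_{}.png'.format(stem, suffix)))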
Code Example #28
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--input_dir',
                        type=str,
                        default="output",
                        help='directory of checkpoint files')
    parser.add_argument('--output',
                        type=str,
                        default=DEFAULT_MODEL,
                        help='exported file')
    parser.add_argument('--image_h',
                        type=int,
                        default=-1,
                        help='output image height')
    parser.add_argument('--image_w',
                        type=int,
                        default=-1,
                        help='output image width')

    parser.add_argument('--noise',
                        type=float,
                        default=0.,
                        help='noise magnitude')

    logging.basicConfig(stream=sys.stdout,
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=logging.INFO,
                        datefmt='%I:%M:%S')

    args = parser.parse_args()
    tmp_dir = os.path.join(args.input_dir, 'tmp')
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    ckpt_dir = os.path.join(tmp_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.mkdir(ckpt_dir)

    args.save_model = os.path.join(ckpt_dir, 'model')

    with open(os.path.join(args.input_dir, 'result.json'), 'r') as f:
        result = json.load(f)

    model_name = result['model_name']
    best_model_full = result['best_model']
    best_model_arr = best_model_full.split('/')
    best_model_arr[0] = args.input_dir
    best_model = os.path.join(*best_model_arr)

    if args.image_w < 0:
        if 'image_w' in result:
            args.image_w = result['image_w']
        else:
            args.image_w = vgg.DEFAULT_SIZE
    if args.image_h < 0:
        if 'image_h' in result:
            args.image_h = result['image_h']
        else:
            args.image_h = vgg.DEFAULT_SIZE

    if args.output == DEFAULT_MODEL:
        args.output = model_name + ".pb"

    logging.info("loading best model from %s" % best_model)

    graph = tf.Graph()
    with graph.as_default():
        with tf.name_scope(model_name):
            model = StyleTransfer(is_training=False,
                                  batch_size=1,
                                  image_h=args.image_h,
                                  image_w=args.image_w,
                                  inf_noise=args.noise)
        model_saver = tf.train.Saver(name='saver', sharded=True)
    try:
        with tf.Session(graph=graph) as session:

            logging.info("Loading model")
            model_saver.restore(session, best_model)

            logging.info("Verify model")
            batch_gen_valid = BatchGenerator(1,
                                             args.image_h,
                                             args.image_w,
                                             valid=True)
            _, _, _, test_out, _ = model.run_epoch(session,
                                                   tf.no_op(),
                                                   None,
                                                   batch_gen_valid,
                                                   num_iterations=1)

            utils.write_image(
                os.path.join(args.input_dir, 'export_verify.png'), test_out)

            logging.info("Exporting model")
            best_model = model_saver.save(session, args.save_model)
            # Save graph def
            tf.train.write_graph(session.graph_def, tmp_dir, "temp_model.pb",
                                 False)

            saver_def = model_saver.as_saver_def()
            input_graph_path = os.path.join(tmp_dir, "temp_model.pb")
            input_saver_def_path = ""  # we don't have one
            input_binary = True
            input_checkpoint_path = args.save_model
            output_node_names = model_name + "/output"
            restore_op_name = saver_def.restore_op_name
            filename_tensor_name = saver_def.filename_tensor_name
            output_graph_path = os.path.join(args.input_dir, args.output)
            clear_devices = False

            freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                      input_binary, input_checkpoint_path,
                                      output_node_names, restore_op_name,
                                      filename_tensor_name, output_graph_path,
                                      clear_devices, None)
            shutil.rmtree(tmp_dir)
    except:
        print("Unexpected error:", sys.exc_info()[0])
        raise
Code Example #29
def gaussian_blur(r):
    image = read_image(r)
    image = cv.GaussianBlur(image, (5, 5), 0)
    return write_image(image, format="png")
Code Example #30
def blur(r):
    image = read_image(r)
    image = cv.blur(image, (5, 5))
    return write_image(image, format="png")
Code Example #31
File: main_gatys.py  Project: tonypeng/ml-playground
                            - styled_image[:, 1:, :, :]))

    # Total loss
    loss = CONTENT_WEIGHT * loss_content + STYLE_WEIGHT * loss_style + DENOISE_WEIGHT * loss_tv

    # 4. Optimize
    print("4. Optimizing...")
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss)

    sess.run(tf.initialize_all_variables())
    best_loss = float('inf')
    best_styled_image = None
    for n in range(ITERATIONS):
        sess.run(optimizer)
        curr_loss_content, curr_loss_style = sess.run([loss_content, loss_style])
        if curr_loss_content + curr_loss_style < best_loss:
            best_loss = curr_loss_content + curr_loss_style
            best_styled_image = styled_image.eval()

        if n % 10 == 0:
            current_styled_image = utils.postprocess_image(styled_image.eval(), img_mean)
            output_path = utils.stylized_path(CONTENT_PATH, INITIAL_IMAGE_PATH, STYLE_PATH, 'step'+str(n))
            utils.write_image(current_styled_image, output_path)

        print("Iteration " + str(n + 1) + ": L_content=" + str(curr_loss_content) + "; L_style=" + str(curr_loss_style)
              + "; Loss=" + str(curr_loss_content + curr_loss_style))

best_styled_image = utils.postprocess_image(best_styled_image, img_mean)
output_path = utils.stylized_path(CONTENT_PATH, INITIAL_IMAGE_PATH, STYLE_PATH)
utils.write_image(best_styled_image, output_path)
Code Example #32
File: main.py  Project: mostafamasoudi/algorithm-fa99
def main():
    # get command line arguments
    args = get_args()
    
    # read input image
    image = read_image(args.input_file)

    all_frames.append(image)

    # convert the image to an array to do the calculations
    im_array = numpy.array(image, dtype=numpy.int16)

    # track the current width and height of the image
    im_width = image.width
    im_height = image.height

    # remove vertical seams to reduce the width
    for i in range(args.dx):

        # logging state
        print(f"\nround: {i} -> im_array_width: {im_array.shape[1]}, im_array_height: {im_array.shape[0]}")

        s = time.time()

        # calculate energy function
        energy = get_energy_B(im_array, im_width, im_height)

        print(f"calculate energy array: {time.time() - s} s")
        s = time.time()

        # find seam and create its mask
        mask = find_seam(energy, im_width, im_height)

        print(f"find seam: {time.time() - s} s")

        # color the seam white in the image and save the frame
        frame = coloring_seams(im_array, mask)
        all_frames.append(frame)

        # remove seam from image
        im_array = im_array[mask].reshape(im_height, (im_width - 1), 3)
        im_width -= 1
    

    # rotate the image 90 degrees to apply seam carving along the height
    im_array = numpy.rot90(im_array, 1, (1, 0))
    (im_width, im_height) = (im_height, im_width)


    # remove horizontal seams to reduce the height
    for i in range(args.dy):

        # logging state
        print(f"\nround: {i} -> im_array_width: {im_array.shape[1]}, im_array_height: {im_array.shape[0]}")

        s = time.time()

        # calculate energy function
        energy = get_energy_B(im_array, im_width, im_height)

        print(f"calculate energy array: {time.time() - s} s")
        s = time.time()

        # find seam and create its mask
        mask = find_seam(energy, im_width, im_height)

        print(f"find seam: {time.time() - s} s")

        # color the seam white in the image and save the frame
        frame = coloring_seams(im_array, mask)
        all_frames.append(frame.rotate(90))

        # remove seam from image
        im_array = im_array[mask].reshape(im_height, im_width - 1, 3)
        im_width -= 1
    

    # rotate the image back to its original orientation
    im_array = numpy.rot90(im_array, -1, (1, 0))

    # create and save gif file
    all_frames[0].save(f"{args.output.split('.')[0]}.gif",
                        save_all=True, append_images=all_frames[1:], duration=500, loop=2)
    
    # create output file
    write_image(im_array, f"{args.output}")
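The working array is int16 so the gradient-based energy computation does not overflow, which means the final write_image(im_array, path) has to convert back to 8-bit. A sketch consistent with the Pillow usage elsewhere in the script (the clipping is an assumption):

import numpy
from PIL import Image


def write_image(im_array, path):
    # Assumed helper: clamp the int16 working array back to the 8-bit
    # range and save it with Pillow.
    Image.fromarray(numpy.clip(im_array, 0, 255).astype(numpy.uint8)).save(path)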