Example #1
import numpy as np
from sklearn.utils import shuffle

# read_image_rgb, remove_noise, resize_image and flip_image are
# project-specific helpers defined elsewhere in this repository.


def generator(samples, batch_size=32):
    num_samples = len(samples)

    while True:
        samples = shuffle(samples)  # sklearn's shuffle returns a new list; reassign it
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]

            # For each sample in the batch: read the image as RGB, remove noise
            # with a Gaussian blur, and resize it to the network's input shape.
            images = []
            angles = []
            for batch_sample in batch_samples:
                center_image = read_image_rgb(batch_sample[0])
                center_image = remove_noise(center_image)
                center_image = resize_image(center_image)

                measurement = float(batch_sample[1])

                images.append(center_image)
                angles.append(measurement)

                flipped_image, flipped_measurement = flip_image(
                    center_image, measurement)
                images.append(flipped_image)
                angles.append(flipped_measurement)

            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)  # shuffle images and angles in unison
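
For context, here is a minimal sketch of how such a generator is typically consumed. `samples` (a list of `(image_path, steering_angle)` rows) and a compiled Keras `model` are assumed to exist; recent versions of Keras accept a Python generator directly in `model.fit` (older versions used `fit_generator`).

from math import ceil
from sklearn.model_selection import train_test_split

# Assumed inputs: `samples` as described above and an already-compiled Keras `model`.
train_samples, valid_samples = train_test_split(samples, test_size=0.2)
batch_size = 32

model.fit(
    generator(train_samples, batch_size=batch_size),
    steps_per_epoch=ceil(len(train_samples) / batch_size),
    validation_data=generator(valid_samples, batch_size=batch_size),
    validation_steps=ceil(len(valid_samples) / batch_size),
    epochs=5)
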
Example #2
    def POST(self, name=None):
        xinput = web.input()
        ifile = web.input(avatar={})
        filePath_a = goods_image_name()
        filePath = filePath_a[0]
        if 'avatar' in ifile:
            ifile.avatar.file.seek(0, os.SEEK_END)   # jump to the end to check the upload size
            if ifile.avatar.file.tell() > 0:
                # Write the uploaded bytes to the original-size file; binary
                # mode is needed because the payload is image data.
                with open(filePath % '_o', 'wb') as fout:
                    ifile.avatar.file.seek(0)
                    fout.write(ifile.avatar.file.read())

                # Generate the scaled variants from the original upload.
                resize_image(filePath % '_o', filePath % '_256', 256, refMode='both')
                resize_image(filePath % '_o', filePath % '_512', 512, refMode='both')
                resize_image(filePath % '_o', filePath % '_800', 800, refMode='both')

        thegoods = goods()
        thegoods.bind(xinput)
        thegoods.avatar_path.zval = filePath_a[1]
        thegoods.update_date.zval = datetime.datetime.now()
        #return thegoods.dump()
        if name is None:
            thegoods.create_date.zval = datetime.datetime.now()
            insert(thegoods)
        else:
            thegoods._id.val(name)
            update(thegoods)
        raise web.seeother("/admin/goods/")
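
The `resize_image` helper called above is not part of this excerpt. As a rough illustration only, a Pillow-based implementation with the same call shape might look like the sketch below; the signature and the interpretation of `refMode` ('both' meaning the longer side is constrained to `size`) are assumptions, not the project's actual code.

from PIL import Image

def resize_image(src_path, dst_path, size, refMode='both'):
    """Hypothetical sketch: scale src_path and save the result to dst_path."""
    img = Image.open(src_path)
    w, h = img.size
    if refMode == 'both':        # assumed: constrain the longer side to `size`
        scale = size / float(max(w, h))
    elif refMode == 'width':     # assumed: constrain the width only
        scale = size / float(w)
    else:                        # assumed: constrain the height only
        scale = size / float(h)
    new_size = (max(1, int(w * scale)), max(1, int(h * scale)))
    img.resize(new_size, Image.LANCZOS).save(dst_path)
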
Example #3
    def recognize(self,
                  images,
                  detection_kwargs=None,
                  recognition_kwargs=None):
        """Run the pipeline on one or multiples images.

        Args:
            images: The images to parse (can be a list of actual images or a list of filepaths)
            detection_kwargs: Arguments to pass to the detector call
            recognition_kwargs: Arguments to pass to the recognizer call

        Returns:
            A list of lists of (text, box) tuples.
        """

        # Make sure we have an image array to start with.
        if not isinstance(images, np.ndarray):
            images = [tools.read(image) for image in images]
        # This turns images into (image, scale) tuples temporarily
        images = [
            tools.resize_image(image,
                               max_scale=self.scale,
                               max_size=self.max_size) for image in images
        ]
        max_height, max_width = np.array(
            [image.shape[:2] for image, scale in images]).max(axis=0)
        scales = [scale for _, scale in images]
        images = np.array([
            tools.pad(image, width=max_width, height=max_height)
            for image, _ in images
        ])
        if detection_kwargs is None:
            detection_kwargs = {}
        if recognition_kwargs is None:
            recognition_kwargs = {}
        box_groups = self.detector.detect(images=images, **detection_kwargs)
        prediction_groups = self.recognizer.recognize_from_boxes(
            images=images, box_groups=box_groups, **recognition_kwargs)
        # Map the detected boxes back to the original (pre-resize) image coordinates.
        box_groups = [
            tools.adjust_boxes(boxes=boxes, boxes_format='boxes', scale=1 / scale)
            if scale != 1 else boxes
            for boxes, scale in zip(box_groups, scales)
        ]
        return [
            list(zip(predictions, boxes))
            for predictions, boxes in zip(prediction_groups, box_groups)
        ]
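
This `recognize` method is the main entry point of a keras-ocr style `Pipeline`. For reference, a typical end-to-end call against the packaged `keras_ocr.pipeline.Pipeline` looks like this (the image file names are placeholders):

import keras_ocr

pipeline = keras_ocr.pipeline.Pipeline()
# recognize() accepts loaded images or file paths, as the docstring above notes.
prediction_groups = pipeline.recognize(['invoice1.jpg', 'invoice2.jpg'])
for text, box in prediction_groups[0]:
    print(text, box)
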
Example #4
    def POST(self, name=None):
        xinput = web.input()
        if 'act' in xinput and xinput.act == 'del':
            imgid = xinput.id
            mgoodsImg = find_one(goods_picture, _id=imgid)
            xpath = mgoodsImg.path.zval
            try:
                del_goods_images(xpath)
            except OSError:
                pass
            remove(goods_picture, _id=imgid)
            return 1
        elif 'act' in xinput and xinput.act == 'avatar':
            imgid = xinput.id
            mgoodsImg = find_one(goods_picture, _id=imgid)
            xpath = mgoodsImg.path.zval
            xgoods_id = mgoodsImg.goods_id.zval
            mGoods = goods()
            mGoods._id.zval = xgoods_id
            mGoods.avatar_path.zval = xpath
            update(mGoods)
            return 1

        ifile = web.input(avatar={})
        filePath_a = goods_image_name()
        filePath = filePath_a[0]

        if 'avatar' in ifile:
            ifile.avatar.file.seek(0, os.SEEK_END)   # jump to the end to check the upload size
            if ifile.avatar.file.tell() > 0:
                # Write the uploaded bytes to the original-size file; binary
                # mode is needed because the payload is image data.
                with open(filePath % '_o', 'wb') as fout:
                    ifile.avatar.file.seek(0)
                    fout.write(ifile.avatar.file.read())

                resize_image(filePath % '_o', filePath % '_256', 256, refMode='both')
                resize_image(filePath % '_o', filePath % '_512', 512, refMode='both')
                resize_image(filePath % '_o', filePath % '_800', 800, refMode='both')

                thepic = goods_picture()
                thepic.bind(xinput)
                thepic.path.zval = filePath_a[1]
                thepic.goods_id.zval = name
                thepic.create_date.zval = datetime.datetime.now()
                insert(thepic)
        raise web.seeother("/admin/goods/images/" + name)
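
For completeness, a handler class like the one above is usually wired to a URL in web.py roughly as follows; the class name `goods_images` and the URL pattern are placeholders inferred from the redirect above, not the project's actual routing table.

import web

urls = (
    '/admin/goods/images/(.*)', 'goods_images',  # placeholder name for the handler class above
)
app = web.application(urls, globals())

if __name__ == '__main__':
    app.run()
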
Example #5



if __name__ == '__main__':
    tf.config.list_physical_devices('GPU')

    args, _ = parse_args()

    # Command-line arguments override the corresponding FLAGS entries.
    FLAGS['process_max_epoch'] = args.epochs
    FLAGS['folder_model'] = args.model_dir
    FLAGS['gpu'] = args.n_gpu
    FLAGS['input_folder'] = args.train_input
    FLAGS['label_folder'] = args.label_input

    tools.resize_image(FLAGS['input_folder'], max_length=512)
    tools.resize_image(FLAGS['label_folder'], max_length=512)

    tf.compat.v1.disable_eager_execution()
    print(tf.config.list_physical_devices('GPU'))
    #tf.compat.v1.disable_resource_variables()
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS['num_gpu']
    sys.stdout = Tee(sys.stdout, open(FLAGS['txt_log'], 'a+'))

    LOG_STAMP_TRAIN = np.int32(np.linspace(0, FLAGS['data_train_batch_count']-1, num=FLAGS['process_train_log_interval_epoch']+1))
    LOG_STAMP_TEST  = np.int32(np.linspace(0, FLAGS['data_train_batch_count']-1, num=FLAGS['process_test_log_interval_epoch'] +1))

    def netG_concat_value(tensor, v):
        # Append a constant-valued channel to an NHWC feature map.
        v_t = tf.constant(v, dtype=tf.float32,
                          shape=tensor.get_shape().as_list()[:3] + [1])
        tensor = tf.concat([tensor, v_t], axis=3)
        return tensor
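
`tools.resize_image` is called here on a whole folder and its implementation is not shown. Assuming it simply shrinks every image in the folder in place so that the longer side does not exceed `max_length`, a minimal OpenCV-based sketch could look like this:

import os
import cv2

def resize_image(folder, max_length=512):
    """Hypothetical sketch: shrink every image in `folder` in place."""
    for fname in os.listdir(folder):
        path = os.path.join(folder, fname)
        img = cv2.imread(path)
        if img is None:          # skip files OpenCV cannot decode
            continue
        h, w = img.shape[:2]
        scale = max_length / float(max(h, w))
        if scale >= 1.0:         # already within the size limit
            continue
        img = cv2.resize(img, (int(w * scale), int(h * scale)),
                         interpolation=cv2.INTER_AREA)
        cv2.imwrite(path, img)
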
Example #6
    input_img = input_img[None, :, :, :]
    dict_d = [input_img, rect, 0]
    dict_t = [test_df.input1_src] + \
        test_df.mat1.rect + test_df.mat1.rot
    # Feed the (placeholder, value) pairs and run the enhancement graph.
    enhance_test_img = sess.run(
        netG_test_output1_crop,
        feed_dict={t: d
                   for t, d in zip(dict_t, dict_d)})
    enhance_test_img = safe_casting(
        enhance_test_img * tf.as_dtype(FLAGS['data_input_dtype']).max,
        FLAGS['data_input_dtype'])
    enhanced_img_file_name = file_out_name_without_ext + FLAGS[
        'data_output_ext']
    enhance_img_file_path = FLAGS['folder_test_img'] + enhanced_img_file_name
    #try:
    #    print(current_time() + ', try remove file path = %s' % enhance_img_file_path)
    #    os.remove(enhance_img_file_path)
    #except OSError as e:
    #    print(current_time() + ', remove fail, error = %s' % e.strerror)
    cv2.imwrite(enhance_img_file_path, enhance_test_img)
    return enhanced_img_file_name


if __name__ == '__main__':
    args, _ = parse_args()

    FLAGS['inference_folder'] = args.inference_dir
    tools.resize_image(FLAGS['inference_folder'])
    print('processing')
    processImg('a0002.tif', 'totaltest')
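
`safe_casting` is not defined in this excerpt. Assuming `FLAGS['data_input_dtype']` is an integer image dtype (for example uint8 or uint16 for the .tif output), a plausible sketch is a clip-then-cast helper like the following:

import numpy as np

def safe_casting(data, dtype):
    """Hypothetical sketch: clip to the target integer range before casting."""
    info = np.iinfo(dtype)
    return np.clip(np.round(data), info.min, info.max).astype(dtype)
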
Example #7



if __name__ == '__main__':
    tf.config.list_physical_devices('GPU')

    args, _ = parse_args()

    # Command-line arguments override the corresponding FLAGS entries.
    FLAGS['process_max_epoch'] = args.epochs
    FLAGS['folder_model'] = args.model_dir
    FLAGS['gpu'] = args.n_gpu
    FLAGS['folder_input'] = args.train_input
    FLAGS['folder_label'] = args.label_input

    tools.resize_image(FLAGS['folder_input'], max_length=512)
    tools.resize_image(FLAGS['folder_label'], max_length=512)

    tf.compat.v1.disable_eager_execution()
    print(tf.config.list_physical_devices('GPU'))
    #tf.compat.v1.disable_resource_variables()
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS['num_gpu']
    sys.stdout = Tee(sys.stdout, open(FLAGS['txt_log'], 'a+'))

    LOG_STAMP_TRAIN = np.int32(np.linspace(0, FLAGS['data_train_batch_count']-1, num=FLAGS['process_train_log_interval_epoch']+1))
    LOG_STAMP_TEST  = np.int32(np.linspace(0, FLAGS['data_train_batch_count']-1, num=FLAGS['process_test_log_interval_epoch'] +1))

    def netG_concat_value(tensor, v):
        # Append a constant-valued channel to an NHWC feature map.
        v_t = tf.constant(v, dtype=tf.float32,
                          shape=tensor.get_shape().as_list()[:3] + [1])
        tensor = tf.concat([tensor, v_t], axis=3)
        return tensor
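
For reference, a small usage sketch of `netG_concat_value` as defined above; it assumes graph mode (eager execution is disabled earlier in the script) and a fully static placeholder shape, since `tf.constant` needs concrete dimensions. The shape and the value 0.5 are made-up examples.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
feat = tf.compat.v1.placeholder(tf.float32, shape=[4, 64, 64, 3])
feat_plus_v = netG_concat_value(feat, 0.5)  # resulting shape: (4, 64, 64, 4)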