def create_submit(args):
    with open(args.pred_file, 'rb') as f:
        print('loading: ', args.pred_file)
        preds = pickle.load(f)

    classes, _ = get_top_classes(args.start_index, args.end_index)

    df_test = pd.read_csv(os.path.join(settings.DATA_DIR, 'VRD_sample_submission.csv'))
    print('getting image sizes')
    df_test['h'] = df_test.ImageId.map(lambda x: get_image_size(os.path.join(settings.TEST_IMG_DIR, '{}.jpg'.format(x)))[1])
    df_test['w'] = df_test.ImageId.map(lambda x: get_image_size(os.path.join(settings.TEST_IMG_DIR, '{}.jpg'.format(x)))[0])
    print(df_test.head())

    final_preds = []

    for p in tqdm(preds, total=len(preds)):
        final_preds.append(get_preds(p, len(classes), args.th))
    total_objs = 0
    for p in final_preds:
        total_objs += len(p['labels'])
    print('total predicted objects:', total_objs)

    pred_strs = []
    for i, p in tqdm(enumerate(final_preds), total=len(final_preds)):
        h = df_test.iloc[i].h
        w = df_test.iloc[i].w
        pred_strs.append(get_pred_str(p, w, h, classes))
    df_test.PredictionString = pred_strs
    print(df_test.head())
    df_test.to_csv(args.out, index=False, columns=['ImageId', 'PredictionString'])
    print('done')
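
Every example in this listing leans on some variant of a get_image_size helper. Below is a minimal sketch of such a helper, assuming a PIL-backed implementation that returns (width, height); the project-specific variants in the examples differ (some also return depth, one maps a model name to an input size):

from PIL import Image

def get_image_size(path):
    """Return (width, height) of the image at `path` (assumed PIL-based sketch)."""
    with Image.open(path) as img:
        return img.size  # PIL reports size as (width, height)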
Example #2
def blend_images_effect(
            image1: image_types,
            image2: image_types,
            image1_alpha=0.7,
            image2_alpha=0.3,
            zeta=0,
            resolution=(1920, 1080),
        ) -> np.ndarray:
    """
    Alpha blending images
    :param image1: first image (one of image_type)
    :param image2: second image (one of image_type)
    :param image1_alpha: float value 0.0 - 1.0
    :param image2_alpha: float value 0.0 - 1.0
    :param zeta: additional int value
    :param resolution: both images resolution should match to it
    :return: OpenCV image
    """

    s1, s2 = utils.get_image_size(image1), utils.get_image_size(image2)

    if s1 != resolution:
        image1 = cv2.resize(image1, resolution)
    if s2 != resolution:
        image2 = cv2.resize(image2, resolution)

    return cv2.addWeighted(
        utils.load_image(image1, np.ndarray),
        image1_alpha,
        utils.load_image(image2, np.ndarray),
        image2_alpha,
        zeta
    )
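A hypothetical call, assuming two OpenCV images on disk (a.jpg and b.jpg are placeholder names) and the utils helpers from the same module:

import cv2

img_a = cv2.imread('a.jpg')  # hypothetical input files
img_b = cv2.imread('b.jpg')
blended = blend_images_effect(img_a, img_b, image1_alpha=0.6, image2_alpha=0.4)
cv2.imwrite('blended.png', blended)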
Example #3
def submit(args):
    global preds, classes

    classes, _ = get_top_classes(args.start_index, args.end_index,
                                 args.class_file)
    print('loading {}...'.format(args.pred_file))
    with open(args.pred_file, 'rb') as f:
        preds = pickle.load(f)

    print('len(preds):', len(preds))
    print('num classes of preds:', len(preds[0][1]))
    print('specified num classes:', len(classes))
    #assert len(preds[0][1]) == len(classes)

    print('creating submission...')
    df_test = pd.read_csv(osp.join(DATA_DIR, 'sample_empty_submission.csv'))
    df_test.ImageWidth = df_test.ImageID.map(
        lambda x: get_image_size(get_fn(x))[0])
    df_test.ImageHeight = df_test.ImageID.map(
        lambda x: get_image_size(get_fn(x))[1])
    df_test['img_index'] = df_test.index
    df_test = parallel_apply(df_test, set_pred_str)
    df_test = df_test.drop(columns=['img_index'], axis=1)

    df_test.to_csv(args.out, index=False)
    print('done')
Example #4
    def set_image(self, image_inst):
        curr_size = utils.get_image_size(self.image)
        next_size = utils.get_image_size(image_inst)

        if curr_size != next_size:
            original_type = type(image_inst)
            image_inst = cv2.resize(utils.convert_image_type(image_inst, numpy.ndarray), curr_size)
            image_inst = utils.convert_image_type(image_inst, original_type)

        self.image = image_inst
Example #5
def create_val_prediction(args):
    with open(args.pred_file, 'rb') as f:
        print('loading: ', args.pred_file)
        preds = pickle.load(f)
    df_test = pd.read_csv(os.path.join(settings.DATA_DIR, 'val_imgs.csv'))
    img_dir = settings.VAL_IMG_DIR
    print('getting image sizes')
    df_test['h'] = df_test.ImageId.map(
        lambda x: get_image_size(os.path.join(img_dir, '{}.jpg'.format(x)))[1])
    df_test['w'] = df_test.ImageId.map(
        lambda x: get_image_size(os.path.join(img_dir, '{}.jpg'.format(x)))[0])
    print(df_test.head())

    final_preds = []

    for p in tqdm(preds, total=len(preds)):
        final_preds.append(get_preds(p, args.th))
    total_objs = 0
    for p in final_preds:
        total_objs += len(p['labels'])
    print('total predicted objects:', total_objs)

    classes, _ = get_classes()
    res = []
    for i, (img_id,
            p) in tqdm(enumerate(zip(df_test.ImageId.values, final_preds)),
                       total=len(final_preds)):
        h = df_test.iloc[i].h
        w = df_test.iloc[i].w
        #pred_strs.append(get_pred_str(p, w, h, classes))
        for label, score, bbox in zip(p['labels'], p['scores'], p['bboxes']):
            label1, label2 = classes[label].split(',')
            det = {
                'ImageID': img_id,
                'LabelName1': label1,
                'LabelName2': label2,
                'XMin1': bbox[0] / w,
                'YMin1': bbox[1] / h,
                'XMax1': bbox[2] / w,
                'YMax1': bbox[3] / h,
                'XMin2': bbox[0] / w,
                'YMin2': bbox[1] / h,
                'XMax2': bbox[2] / w,
                'YMax2': bbox[3] / h,
                'RelationshipLabel': 'is',
                'Score': score
            }
            res.append(det)

    #df_test['PredictionString'] = pred_strs
    df_val = pd.DataFrame(res)
    print(df_val.head())
    df_val.to_csv(args.out, index=False)
    print('done')
Example #6
    def on_click(event):
        """Plot click event handler - zoom into set"""
        global box
        print event.inaxes
        box_width = (box[1].real - box[0].real)
        box_height = (box[1].imag - box[0].imag)
        center = complex(box[0].real + (float(event.xdata)/get_image_size(img)[0])*box_width,
                         box[0].imag + (1.0-(float(event.ydata)/get_image_size(img)[1]))*box_height)
        new_box_size = complex(box_width/1.5, box_height/1.5)
        box[0] = center - new_box_size/2
        box[1] = center + new_box_size/2

        render_mandel_iterative(img, box[0], box[1], max_iter)
        ax.imshow(img, cm.spectral, interpolation='nearest')
        plt_show()
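The click-to-complex-plane mapping above, shown standalone with hypothetical values (box holds the lower-left and upper-right corners of the current view):

box = [complex(-2.0, -1.5), complex(1.0, 1.5)]
img_w, img_h = 300, 300        # image size in pixels
xdata, ydata = 150.0, 75.0     # hypothetical click coordinates
box_width = box[1].real - box[0].real
box_height = box[1].imag - box[0].imag
center = complex(box[0].real + (xdata / img_w) * box_width,
                 box[0].imag + (1.0 - ydata / img_h) * box_height)
print(center)  # (-0.5+0.75j); the imaginary axis is flipped because pixel rows grow downward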
Example #7
    def render_avi(dir_name: str, avi_full_path: str, fps=25):

        if not os.path.isdir(dir_name):
            raise OSError(f'"{dir_name}" is not a directory')

        if not avi_full_path.lower().endswith('.avi'):
            avi_full_path += '.avi'

        if dir_name[-1] not in ['\\', '/']:
            dir_name += os.sep

        frames = [dir_name + img for img in os.listdir(dir_name) if img.split('.')[-1].lower() in ['jpg', 'png']]
        frames = sorted(frames, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))

        frames_len = len(frames)
        frame_res = utils.get_image_size(frames[0])

        writer = cv2.VideoWriter(avi_full_path, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), fps, frame_res)

        print('Frames count: ' + str(frames_len))
        j = 0
        for i in range(frames_len):
            if i > 0 and i % fps == 0:
                print(f'{i+1}/{frames_len}', end=', ')
                j += 1
                if j > 0 and j % 10 == 0:
                    print(end='\n')
            writer.write(utils.load_image(frames[i], numpy.ndarray))

        print('Rendering AVI Done!')
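A hypothetical invocation, assuming render_avi is exposed as a static method (shown unqualified for brevity) and ./frames holds numerically named images (0.jpg, 1.jpg, ...):

render_avi('frames', 'out', fps=30)  # writes out.avi from frames/0.jpg, frames/1.jpg, ...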
Example #8
def dump_test(_X, batch_size=config.batch_size, mode='res'):
    image_size = utils.get_image_size(mode)

    num_batch = int(np.ceil(len(_X) / batch_size))
    for i in range(num_batch):
        if i % 30 == 0:
            print("Batch %d" % (i))
        x_batch, paths = [], []
        for path in _X[i * batch_size:(i + 1) * batch_size]:
            try:
                img = image.load_img(path,
                                     target_size=(image_size, image_size))
            except Exception:
                print(path)
                continue

            img = image.img_to_array(img)
            x_batch.append(img)

            path = path.split('/')[1].split('.')[0]
            paths.append(path)

        x_batch = np.array(x_batch)
        x_batch = get_features(x_batch, mode)

        paths = np.array(paths)
        pickle.dump((x_batch, paths),
                    open('x_test_' + mode + '/' + str(i) + '.pickle', 'wb'),
                    pickle.HIGHEST_PROTOCOL)
Example #9
def draw_fleet(fb, x, y, rows, columns, padding, alien, mask, img_idx):
    """ Draw table with same bitmap"""
    w, h = get_image_size(alien[img_idx])
    for i in xrange(columns):
        for j in xrange(rows):
            draw_bitmap_2(fb, x + i*(w+padding), y + j*(h+padding),
                          alien[img_idx], mask[img_idx])
Example #10
    def set_effects(self, kwargs: dict):
        self.effects = {}
        no_effects_list = create_empty_callbacks_cl(self.frames_count)

        for item in ('preprocess_first', 'preprocess_second',
                     'postprocess_first', 'postprocess_second'):

            value = kwargs.get(item, no_effects_list)

            print(f'@@ set_effects!! item={item} of type={type(value)}')

            if isclass(value):
                # noinspection PyTypeChecker
                if issubclass(value, ImageTravel):
                    value: ImageTravel = value(
                        image=self.im1,
                        frames_count=self.frames_count,
                        frame_resolution=utils.get_image_size(self.im1))

            if isinstance(value, ImageTravel):
                print('TRANSITION: if isinstance(value, ImageTravel):')
                cb = utils.Callback(fun_ptr=value.next_frame,
                                    needs_params=('frame_index', 'im1'))

                value: utils.CircleList = utils.CircleList(
                    list_or_tuple=[cb], max_iterations=self.frames_count)

            if isinstance(value, (list, tuple)):
                value: utils.CircleList = utils.CircleList(
                    list_or_tuple=value, max_iterations=self.frames_count)
            if not isinstance(value, utils.CircleList):
                raise TypeError
            value.max_iterations = self.frames_count
            self.effects[item] = value
Example #11
def save_to_seaweedfs(message):
    host = CONFIG['seaweed_filler_host']
    port = CONFIG['seaweed_filler_port']

    try:
        images_list = message['preview_images']
        _dir = message['document_uuid']
        original_document = message['original_document']
        document_name = message['document_name']
        result_list = save_files_filler_batch(host, port, _dir, images_list)

        _split = os.path.split(original_document)
        _ext = _split[1].split('.')[-1]
        document_file = '%s.%s' % (document_name, _ext)
        new_original_document = os.path.join(_split[0], document_file)

        os.rename(original_document, new_original_document)
        save_file_to_filler(host, port, _dir, new_original_document)

        message['cover_image'] = result_list[0]
        width, height = get_image_size(images_list[0])
        message['preview_image_width'] = width
        message['preview_image_height'] = height
        message['original_document'] = document_file

        # TODO: add /tmp to conf
        shutil.rmtree(os.path.join('/tmp', _dir))
        del message['preview_images']

        return message
    except Exception as e:
        sentry_client(e)
Example #12
	def work(item):
		if item.icon:
			print 'thread start'
			if item.icon.startswith('plugins'):
				item.icon = '/{}'.format(item.icon)
			else:
				item.icon = CACHE.get(item.icon)
			item.info['poster'] = item.icon
			try:
				item.width, item.height = utils.get_image_size('.{}'.format(item.icon))
				if item.width > 600 or item.height > 600:
					item.width = item.width / 2
					item.height = item.height / 2
				if item.width < 200 or item.height < 200:
					item.width = item.width * 2
					item.height = item.height * 2
			except:
				traceback.print_exc(file=sys.stdout)
				item.width = 300
				item.height = 300
			print 'thread done'
		else:
			item.width = 300
			item.height = 300
		item.context['Item info'] = 'ItemInfo({})'.format(json.dumps(item.info))			
Example #14
def next_batch(_X, _Y, batch_size=128, mode='res'):
    image_size = utils.get_image_size(mode)

    num_batch = int(np.ceil(len(_Y) / batch_size))
    for i in range(num_batch):
        if i % 30 == 0:
            print("Batch: %d" % (i))
        x_batch, y_batch = [], []
        for path, label in zip(
                _X[i * batch_size:(i + 1) * batch_size],
                _Y[i * batch_size:(i + 1) * batch_size]):
            try:
                img = image.load_img(path,
                                     target_size=(image_size, image_size))
            except Exception:
                continue

            img = image.img_to_array(img)
            x_batch.append(img)

            y_tmp = np.zeros((config.num_classes, ))
            y_tmp[label] = 1
            y_batch.append(y_tmp)

        x_batch = np.array(x_batch)
        x_batch = get_features(x_batch, mode)

        y_batch = np.array(y_batch)

        yield x_batch, y_batch
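A hypothetical consumer of the generator above; train_paths, train_labels and the compiled Keras-style model are placeholder names:

for x_batch, y_batch in next_batch(train_paths, train_labels, batch_size=64, mode='res'):
    model.train_on_batch(x_batch, y_batch)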
Example #15
    def reload(self, **kwargs) -> Union[utils.image_types, bool]:
        """
        Reloads image from drive with other params than in __init__
        :keyword kwargs:
            :key image_type:  PIL.Image.Image or numpy.ndarray, or passed in __init__
            :key resolution:  new resolution or passed in __init__
            :key return_image: if passed and set to True, method returns reloaded image, else returns True
        :return: look above
        """
        self.image = utils.load_image(self.init_params['full_path'], self.init_params['image_type'])
        resolution = self.init_params['kwargs'].get('resolution')
        image_type = kwargs.get('image_type', self.init_params['image_type'])

        if kwargs.get('resolution', None) is not None:
            resolution = kwargs['resolution']

        if resolution is not None:
            print('ImageHandler reloading, resize to', resolution)
            loaded_image_size = utils.get_image_size(self.image)
            if loaded_image_size != resolution:
                self.image = utils.convert_image_type(
                    source_image=cv_effects.cut_rect(
                        image=self.image,
                        src_box=(0, 0, *loaded_image_size),
                        size=resolution
                    ),
                    dest_type=image_type
                )
        self.init_params['reload_params'] = kwargs
        if kwargs.get('return_image', None) is not None:
            return self.image
        return True
Example #16
def fit_images(images_list: list, resolution=(1920, 1080)) -> list:

    for i in range(len(images_list)):
        if resolution != utils.get_image_size(images_list[i]):
            images_list[i] = cv2.resize(images_list[i], resolution)

    return images_list
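A quick check of fit_images with synthetic frames, assuming utils.get_image_size returns (width, height) for an ndarray:

import numpy as np

frames = [np.zeros((720, 1280, 3), np.uint8), np.zeros((1080, 1920, 3), np.uint8)]
frames = fit_images(frames, resolution=(1920, 1080))
print([f.shape for f in frames])  # both (1080, 1920, 3): cv2.resize takes (width, height)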
Example #17
 def _get_data(self, json_file_path):
     """ 
     Retrun dict with list of images and list of classes and list of objects,
     objects have img_path, widtha and height of image, list of dict with singel boxes,
     and list of labels.
     :param json_file_path: path to json file to open
     :type json_file_path: str
     :retruns: dict of json data
     :rtype: dict
     """
     output = {}
     objects = []
     images = []
     uniq_dataset_classes = []
     json_data = self._read_json_file(json_file_path)
     for image_json_object in json_data:
         if not bool(image_json_object["Label"]):
             continue
         object_data = {}
         image_save_path = os.path.join(self.tmp_folder,
                                        image_json_object["ID"] + ".jpg")
         urllib.request.urlretrieve(image_json_object["Labeled Data"],
                                    image_save_path)
         self._check_images_orientation(image_save_path)
         boxes = list()
         labels = list()
         for object_on_img in image_json_object["Label"]["objects"]:
             xmin = object_on_img['bbox']['left']
             ymin = object_on_img['bbox']['top']
             xmax = xmin + object_on_img['bbox']['width']
             ymax = ymin + object_on_img['bbox']['height']
             boxes.append({
                 'xmin': xmin,
                 'ymin': ymin,
                 'xmax': xmax,
                 'ymax': ymax
             })
             labels.append(object_on_img["value"])
             if not object_on_img["value"] in uniq_dataset_classes:
                 uniq_dataset_classes.append(object_on_img["value"])
         width, height, depth = get_image_size(image_save_path)
         object_data.update({'img_name': os.path.split(image_save_path)[1]})
         object_data.update({'img_path': image_save_path})
         object_data.update({'width': width})
         object_data.update({'height': height})
         object_data.update({'depth': depth})
         object_data.update({'boxes': boxes})
         object_data.update({'labels': labels})
         objects.append(object_data)
         images.append(image_save_path)
     output.update({'images': images})
     uniq_dataset_classes = self._format_dataset_classes(
         uniq_dataset_classes)
     output.update({'dataset_classes': uniq_dataset_classes})
     output.update({'objects': objects})
     return output
Example #18
def render_mandel_iterative(img, ll_corner, ur_corner, max_iter=100):
    """ Simplest approach, iterating over every point """
    size = get_image_size(img)
    x_axis = np.linspace(ll_corner.real, ur_corner.real, num=size[0])
    y_axis = np.linspace(ur_corner.imag, ll_corner.imag, num=size[1])
    for i in xrange(size[1]):
        for j in xrange(size[0]):
            val, iteration, max_reached = mandel_point(complex(x_axis[j], y_axis[i]), max_iter)
            if not max_reached:
                putpixel(img, j, i, iteration)
            else:
                putpixel(img, j, i, 0)
Example #19
def get_image(html, query, current):
    # extract image url    
    img_list = get_img_list(html)
    num = 0
    if img_list:
        count = 0
        # extract title
        title_list = get_title_list(html)
        title_list = remove_label(title_list)
        # extract size
        width_list = get_width_list(html)
        height_list = get_height_list(html)
        for img_url in img_list:
            # extract url
            img_url = get_image_url(img_url)
            # crawl image on the page
            try:
                # detect the image (find the biggest face in the image)
                result = api.detection.detect(url=img_url, mode='oneface')
                if result['face']:
                    image_path = '%s%s.jpg' % (IMAGE_DIR, current+num)
                    # download image
                    filename, msg = urllib.urlretrieve(img_url, image_path)
                    # if download successfully
                    if msg['Content-Length']:
                        # append tag to image
                        append_tags(image_path, get_image_size(width_list[count]), get_image_size(height_list[count]),
                            get_image_title(title_list[count]), img_url, query, result['face'][0]['attribute'])
                        # output info
                        print "%s: %s" % (current+num, img_url)
                        num += 1
                        if num >= EACH:
                            break
            except Exception as err:
                print(err)
            count += 1
    else:
        print "%s didn't have .jpg format picture" % query
    return current+num
Example #20
def ensemble(args):
    global all_preds, classes, ens_dets, MAX_NUM
    MAX_NUM = args.max_num

    classes, _ = get_top_classes(args.start_index, args.end_index,
                                 args.class_file)
    for fn in pred_files:
        print('loading {} ...'.format(fn))
        with open(fn, 'rb') as f:
            all_preds.append(pickle.load(f))

    print('len(preds):', len(all_preds[0]))
    print('num classes of preds:', len(all_preds[0][0][1]))
    print('specified num classes:', len(classes))
    #assert len(preds[0][1]) == len(classes)

    with Pool(24) as p:
        num_imgs = len(all_preds[0])
        #ens_dets = list(tqdm(iterable=p.map(get_ens_det, list(range(num_imgs))), total=num_imgs))
        ens_dets = p.map(get_ens_det, range(num_imgs))
    #num_imgs = len(preds1)
    #for idx in tqdm(range(num_imgs), total=num_imgs):
    #    ens_dets.append(get_ens_det(idx))

    print('getting img size...')
    df_test = pd.read_csv(osp.join(DATA_DIR, 'sample_empty_submission.csv'))
    df_test.ImageWidth = df_test.ImageID.map(
        lambda x: get_image_size(get_fn(x))[0])
    df_test.ImageHeight = df_test.ImageID.map(
        lambda x: get_image_size(get_fn(x))[1])

    print('creating submission...')

    df_test['img_index'] = df_test.index
    df_test = parallel_apply(df_test, set_pred_str)
    df_test = df_test.drop(columns=['img_index'], axis=1)

    df_test.to_csv(args.out, index=False)
    print('done')
Example #21
def animate():
    """ Draw alien fleet repeatedly, taking care
    of descending to next row when appropriate"""
    global fig


    w, h = get_image_size(alien[0])
    rows = 4
    columns = 10
    padding = 10

    yend = yres - rows * (h + padding)
    # last column does not need right padding
    row_width = (w + padding) * columns - padding


    y = 0
    x = padding
    xdelta = 3
    ydelta = 4
    flip_freq = 10
    frame_num = 0
    img_idx = 0
    key = False
    def on_key_press(event):
        print "Key!"
        key = True
    #hid = fig.canvas.mpl_connect('key_press_event', on_key_press)
    while y <= yend and not key:
        frame_num += 1
        fb[:] = img[:]
        x += xdelta

        if frame_num % flip_freq == 0:
            img_idx += 1
            if img_idx >= len(alien):
                img_idx = 0

        if x + row_width >= xres:
            x -= (x + row_width) % xres
            xdelta = -xdelta
            y += ydelta
        elif x < 0:
            xdelta = -xdelta
            x = x + xdelta
            y += ydelta
        draw_fleet(fb, x, y, rows, columns, padding, alien, mask, img_idx)

        dat.set_data(fb)
        fig.canvas.draw()
        plt.pause(0.001)
Example #22
def draw_bitmap(img, x, y, bitmap, mask=None):
    """ Draws bitmap on image array, on positions x, y
    optionally using mask
    """
    if mask is not None:
        assert bitmap.shape[0:2] == mask.shape

    w, h = get_image_size(bitmap)
    for yi in xrange(h):
        for xi in xrange(w):
            # anything that is not 0 in mask means take that pixel
            m = mask is None or not getpixel(mask, xi, yi)
            if m:
                putpixel(img, x+xi, y+yi, getpixel(bitmap, xi, yi))
Example #23
def draw_bitmap_2(img, x, y, bitmap, mask=None):
    """ More numpy-ish way"""
    if mask is not None:
        assert bitmap.shape[0:2] == mask.shape

    w, h = get_image_size(bitmap)
    if mask is not None:
        if img.ndim == 3:
            # take care of case with RGB image & single component mask
            img[y:y+h, x:x+w] &= mask[:, :, np.newaxis]
        elif img.ndim == 2:
            img[y:y+h, x:x+w] &= mask
        img[y:y+h, x:x+w] |= bitmap
    else:
        img[y:y+h, x:x+w] = bitmap
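How the masked in-place blit above works on raw arrays, with a simplified mask (unlike mask_from_bitmap in Example #25, this sketch does not dilate the mask to neighbouring pixels):

import numpy as np

img = np.full((4, 4), 0xF0, dtype=np.uint8)           # background pattern
bitmap = np.array([[0x0A, 0x00],
                   [0x00, 0x0B]], dtype=np.uint8)     # 2x2 sprite
mask = np.where(bitmap > 0, 0, 255).astype(np.uint8)  # 0 wherever the sprite has ink
img[1:3, 1:3] &= mask                                 # AND clears the destination under the sprite
img[1:3, 1:3] |= bitmap                               # OR writes the sprite into the cleared hole
print(img)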
Example #24
 def _check_images_orientation(self, image_path):
     """
     Check if dowloaded images have good orientation, and if bad
     orientation that will be rotated.
     :param image_path: path image
     :type image_path: str
     :retruns: None
     :rtype: None
     """
     width, height, depth = get_image_size(image_path)
     if height > width:
         image = cv2.imread(image_path)
         image = ndimage.rotate(image, 90)
         cv2.imwrite(image_path, image)
     return None
Example #25
def mask_from_bitmap(img):
    """ Derive mask for blitting image img. Mask has value 0
    iff given pixel or any of its neighbours has
    nonzero value, 255 otherwise"""

    mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    w, h = get_image_size(img)
    for i in xrange(h):
        for j in xrange(w):
            # in this case neighbours include pixel itself
            neighbours = img[max(0, i-1) : min(i+2, h),
                             max(0, j-1) : min(j+2, w)]
            mask[i][j] = 0 if np.any(neighbours) else 255

    return mask
Example #26
def test(model, test_loader, test_files, config):
    """Test the result"""

    # TODO: use config.yml
    conf_thresh = 0.005
    nms_thresh = 0.45
    num_classes = config['num_classes']
    anchors = config['anchors']
    num_anchors = len(anchors) // 2

    line_id = -1
    results = {}
    for i in range(num_classes):
        results[i] = []

    start = time.time()
    for batch_idx, (data, target) in enumerate(test_loader):
        data = data.cuda()
        data = Variable(data, volatile=True)
        output = model(data).data
        batch_boxes = get_region_boxes(output, conf_thresh, num_classes,
                                       anchors, num_anchors, 0, 1)
        for i in range(output.size(0)):
            line_id += 1
            im_id = os.path.basename(test_files[line_id]).split('.')[0]
            width, height = get_image_size(test_files[line_id])
            boxes = batch_boxes[i]
            boxes = nms(boxes, nms_thresh)
            for box in boxes:
                x1 = (box[0] - box[2] / 2) * width
                y1 = (box[1] - box[3] / 2) * height
                x2 = (box[0] + box[2] / 2) * width
                y2 = (box[1] + box[3] / 2) * height

                det_conf = box[4]
                for j in range((len(box) - 5) // 2):
                    cls_conf = box[5 + 2 * j]
                    cls_id = box[6 + 2 * j]
                    prob = det_conf * cls_conf
                    results[cls_id].append((im_id, prob, x1, y1, x2, y2))
        end = time.time()
        print('Batch {}/{}, time cost: {:.4f}s per image'.format(
            batch_idx + 1, len(test_loader),
            (end - start) / ((batch_idx + 1) * output.size(0))))

    return results
Example #27
    def __init__(
            self,
            full_path: str,
            image_type: utils.image_types = numpy.ndarray,
            **kwargs):
        """

        :param full_path: full path to image on drive
        :param image_type: PIL.Image.Image or numpy.ndarray
        :keyword kwargs:
            :key load: loads image to RAM, pass True value for it
            :key resolution: tuple (width, height) that tells to handle rescaled image
            :key frame_indexes: list/tuple of ints describing positions on timeline where image should occur
            :key uid_setter: instance of such class with method set_next_uid() -> str
        """

        kwargs['frame_indexes'] = kwargs.get('frame_indexes', [0])
        self.init_params = locals()
        self.uid = None

        if kwargs.get('uid_setter', False):
            uid_setter = kwargs['uid_setter']
            if hasattr(uid_setter, 'set_next_uid'):
                uid_setter = getattr(uid_setter, 'set_next_uid')
                if callable(uid_setter):
                    self.uid = uid_setter()

        self.image = False if not kwargs.get('load', False) else utils.load_image(
            image_or_path=full_path,
            result_type=image_type
        )

        if 'resolution' in kwargs:
            if self.image is not False:
                self.image = utils.load_image(self.image, numpy.ndarray)
                size = utils.get_image_size(self.image)
                if size != kwargs['resolution']:
                    print('ImageHandler resize resolution to', kwargs['resolution'])
                    self.image = cv2.resize(self.image, kwargs['resolution'])

                self.image = utils.convert_image_type(self.image, image_type)
Example #28
    def __init__(self, appName):
        self.timer = 0

        self.window = ac.newApp(appName)
        ac.setTitle(self.window, "")
        ac.drawBorder(self.window, 0)
        ac.setIconPosition(self.window, 0, -10000)
        ac.setSize(self.window, 367, 73)
        ac.setBackgroundOpacity(self.window, 0)

        self.fastestLapBanner = ac.addLabel(self.window, "")
        ac.setPosition(self.fastestLapBanner, 0, 0)
        w, h = get_image_size(FC.FASTEST_LAP_BANNER)
        ac.setSize(self.fastestLapBanner, w, h)
        ac.setBackgroundTexture(self.fastestLapBanner, FC.FASTEST_LAP_BANNER)

        self.fastestLapBackground = ac.addLabel(self.window, "")
        ac.setPosition(self.fastestLapBackground, w, 0)
        ac.setSize(self.fastestLapBackground, 400, h)
        ac.setBackgroundTexture(self.fastestLapBackground,
                                FC.DRIVER_WIDGET_BACKGROUND)

        self.nameLabel = ac.addLabel(self.window, "")
        ac.setPosition(self.nameLabel, w + 10, 11)
        ac.setFontSize(self.nameLabel, 22)
        ac.setCustomFont(self.nameLabel, FC.FONT_NAME, 0, 0)
        ac.setFontColor(self.nameLabel, 0.86, 0.86, 0.86, 1)
        ac.setFontAlignment(self.nameLabel, "left")

        self.lastNameLabel = ac.addLabel(self.window, "")
        ac.setPosition(self.lastNameLabel, w + 10, 37)
        ac.setFontSize(self.lastNameLabel, 28)
        ac.setCustomFont(self.lastNameLabel, FC.FONT_NAME, 0, 1)
        ac.setFontColor(self.lastNameLabel, 0.86, 0.86, 0.86, 1)
        ac.setFontAlignment(self.lastNameLabel, "left")

        self.timeLabel = ac.addLabel(self.window, "")
        ac.setPosition(self.timeLabel, w + 385, 22)
        ac.setFontSize(self.timeLabel, 35)
        ac.setCustomFont(self.timeLabel, FC.FONT_NAME, 0, 1)
        ac.setFontColor(self.timeLabel, 0.86, 0.86, 0.86, 1)
        ac.setFontAlignment(self.timeLabel, "right")
Example #29
def main(args):
    # instantiate model and inputs
    image_size = get_image_size(args.model_name)
    input_shape = [args.bs, 3, image_size, image_size]
    model = get_model(args.model_name)
    dummy_input = torch.randn(*input_shape).cuda()

    with closing(get_inference_wrapper(model, dummy_input,
                                       args.mode)) as inference_wrapper:
        result = evaluate(inference_wrapper)

    df = eval_result_to_df(args.mode, result)
    if args.out_path:
        df.to_csv(args.out_path)
    else:
        summary = pd.DataFrame({
            'mean (ms)': df.mean(),
            'stdev (ms)': df.std()
        })
        print(summary)
Example #30
	def shape(self):
		#Standard shape setup of window
		print 'shaping window'
		w,h = self.window.get_size()
		if w==0: w = 100
		if h==0: h = 100
		self.w = w
		self.h = h
		self.pixmap = gtk.gdk.Pixmap (None, w, h, 1)
		ctx = self.pixmap.cairo_create()
		self.bgpb = gtk.gdk.pixbuf_new_from_file(Globals.ImageDirectory + Globals.StartMenuTemplate)
		if Globals.Settings['GtkColors'] == 1 and Globals.Has_Numpy:
			if not self.colorpb:
				bgcolor = Globals.GtkColorCode
				r = (bgcolor.red*255)/65535.0
				g = (bgcolor.green*255)/65535.0
				b = (bgcolor.blue*255)/65535.0
				self.colorpb= self.bgpb.copy()
				for row in self.colorpb.get_pixels_array():
					for pix in row:
						pix[0] = r
						pix[1] = g
						pix[2] = b
				self.bgpb.composite(self.colorpb, 0, 0, self.w, self.h, 0, 0, 1, 1, gtk.gdk.INTERP_BILINEAR, 70)
			self.bgpb = self.colorpb

		ctx.save()
		ctx.set_source_rgba(1, 1, 1,0)
		ctx.set_operator (cairo.OPERATOR_SOURCE)
		ctx.paint()
		ctx.restore()
		if Globals.MenuHasIcon==1:
			cairo_drawing.draw_image(ctx,Globals.UserIconFrameOffsetX,Globals.UserIconFrameOffsetY,Globals.UserImageFrame)
			w,h = utils.get_image_size(Globals.UserImageFrame)
			cairo_drawing.draw_scaled_image(ctx,Globals.IconInX +Globals.UserIconFrameOffsetX,Globals.UserIconFrameOffsetY+Globals.IconInY,Globals.UserImage,Globals.IconInW ,Globals.IconInH)
		cairo_drawing.draw_enhanced_image(ctx,0,0,Globals.ImageDirectory + Globals.StartMenuTemplate)

		if self.window.is_composited():
			self.window.input_shape_combine_mask(self.pixmap,0,0)
		else:
			self.window.shape_combine_mask(self.pixmap, 0, 0)
Example #31
    def _extract_by_image_size(self, url):
        self.driver.get(url)

        images = self.driver.find_elements_by_tag_name('img')

        for image in images[:4]:
            src = image.get_property('src')

            if not src:
                continue

            image_url = utils.image_src_to_url(url, src)

            try:
                width, height = utils.get_image_size(image_url)
                if (50 <= width <= 100) and (50 <= height <= 100):
                    return image_url
            except Exception:
                pass

        return None
Example #32
    def pick_image_for_frame(self, frame_index: int = -1) -> Tuple[utils.image_types, int]:
        if frame_index == -1:
            # print(f'Wizard::pick_image() for frame {frame_index} ==> (default) = {self.global_frame_index}')
            frame_index = self.global_frame_index

        closest, result_handler_id = self.total_slide_duration, None
        for handler_id, image_handler in enumerate(self.image_handlers):
            indexes = image_handler.get_frame_indexes()

            if frame_index >= indexes[0]:
                dist = frame_index - indexes[0]
                if dist < closest:
                    closest, result_handler_id = dist, handler_id
        if result_handler_id is None:
            raise ValueError('No such image')
        result = self.image_handlers[result_handler_id].get_image()
        print(f'picked image.index = {result_handler_id}, with shape:', utils.get_image_size(result))
        return (
            cv2.resize(utils.convert_image_type(result, numpy.ndarray), self.frame_resolution),
            result_handler_id
        )
Example #33
def scale_images_dir(src_images_dir_path: str, dest_images_dir_path: str, dest_size=(1920, 1080)) -> int:
    if not os.path.isdir(src_images_dir_path):
        raise FileNotFoundError(f'src_images_dir_path="{src_images_dir_path}" is not a directory')
    if os.path.isfile(dest_images_dir_path):
        raise OSError(f'dest_images_dir_path="{dest_images_dir_path}" is file, not directory')

    result = 0
    if src_images_dir_path[-1] not in ['\\', '/']:
        src_images_dir_path += os.path.sep
    if dest_images_dir_path[-1] not in ['\\', '/']:
        dest_images_dir_path += os.path.sep

    for next_file in os.listdir(src_images_dir_path):
        if next_file.split('.')[-1].lower() in ['jpg', 'png', 'bmp']:
            img = utils.load_image(src_images_dir_path + next_file, np.ndarray)
            if utils.get_image_size(img) != dest_size:
                img = cv2.resize(img, dest_size)
            utils.save_image(img, dest_images_dir_path + next_file)
            result += 1
    return result
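A hypothetical run, normalizing every jpg/png/bmp under ./raw into same-named 1280x720 copies under ./scaled:

count = scale_images_dir('raw', 'scaled', dest_size=(1280, 720))
print(count, 'images written')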
Example #34
 def _cut_objects(self, image_data):
     """
     Method cut labled objcet form image and save it as small image in tmp class folders.
     :param image_data: dict of info about image and all object on that image
     :type image_data: dict
     :retruns: None
     :rtype: None
     """
     full_image = cv2.imread(image_data['img_path'])
     for idx, i_object in enumerate(image_data['boxes']):
         cut_image = full_image[i_object['ymin']:i_object['ymax'],
                                i_object['xmin']:i_object['xmax']]
         cut_object_name = "".join([
             os.path.splitext(image_data['img_name'])[0], '_',
             str(idx), ".jpg"
         ])
         cut_object_path = os.path.join(
             self.dataset_classes_path_map[image_data['labels'][idx]],
             cut_object_name)
         width, height, depth = get_image_size(cut_image)
         if height > width:
             cut_image = ndimage.rotate(cut_image, -90)
         cv2.imwrite(cut_object_path, cut_image)
     return None
Example #35
 def _read_text(self, text_path, uniq_dataset_classes=[]):
     """
     Method that extract information from text file and return dict, 
     and update uniq dataset classes list.
     :param text_path: path to text file
     :type text_path: str
     :param uniq_dataset_classes: list of uniq class name
     :type uniq_dataset_classes: list
     :return: tuple with dict data from text and list of uniq class name
     :rtype: tuple
     """
     output_dict = {}
     image_filepath, image_filename = self._get_img_name_and_img_path_from_text_filename(
         text_path)
     output_dict['img_name'] = image_filename
     output_dict['img_path'] = image_filepath
     width, height, depth = get_image_size(output_dict['img_path'])
     output_dict['width'] = width
     output_dict['height'] = height
     output_dict['depth'] = depth
     labels = []
     boxes = []
     for label_bbox_str in load_text_file_as_list(text_path):
         bbox = {}
         label_bbox_list = label_bbox_str.split()
         labels.append(uniq_dataset_classes[int(label_bbox_list[0])])
          bbox['xmin'] = int(width * float(label_bbox_list[1]))
          bbox['ymin'] = int(height * float(label_bbox_list[2]))
          bbox['xmax'] = bbox['xmin'] + int(width * float(label_bbox_list[3]))
          bbox['ymax'] = bbox['ymin'] + int(height * float(label_bbox_list[4]))
          boxes.append(bbox)
      output_dict['boxes'] = boxes
      output_dict['labels'] = labels
      return output_dict, uniq_dataset_classes
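A worked example of the conversion above with a hypothetical row and image size, assuming each row is "class_idx left top width height" with normalized coordinates, which is what the arithmetic implies:

width, height = 640, 480
label_bbox_list = '0 0.10 0.20 0.50 0.50'.split()
xmin = int(width * float(label_bbox_list[1]))          # 64
ymin = int(height * float(label_bbox_list[2]))         # 96
xmax = xmin + int(width * float(label_bbox_list[3]))   # 384
ymax = ymin + int(height * float(label_bbox_list[4]))  # 336
print({'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax})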
Example #36
def valid(weightfile, outfile, outdir):
    pool = ThreadPool(1)
    valid_images = args.valid
    name_list = args.names
    prefix = outdir
    names = utils.load_class_names(name_list)

    utils.set_default_context_by_args(args)

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    # Build the YOLO v2 network
    def create_losses(batchsize, imheight, imwidth, test=True):
        import gc
        gc.collect()
        nnabla_ext.cuda.clear_memory_cache()

        anchors = args.num_anchors
        classes = args.num_classes
        yolo_x = nnabla.Variable((batchsize, 3, imheight, imwidth))
        yolo_features = yolov2.yolov2(yolo_x, anchors, classes, test=test)
        return yolo_x, yolo_features

    yolo_x_nnabla, yolo_features_nnabla = create_losses(args.valid_batchsize,
                                                        args.height,
                                                        args.width,
                                                        test=True)
    nnabla.load_parameters(weightfile)

    valid_dataset = dataset.data_iterator_yolo(valid_images,
                                               args,
                                               train=False,
                                               shape=(args.width, args.height),
                                               shuffle=False,
                                               batch_size=args.valid_batchsize)
    assert (args.valid_batchsize > 1)

    fps = [0] * args.num_classes
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    for i in range(args.num_classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
        fps[i] = open(buf, 'w')

    lineId = 0
    total_samples = len(valid_files)
    total_batches = (total_samples + args.valid_batchsize -
                     1) // args.valid_batchsize

    for each_batch in range(0, total_batches):
        ret = valid_dataset.next()
        data, target = ret
        yolo_x_nnabla.d = data
        yolo_features_nnabla.forward(clear_buffer=True)
        batch_boxes = utils.get_region_boxes(yolo_features_nnabla.d,
                                             args.conf_thresh,
                                             args.num_classes, args.anchors,
                                             args.num_anchors, 0, 1)
        for i in range(yolo_features_nnabla.d.shape[0]):
            if lineId >= total_samples:
                print("Reached End of total_samples")
                break
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            width, height = utils.get_image_size(valid_files[lineId])
            print(valid_files[lineId])
            lineId += 1
            boxes = batch_boxes[i]
            boxes = utils.nms(boxes, args.nms_thresh)
            for box in boxes:
                x1 = (box[0] - box[2] / 2.0) * width
                y1 = (box[1] - box[3] / 2.0) * height
                x2 = (box[0] + box[2] / 2.0) * width
                y2 = (box[1] + box[3] / 2.0) * height

                det_conf = box[4]
                for j in range((len(box) - 5) // 2):
                    cls_conf = box[5 + 2 * j]
                    cls_id = box[6 + 2 * j]
                    prob = det_conf * cls_conf
                    fps[cls_id].write('%s %f %f %f %f %f\n' %
                                      (fileId, prob, x1, y1, x2, y2))

    for i in range(args.num_classes):
        fps[i].close()
Example #37
def acMain(ac_version):
    # VARIABLES
    global totalDrivers
    global drivers

    global leaderboardWindow, driverWidget, driverComparisonWidget, fastest_lap_banner
    # LABELS
    global leaderboard
    global lapCountTimerLabel, leaderboardBaseLabel, leaderboardInfoBackgroundLabel, leaderboardBackgroundLabel
    global flagLabel

    totalDrivers = ac.getCarsCount()
    n_splits = ac.getTrackLength(0) / FC.TRACK_SECTION_LENGTH
    drivers = [Driver(i, n_splits) for i in range(totalDrivers)] # driver positions and update times

    ac.initFont(0, FC.FONT_NAME, 0, 0)

    leaderboardWindow = ac.newApp(FC.APP_NAME)
    ac.setTitle(leaderboardWindow, "")
    ac.drawBorder(leaderboardWindow, 0)
    ac.setIconPosition(leaderboardWindow, 0, -10000)
    ac.setSize(leaderboardWindow, 200, 200)
    ac.setBackgroundOpacity(leaderboardWindow, 0)

    # ===============================
    # Leaderboard Background
    leaderboardBaseLabel = ac.addLabel(leaderboardWindow, "")
    ac.setPosition(leaderboardBaseLabel, 0, 0)
    w, h = get_image_size(FC.LEADERBOARD_BASE_RACE)
    ac.setSize(leaderboardBaseLabel, w, h)
    ac.setBackgroundTexture(leaderboardBaseLabel, FC.LEADERBOARD_BASE_RACE)

    leaderboardBackgroundLabel = ac.addLabel(leaderboardWindow, "")
    ac.setPosition(leaderboardBackgroundLabel, 0, h)
    ac.setSize(leaderboardBackgroundLabel, w, totalDrivers*LeaderboardRow.ROW_HEIGHT + 2)
    ac.setBackgroundTexture(leaderboardBackgroundLabel, FC.LEADERBOARD_BACKGROUND)

    # ===============================
    # Lap Counter / Time
    lapCountTimerLabel = ac.addLabel(leaderboardWindow, "")
    ac.setPosition(lapCountTimerLabel, 74, 52)
    ac.setFontSize(lapCountTimerLabel, 22)
    ac.setCustomFont(lapCountTimerLabel, FC.FONT_NAME, 0, 1)
    ac.setFontAlignment(lapCountTimerLabel, "center")
    ac.setFontColor(lapCountTimerLabel, 0.86, 0.86, 0.86, 1)

    # ===============================
    # Flags
    flagLabel = ac.addLabel(leaderboardWindow, "")
    ac.setPosition(flagLabel, w, 8)
    ac.setSize(flagLabel, 110, h-8)
    ac.setVisible(flagLabel, 0)

    # ===============================
    # Info Background
    leaderboardInfoBackgroundLabel = ac.addLabel(leaderboardWindow, "")
    ac.setPosition(leaderboardInfoBackgroundLabel, w, h)
    ac.setSize(leaderboardInfoBackgroundLabel, 110, totalDrivers*LeaderboardRow.ROW_HEIGHT + 2)
    ac.setBackgroundTexture(leaderboardInfoBackgroundLabel, FC.LEADERBOARD_INFO_BACKGROUNG)

    info_button = ac.addButton(leaderboardWindow, "")
    ac.setPosition(info_button, w, h)
    ac.setSize(info_button, 110, totalDrivers*LeaderboardRow.ROW_HEIGHT + 2)
    ac.addOnClickedListener(info_button, on_click_info)
    ac.setBackgroundOpacity(info_button, 0)
    ac.drawBorder(info_button, 0)

    # ===============================
    # Driver Widget
    driverWidget = DriverWidget(FC.APP_NAME+" Driver")

    # ===============================
    # Driver Comparison Widget
    driverComparisonWidget = DriverComparisonWidget(FC.APP_NAME+" Driver Comparison")

    # ===============================
    # FastestLap Banner
    fastest_lap_banner = FastestLapBanner(FC.APP_NAME+" Top Banner")
    fastest_lap_banner.hide()

    leaderboard = [None] * totalDrivers
    for i in range(totalDrivers):
        leaderboard[i] = LeaderboardRow(leaderboardWindow, i)

    return FC.APP_NAME
Example #38
    def convert_to_coco(self, input_dir, output_file, output_image_dir=None):
        self._check_format(Format.COCO)
        ensure_dir(os.path.dirname(output_file))
        if output_image_dir is not None:
            ensure_dir(output_image_dir)
        images, categories, annotations = [], [], []
        category_name_to_id = {}
        data_key = self._data_keys[0]
        for item_idx, item in enumerate(self.iter_from_dir(input_dir)):
            image_path = item['input'][data_key]
            if not os.path.exists(image_path):
                if output_image_dir is None:
                    raise FileNotFoundError(
                        f'We can\'t find file by path {image_path}: if it is a URL, please specify "output_image_dir" '
                        f'where downloaded images will be stored'
                    )
                try:
                    image_path = download(image_path, output_image_dir)
                except Exception:
                    logger.error(f'Unable to download {image_path}. The item {item} will be skipped', exc_info=True)
                    continue
            width, height = get_image_size(image_path)
            image_id = len(images)
            images.append({
                'width': width,
                'height': height,
                'id': image_id,
                'file_name': image_path
            })
            bboxes = next(iter(item['output'].values()))
            for bbox in bboxes:
                category_name = bbox['rectanglelabels'][0]
                if category_name not in category_name_to_id:
                    category_id = len(categories)
                    category_name_to_id[category_name] = category_id
                    categories.append({
                        'id': category_id,
                        'name': category_name
                    })
                category_id = category_name_to_id[category_name]
                annotation_id = len(annotations)
                x = int(bbox['x'] / 100 * width)
                y = int(bbox['y'] / 100 * height)
                w = int(bbox['width'] / 100 * width)
                h = int(bbox['height'] / 100 * height)
                annotations.append({
                    'id': annotation_id,
                    'image_id': image_id,
                    'category_id': category_id,
                    'segmentation': [],
                    'bbox': [x, y, w, h],
                    'ignore': 0,
                    'iscrowd': 0,
                    'area': w * h
                })

        with io.open(output_file, mode='w') as fout:
            json.dump({
                'images': images,
                'categories': categories,
                'annotations': annotations,
                'info': {
                    'year': datetime.now().year,
                    'version': '1.0',
                    'contributor': 'Label Studio'
                }
            }, fout, indent=2)
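The percent-to-pixel conversion used for each bounding box above, shown standalone with hypothetical Label Studio values:

width, height = 640, 480
bbox = {'x': 25.0, 'y': 50.0, 'width': 10.0, 'height': 20.0}  # percentages of image size
x = int(bbox['x'] / 100 * width)        # 160
y = int(bbox['y'] / 100 * height)       # 240
w = int(bbox['width'] / 100 * width)    # 64
h = int(bbox['height'] / 100 * height)  # 96
print([x, y, w, h], 'area =', w * h)    # [160, 240, 64, 96] area = 6144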
Example #39
    def __init__(
        self,
        image1: image_types or None = None,
        image2: image_types or None = None,
        dest_dir: str = 'rendered',
        frames_count=50,
        **kwargs
    ):  # -~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^/-_/~-~/*^-.
        """
        Base class for animations between images.
        :param image1: look at utils.image_types
        :param image2: look at utils.image_types
        :param dest_dir: there rendered frames will be saved
        :param frames_count: number of frames to render
        :param kwargs:
            usable keys:
              > parent_timeline: Timeline
              > prefix: str -> prefix in file name of rendered frame
              > extension: str -> extension of output file (rendered frame)
              > load_as: [PillowImage, np.ndarray] -> tells how image file should be opened

              > preprocess_first, preprocess_second, postprocess_first, postprocess_second:

                - list of callbacks to predicates or CircleList(list of callbacks/predicates) or ImageTravel instance
                - Type name of ImageTravel class  (type name, not instance or its __init__ method call!

        """
        '''self.fill_later = False
        if kwargs.get('fill_later', False) is True:
            self.fill_later = True
            return'''

        self.im1 = self.im2 = None
        if image1 is not None and image2 is not None:
            self.im1, self.im2 = utils.verify_alpha_channel(
                image1), utils.verify_alpha_channel(image2)
        '''if image1 is None and image2 is None:
            err = 'If params image1 is None and image2 is None, pass parent_timeline argument of Timeline instance'
            if 'parent_timeline' not in kwargs:
                raise AttributeError(err)
            if not isinstance(kwargs['parent_timeline'], RenderController.Timeline):
                raise AttributeError(err)'''

        if 'prefix' not in kwargs:
            self.prefix = self.__class__.__name__
        else:
            self.prefix = kwargs['prefix']

        if 'extension' not in kwargs:
            kwargs['extension'] = 'png'
        else:
            if kwargs['extension'] not in ['jpg', 'png']:
                kwargs['extension'] = 'png'

        if 'frame_resolution' not in kwargs:
            if self.im1 is None:
                tl: RenderControllers.TimelineModel = kwargs.get(
                    'parent_timeline', False)
                if not tl:
                    raise AttributeError('Cannot obtain resolution of frame')
                kwargs['frame_resolution'] = tl.get_frame_resolution()
            else:
                kwargs['frame_resolution'] = utils.get_image_size(self.im1)

        self.dest_dir = dest_dir
        self.frames_count = frames_count
        self.kwargs = kwargs
        self.name_counter = 0

        if 'load_as' not in kwargs:
            self.kwargs['load_as'] = PillowImage
            if isinstance(image1, np.ndarray):
                self.kwargs['load_as'] = np.ndarray

        self.effects = {}
        self.frame_index = -1
Example #40
""" Do some animation, multiple blits, test draw_bitmap_2 """

import matplotlib.pyplot as plt
import matplotlib.image as mpimg

from bitmap import draw_bitmap_2, mask_from_bitmap

from utils import get_image_size
import numpy as np

img = mpimg.imread("../samples/rocks_red.jpg")

xres, yres = get_image_size(img)

alien = (mpimg.imread("../samples/Space_invader_dn_c.png") * 255) \
    .astype(np.uint8)
mask = mask_from_bitmap(alien)
alien2 = (mpimg.imread("../samples/Space_invader_up_c.png") * 255) \
    .astype(np.uint8)
mask2 = mask_from_bitmap(alien2)


alien = [alien, alien2]
mask = [mask, mask2]
# comment out to test animation without masks
# mask = [None, None]


def draw_fleet(fb, x, y, rows, columns, padding, alien, mask, img_idx):
    """ Draw table with same bitmap"""
    w, h = get_image_size(alien[img_idx])
Example #41
def valid(weightfile, outfile, outdir):
    pool = ThreadPool(1)
    valid_images = args.valid
    name_list = args.names
    prefix = outdir
    names = utils.load_class_names(name_list)

    utils.set_default_context_by_args(args)

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    # Build the YOLO v2 network
    def create_losses(batchsize, imheight, imwidth, test=True):
        import gc
        gc.collect()
        nnabla_ext.cuda.clear_memory_cache()

        anchors = args.num_anchors
        classes = args.num_classes
        yolo_x = nnabla.Variable((batchsize, 3, imheight, imwidth))
        yolo_features = yolov2.yolov2(yolo_x, anchors, classes, test=test)
        return yolo_x, yolo_features

    yolo_x_nnabla, yolo_features_nnabla = create_losses(args.valid_batchsize,
                                                        args.height,
                                                        args.width,
                                                        test=True)
    nnabla.load_parameters(weightfile)

    valid_dataset = dataset.listDataset(valid_images,
                                        args,
                                        train=False,
                                        shape=(args.width, args.height),
                                        shuffle=False)
    assert (args.valid_batchsize > 1)

    def batch_iter(it, batch_size):
        def list2np(t):
            imgs, labels = zip(*t)
            retimgs = np.zeros((len(imgs), ) + imgs[0].shape, dtype=np.float32)
            retlabels = np.zeros((len(labels), ) + labels[0].shape,
                                 dtype=np.float32)
            for i, img in enumerate(imgs):
                retimgs[i, :, :, :] = img
            for i, label in enumerate(labels):
                retlabels[i, :] = label
            return retimgs, retlabels

        retlist = []
        for i, item in enumerate(it):
            retlist.append(item)
            if i % batch_size == batch_size - 1:
                ret = list2np(retlist)
                # # Don't train for batches that contain no labels
                # # TODO: fix this
                # if not (np.sum(ret[1].numpy()) == 0):
                yield ret
                retlist = []
        # Excess data is discarded
        if len(retlist) > 0:
            ret = list2np(retlist)
            # # Don't train for batches that contain no labels
            # # TODO: fix this
            # if not (np.sum(ret[1].numpy()) == 0):
            yield ret

    valid_loader = batch_iter(iter(valid_dataset),
                              batch_size=args.valid_batchsize)

    fps = [0] * args.num_classes
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    for i in range(args.num_classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
        fps[i] = open(buf, 'w')

    lineId = -1

    future_data = pool.apply_async(utils.raise_info_thread(next),
                                   (valid_loader, None))
    for batch_idx in itertools.count():
        curr_data = future_data
        future_data = pool.apply_async(utils.raise_info_thread(next),
                                       (valid_loader, None))
        ret = curr_data.get()
        if ret is None:
            break
        data, target = ret
        yolo_x_nnabla.d = data
        yolo_features_nnabla.forward(clear_buffer=True)
        batch_boxes = utils.get_region_boxes(yolo_features_nnabla.d,
                                             args.conf_thresh,
                                             args.num_classes, args.anchors,
                                             args.num_anchors, 0, 1)
        for i in range(yolo_features_nnabla.d.shape[0]):
            lineId = lineId + 1
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            width, height = utils.get_image_size(valid_files[lineId])
            print(valid_files[lineId])
            boxes = batch_boxes[i]
            boxes = utils.nms(boxes, args.nms_thresh)
            for box in boxes:
                x1 = (box[0] - box[2] / 2.0) * width
                y1 = (box[1] - box[3] / 2.0) * height
                x2 = (box[0] + box[2] / 2.0) * width
                y2 = (box[1] + box[3] / 2.0) * height

                det_conf = box[4]
                for j in range((len(box) - 5) // 2):
                    cls_conf = box[5 + 2 * j]
                    cls_id = box[6 + 2 * j]
                    prob = det_conf * cls_conf
                    fps[cls_id].write('%s %f %f %f %f %f\n' %
                                      (fileId, prob, x1, y1, x2, y2))

    for i in range(args.num_classes):
        fps[i].close()