Example #1
    def _construct_and_fill_model(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        self.device_ids = sly.remap_gpu_devices([self.source_gpu_device])

        src_train_cfg_path = join(model_dir, 'model.cfg')
        with open(src_train_cfg_path) as f:
            src_config = f.readlines()

        def repl_batch(row):
            if 'batch=' in row:
                return 'batch=1\n'
            if 'subdivisions=' in row:
                return 'subdivisions=1\n'
            return row

        changed_config = [repl_batch(x) for x in src_config]

        inf_cfg_path = join(model_dir, 'inf_model.cfg')
        if not os.path.exists(inf_cfg_path):
            with open(inf_cfg_path, 'w') as f:
                f.writelines(changed_config)

        self.net = load_net(inf_cfg_path.encode('utf-8'),
                            join(model_dir, 'model.weights').encode('utf-8'),
                            0)
        logger.info('Weights are loaded.')
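
All of the excerpts on this page start from the same pattern: a sly.TaskPaths object resolves the directories of the current task. A minimal sketch of that pattern, limited to the attributes that actually appear in these examples (the import line is an assumption; the excerpts only show the sly alias):

import supervisely_lib as sly  # assumed package behind the `sly` alias used in the excerpts

# Inference excerpts on this page pass determine_in_project=False and only read model/settings paths.
paths = sly.TaskPaths(determine_in_project=False)
model_dir = paths.model_dir          # directory with the trained model files
settings_path = paths.settings_path  # task settings (JSON)

# Excerpts that read an input project and write results use the default constructor.
task_paths = sly.TaskPaths()
project_dir = task_paths.project_dir  # input project directory (includes the project name)
results_dir = task_paths.results_dir  # where outputs are written
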
Example #2
def serve():
    settings = {
        'device_id': 0,
        'cache_limit': 500,
        'connection': {
            'server_address': None,
            'token': None,
            'task_id': None,
        },
    }

    new_settings = sly.json_load(sly.TaskPaths(determine_in_project=False).settings_path)
    logger.info('Input settings', extra={'settings': new_settings})
    sly.update_recursively(settings, new_settings)
    logger.info('Full settings', extra={'settings': settings})

    def model_creator():
        res = UnetV2FastApplier(settings={
            'device_id': settings['device_id']
        })
        return res

    image_cache = SimpleCache(settings['cache_limit'])
    serv_instance = AgentRPCServicer(logger=logger,
                                     model_creator=model_creator,
                                     apply_cback=single_img_pipeline,
                                     conn_settings=settings['connection'],
                                     cache=image_cache)
    serv_instance.run_inf_loop()
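
The serve() example fills a dict of defaults and then merges the loaded task settings into it with sly.update_recursively. The library call itself is not shown here; as an illustration of the assumed merge semantics (loaded values override defaults, nested dicts are merged key by key), a hypothetical standalone equivalent:

def update_recursively(dst, src):
    # Hypothetical re-implementation for illustration only, not the sly function itself:
    # values from src override dst; nested dicts are merged recursively rather than replaced.
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dst.get(key), dict):
            update_recursively(dst[key], value)
        else:
            dst[key] = value

defaults = {'device_id': 0, 'connection': {'server_address': None, 'token': None}}
update_recursively(defaults, {'connection': {'token': 'abc123'}})
# defaults -> {'device_id': 0, 'connection': {'server_address': None, 'token': 'abc123'}}
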
Example #3
    def _load_train_config(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir

        train_config_rw = TrainConfigRW(model_dir)
        if not train_config_rw.train_config_exists:
            raise RuntimeError(
                'Unable to run inference, config from training wasn\'t found.')
        self.train_config = train_config_rw.load()

        src_size = self.train_config['settings']['input_size']
        self.input_size_wh = (src_size['width'], src_size['height'])
        logger.info('Model input size is read (for auto-rescale).',
                    extra={
                        'input_size': {
                            'width': self.input_size_wh[0],
                            'height': self.input_size_wh[1]
                        }
                    })

        self.class_title_to_idx = self.train_config['class_title_to_idx']
        self.train_classes = sly.FigClasses(self.train_config['out_classes'])
        logger.info('Read model internal class mapping',
                    extra={'class_mapping': self.class_title_to_idx})
        logger.info('Read model out classes',
                    extra={'classes': self.train_classes.py_container})

        self.out_class_mapping = {
            x: self.class_title_to_idx[x]
            for x in (x['title'] for x in self.train_classes)
        }
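
The method above only reveals which keys the training config must contain. Reconstructed from those accesses alone, the file has roughly the shape below; the sample is hypothetical (the 'shape' and 'color' fields and all values are placeholders, only the keys read by the code are guaranteed):

train_config = {
    'settings': {
        'input_size': {'width': 512, 'height': 256},  # read for auto-rescale
    },
    'class_title_to_idx': {'bg': 0, 'car': 1},  # internal class mapping
    'out_classes': [  # class descriptions; the code reads only 'title'
        {'title': 'bg', 'shape': 'bitmap', 'color': '#000000'},
        {'title': 'car', 'shape': 'bitmap', 'color': '#FF0000'},
    ],
}
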
Example #4
    def _load_train_config(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir

        train_config_rw = TrainConfigRW(model_dir)
        if not train_config_rw.train_config_exists:
            raise RuntimeError(
                'Unable to run inference, config from training wasn\'t found.')
        self.train_config = train_config_rw.load()

        logger.info(
            'Model input size is read (for auto-rescale).',
            extra={
                'input_size': {
                    'width': 1200,
                    'height': 1200  # input shape is fixed for Faster with NasNet encoder
                }
            })

        self.class_title_to_idx = self.train_config['mapping']
        self.train_classes = sly.FigClasses(self.train_config['classes'])
        logger.info('Read model internal class mapping',
                    extra={'class_mapping': self.class_title_to_idx})
        logger.info('Read model out classes',
                    extra={'classes': self.train_classes.py_container})

        out_class_mapping = {
            x: self.class_title_to_idx[x]
            for x in (x['title'] for x in self.train_classes)
        }
        self.inv_mapping = inverse_mapping(out_class_mapping)
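
inverse_mapping is not defined in this excerpt; Example #9 below builds the same reverse dictionary by hand, so presumably it simply swaps keys and values. A sketch under that assumption:

def inverse_mapping(mapping):
    # Assumed behavior (cf. the explicit rev_mapping in Example #9):
    # turn {class_title: idx} into {idx: class_title}.
    return {idx: title for title, idx in mapping.items()}

inverse_mapping({'bg': 0, 'car': 1})  # -> {0: 'bg', 1: 'car'}
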
Example #5
    def _construct_and_fill_model(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        self.device_ids = sly.remap_gpu_devices([self.source_gpu_device])
        self.model = create_model(n_cls=len(self.train_classes),
                                  device_ids=self.device_ids)

        self.model = WeightsRW(model_dir).load_strictly(self.model)
        self.model.eval()
        logger.info('Weights are loaded.')
Example #6
    def _construct_and_fill_model(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        self.device_ids = sly.remap_gpu_devices([self.source_gpu_device])
        if 'model.pb' not in os.listdir(model_dir):
            logger.info('Freezing training checkpoint!')
            freeze_graph('image_tensor', model_dir + '/model.config',
                         model_dir + '/model_weights/model.ckpt', model_dir)
        self.detection_graph = create_detection_graph(model_dir)
        self.session = tf.Session(graph=self.detection_graph)
        logger.info('Weights are loaded.')
Example #7
    def _construct_and_fill_model(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        self.device_ids = sly.remap_gpu_devices([self.source_gpu_device])

        logger.info('Will create model.')
        with tf.get_default_graph().as_default():
            img_np = tf.placeholder(tf.float32, shape=(None, None, 3))
            img_shape = tf.shape(img_np)

            w, h = self.input_size_wh
            img_np_4d = tf.expand_dims(img_np, axis=0)
            image_rs_4d = tf.image.resize_bilinear(img_np_4d, (h, w), align_corners=True)
            image_rs = tf.squeeze(image_rs_4d, axis=0)
            img = preprocess(image_rs, h, w)

            if 'model' in self.train_config and self.train_config['model'] == 'pspnet101':
                PSPNet = PSPNet101
                align_corners = True
            else:
                PSPNet = PSPNet50
                align_corners = False
            net = PSPNet({'data': img}, is_training=False, num_classes=len(self.train_classes))

            raw_output = net.layers['conv6']  # 4d

            # Predictions.
            raw_output_up = tf.image.resize_bilinear(raw_output,
                                                     size=[img_shape[0], img_shape[1]], align_corners=False)
            # raw_output_up = tf.argmax(raw_output_up, dimension=3)

            logger.info('Will load weights from trained model.')
            # Init tf Session
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            init = tf.global_variables_initializer()
            sess.run(init)
            loader = tf.train.Saver(var_list=tf.global_variables())

            # last_checkpoint = tf_saver.latest_checkpoint(output_train_dir)
            last_checkpoint = osp.join(model_dir, 'model.ckpt')
            loader.restore(sess, last_checkpoint)

            self.input_images = img_np
            self.predictions = raw_output_up
            self.sess = sess
        logger.info('Model has been created & weights are loaded.')
Example #8
    def _construct_and_fill_model(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        self.device_ids = sly.remap_gpu_devices([self.source_gpu_device])
        # Note: attributes are assigned later on this namedtuple type itself, which is used as a simple namespace.
        initialized_model = namedtuple('Model',
                                       ['input_images', 'predictions', 'sess'])

        with tf.get_default_graph().as_default():
            img = tf.placeholder(tf.float32, shape=(None, None, 3))
            input_w, input_h = self.input_size_wh
            img_re = tf.image.resize_images(img, (input_h, input_w))
            original_image, image, label = input_preprocess.preprocess_image_and_label(
                img_re,
                None,
                crop_height=input_h,
                crop_width=input_w,
                is_training=False,
                model_variant="xception_65")

            image = tf.expand_dims(image, 0)

            model_options = ModelOptions(
                outputs_to_num_classes={'semantic': len(self.train_classes)},
                crop_size=(input_h, input_w),
                atrous_rates=[6, 12, 18],
                output_stride=16)

            predictions = model.predict_logits(
                tf.shape(img)[0:2],
                image,
                model_options=model_options,
                image_pyramid=None)

            predictions = predictions['semantic']
            saver = tf.train.Saver(slim.get_variables_to_restore())
            sess = tf.train.MonitoredTrainingSession(master='')
            saver.restore(sess, model_dir + '/model_weights/model.ckpt')

            initialized_model.input_images = img
            initialized_model.predictions = predictions
            initialized_model.sess = sess
        self.initialized_model = initialized_model
        logger.info('Weights are loaded.')
Example #9
    def _load_train_config(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        train_config_rw = TrainConfigRW(model_dir)
        if not train_config_rw.train_config_exists:
            raise RuntimeError(
                'Unable to run inference, config from training wasn\'t found.')
        train_config = train_config_rw.load()

        self.train_classes = sly.FigClasses(train_config['out_classes'])
        tr_class_mapping = train_config['class_title_to_idx']

        # create reverse mapping (index -> class title) to order class names by index
        rev_mapping = {v: k for k, v in tr_class_mapping.items()}
        self.train_names = [rev_mapping[i]
                            for i in range(len(rev_mapping))]  # ordered

        logger.info('Read model internal class mapping',
                    extra={'class_mapping': tr_class_mapping})
        logger.info('Read model out classes',
                    extra={'classes': self.train_classes.py_container})
Example #10
    def _load_train_config(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir

        train_config_rw = TrainConfigRW(model_dir)
        if not train_config_rw.train_config_exists:
            raise RuntimeError(
                'Unable to run inference, config from training wasn\'t found.')
        self.train_config = train_config_rw.load()

        self.class_title_to_idx = self.train_config['mapping']
        self.train_classes = sly.FigClasses(self.train_config['classes'])
        logger.info('Read model internal class mapping',
                    extra={'class_mapping': self.class_title_to_idx})
        logger.info('Read model out classes',
                    extra={'classes': self.train_classes.py_container})

        out_class_mapping = {
            x: self.class_title_to_idx[x]
            for x in (x['title'] for x in self.train_classes)
        }
        self.inv_mapping = inverse_mapping(out_class_mapping)
Example #11
                init_fn = slim.assign_from_checkpoint_fn(
                    join(self.helper.paths.model_dir, 'model_weights',
                         'model.ckpt'),
                    variables_to_restore,
                    ignore_missing_vars=ignore_missing_vars)
                init_fn(sess)

        input_shape_hw = (self.input_size_wh[1], self.input_size_wh[0])
        train(data_dicts=self.tf_data_dicts,
              class_num=len(self.out_classes),
              input_size=input_shape_hw,
              lr=self.config['lr'],
              n_epochs=self.config['epochs'],
              num_clones=len(device_ids),
              iters_cnt=self.iters_cnt,
              val_every=self.config['val_every'],
              model_init_fn=init_model_fn,
              save_cback=dump_model)


def main():
    cv2.setNumThreads(0)
    x = DeepLabTrainer()
    x.train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE', None):
        sly.add_default_logging_into_file(logger, sly.TaskPaths().debug_dir)
    sly.main_wrapper('DEEPLAB_TRAIN', main)
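
The trainer reads its hyperparameters from self.config; based only on the keys accessed in this excerpt ('lr', 'epochs', 'val_every'), the relevant fragment of the training settings might look like the hypothetical sample below (all values are placeholders):

config = {
    'lr': 0.001,     # learning rate passed to train()
    'epochs': 10,    # passed as n_epochs
    'val_every': 1,  # how often validation is run
}
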
Example #12
def main():
    logger.info('Hello world.')

    # It isn't strictly necessary, but let's suppose our data is laid out as for a Supervisely task:
    # input in '/sly_task_data/data' and results in '/sly_task_data/results'.
    # TaskPaths provides these paths.
    task_paths = sly.TaskPaths()

    in_pr_dir = task_paths.project_dir  # the path includes the project name

    in_pr_meta = sly.ProjectMeta.from_dir(in_pr_dir)
    # Now we've read the meta of the input project.
    logger.info('Input project meta: {} class(es).'.format(
        len(in_pr_meta.classes)))

    in_pr_fs = sly.ProjectFS.from_disk(
        *sly.ProjectFS.split_dir_project(in_pr_dir))
    # Now we've read the project structure.
    logger.info(
        'Input project: "{}" contains {} dataset(s) and {} image(s).'.format(
            in_pr_fs.pr_structure.name, len(in_pr_fs.pr_structure.datasets),
            in_pr_fs.image_cnt))

    # It's convenient to create the output project structure and store the source file paths in ia_data.
    out_pr_structure = sly.ProjectStructure(
        'my_new_project')  # rename project... just for fun
    for item_descr in in_pr_fs:  # iterate over input project
        new_ia_data = {
            'src_ann_path': item_descr.ann_path,
            'src_img_path': item_descr.img_path,
            **item_descr.ia_data  # contains 'image_ext' which is required to write images
        }
        out_pr_structure.add_item(item_descr.ds_name, item_descr.image_name,
                                  new_ia_data)
    # ProjectFS will provide the output file paths
    out_pr_fs = sly.ProjectFS(task_paths.results_dir, out_pr_structure)

    # We will add the rectangle to each annotation.
    new_class_title = 'new-region'
    rect_to_add = sly.Rect(left=20, top=20, right=50, bottom=100)

    # Ok, start processing.
    out_pr_fs.make_dirs()  # create all directories required for writing
    for item_descr in out_pr_fs:  # iterate over output project
        logger.info('Processing sample',
                    extra={
                        'dataset': item_descr.ds_name,
                        'image_name': item_descr.image_name
                    })

        # Copy image unchanged.
        sly.copy_file(item_descr.ia_data['src_img_path'], item_descr.img_path)

        # Read annotation.
        ann_packed = sly.json_load(item_descr.ia_data['src_ann_path'])
        ann = sly.Annotation.from_packed(ann_packed, in_pr_meta)

        # Add the new figure to the annotation.
        # The figure constructor returns an iterable of new figures
        # (e.g., a line cropped by the image bounds may produce several lines); here we'll get at most one figure,
        # or no figures at all if the image is smaller than 20x20.
        new_figures = sly.FigureRectangle.from_rect(new_class_title,
                                                    ann.image_size_wh,
                                                    rect_to_add)
        ann['objects'].extend(new_figures)

        # Save annotation.
        sly.json_dump(ann.pack(), item_descr.ann_path)

    # Finally, don't forget to create and save the output project meta.
    # We keep the original meta and add a new class with shape "rectangle".
    out_pr_meta = deepcopy(in_pr_meta)
    out_pr_meta.classes.add({
        'title': new_class_title,
        'shape': 'rectangle',
        'color': '#FFFF00'
    })
    # Then store the meta.
    out_pr_meta.to_dir(out_pr_fs.project_path)

    logger.info('Done.')
Example #13
    def _construct_and_fill_model(self):
        model_dir = sly.TaskPaths(determine_in_project=False).model_dir
        self.device_ids = sly.remap_gpu_devices([self.source_gpu_device])
        self.detection_graph = create_detection_graph(model_dir)
        self.session = tf.Session(graph=self.detection_graph)
        logger.info('Weights are loaded.')
Example #14
def main():
    logger.info('Hello world.')

    # It isn't strictly necessary, but let's suppose our data is laid out as for a Supervisely task:
    # input in '/sly_task_data/data' and results in '/sly_task_data/results'.
    # TaskPaths provides these paths.
    task_paths = sly.TaskPaths()

    project_dir = task_paths.project_dir  # the path includes the project name

    project_meta = sly.ProjectMeta.from_dir(project_dir)
    # Now we've read the meta of the input project.
    logger.info('Input project meta: {} class(es).'.format(len(project_meta.classes)))

    project_fs = sly.ProjectFS.from_disk(*sly.ProjectFS.split_dir_project(project_dir))
    # Now we've read the project structure.
    logger.info('Input project: "{}" contains {} dataset(s) and {} image(s).'.format(
        project_fs.pr_structure.name,
        len(project_fs.pr_structure.datasets),
        project_fs.image_cnt
    ))

    # prepare color mapping
    color_mapping = {}
    for cls_descr in project_meta.classes:
        color_s = cls_descr.get('color')
        if color_s is not None:
            color = sly.hex2rgb(color_s)  # use color from project meta if exists
        else:
            color = sly.get_random_color()  # or use random color otherwise
        color_mapping[cls_descr['title']] = color

    # enumerate all input samples (image/annotation pairs)
    for item_descr in project_fs:
        logger.info('Processing input sample',
                    extra={'dataset': item_descr.ds_name, 'image_name': item_descr.image_name})

        # Open image
        img = cv2.imread(item_descr.img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # to work with r-g-b colors

        # And read corresponding annotation.
        ann_packed = sly.json_load(item_descr.ann_path)
        ann = sly.Annotation.from_packed(ann_packed, project_meta)

        # Draw annotations on image
        for fig in ann['objects']:
            color = color_mapping.get(fig.class_title)
            fig.draw(img, color)
            # Note that this method draws lines with width 1, and points as single pixels.

        # Save the image. Note that we just save images here, not a new Supervisely project.
        src_image_ext = item_descr.ia_data['image_ext']  # let's preserve source image format (by extension)
        out_fpath = osp.join(
            task_paths.results_dir, item_descr.project_name, item_descr.ds_name, item_descr.image_name + src_image_ext
        )
        sly.ensure_base_path(out_fpath)  # create intermediate dirs if required

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # convert back to BGR before writing with OpenCV
        cv2.imwrite(out_fpath, img)  # write image

    logger.info('Done.')