コード例 #1
0
ファイル: main.py プロジェクト: Jihadik/supervisely
                     settings['res_names']['project']), sly.OpenMode.CREATE)
    src_datasets = read_datasets(inst_dir)
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)  #make train -> img, ann
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(sample_names))  # for logger
        imgs_dir_new = os.path.join(imgs_dir, ds_name)
        inst_dir_new = os.path.join(inst_dir, ds_name)
        for name in sample_names:
            src_img_path = os.path.join(imgs_dir_new, name + '.jpg')
            inst_path = os.path.join(inst_dir_new, name + '.png')

            if all((os.path.isfile(x) or (x is None)
                    for x in [src_img_path, inst_path])):
                ann = get_ann(src_img_path, inst_path, number_class,
                              pixel_color)
                ds.add_item_file(name, src_img_path, ann=ann)
            progress.iter_done_report()

    out_meta = sly.ProjectMeta(obj_classes=classes_dict)
    out_project.set_meta(out_meta)


def main():
    # Run the dataset conversion, then tell the platform the import finished.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    # 'sceneparsing' is the task name passed to the supervisely entry wrapper.
    sly.main_wrapper('sceneparsing', main)
コード例 #2
0
ファイル: main.py プロジェクト: supervisely/supervisely
            rimg_infos = []
            for img_path, meta_json, hash in related_items:
                rimg_infos.append({ApiField.ENTITY_ID: item_info.id,
                                   ApiField.NAME: meta_json.get(ApiField.NAME, sly.fs.get_file_name_with_ext(img_path)),
                                   ApiField.HASH: hash,
                                   ApiField.META: meta_json.get(ApiField.META, {}) })
            api.pointcloud.add_related_images(rimg_infos)

        sly.fs.remove_dir(temp_dir)

        pass

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED', extra={'event_type': sly.EventType.PROJECT_CREATED, 'project_id': project_info.id})
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files with supported formats were found. Supported formats: {!r}"
                           .format(temp_str, ALLOWED_POINTCLOUD_EXTENSIONS))
    pass


def main():
    # Import point clouds into the project, then report task completion.
    add_pointclouds_to_project()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('POINTCLOUD_RAW_IMPORT', main)
コード例 #3
0
                init_fn = slim.assign_from_checkpoint_fn(
                    join(self.helper.paths.model_dir, 'model_weights',
                         'model.ckpt'),
                    variables_to_restore,
                    ignore_missing_vars=ignore_missing_vars)
                init_fn(sess)

        input_shape_hw = (self.input_size_wh[1], self.input_size_wh[0])
        train(data_dicts=self.tf_data_dicts,
              class_num=len(self.out_classes),
              input_size=input_shape_hw,
              lr=self.config['lr'],
              n_epochs=self.config['epochs'],
              num_clones=len(device_ids),
              iters_cnt=self.iters_cnt,
              val_every=self.config['val_every'],
              model_init_fn=init_model_fn,
              save_cback=dump_model)


def main():
    # Disable OpenCV's internal threading before training starts.
    cv2.setNumThreads(0)
    DeepLabTrainer().train()


if __name__ == '__main__':
    # Optionally mirror logs into a file for debugging sessions.
    if os.getenv('DEBUG_LOG_TO_FILE'):
        sly.add_default_logging_into_file(logger, sly.TaskPaths().debug_dir)
    sly.main_wrapper('DEEPLAB_TRAIN', main)
コード例 #4
0
        res_meta = sly.ProjectMeta()
        for class_name in self.classes:
            res_meta.classes.add({
                'title': class_name,
                'shape': 'polygon',
                'color': sly.gen_new_color()
            })
        res_meta.img_tags.update(self.tags)
        res_meta.to_dir(out_pr_fs.project_path)
        logger.info('Found classes.',
                    extra={
                        'cnt': len(self.classes),
                        'classes': sorted(list(self.classes))
                    })
        logger.info('Created tags.',
                    extra={
                        'cnt': len(self.tags),
                        'tags': sorted(list(self.tags))
                    })


def main():
    """Run the Cityscapes import and signal completion to the platform."""
    ImporterCityscapes().convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('CITYSCAPES_IMPORT', main)
    progress = sly.Progress('Creating users...', len(new_users), app_logger)

    for user, user_data in new_users.items():
        api.user.create(login=user_data[LOGIN_COL_NAME],
                        password=user_data[PASSWORD_COL_NAME],
                        is_restricted=False)
        app_logger.info("User {!r} is created".format(
            user_data[LOGIN_COL_NAME]))
        progress.iter_done_report()

    sly.fs.silent_remove(local_csv_path)

    my_app.stop()


def main():
    """Log the task context, then start the application service."""
    context = {
        "TEAM_ID": TEAM_ID,
        "WORKSPACE_ID": WORKSPACE_ID,
        "INPUT_FILE": INPUT_FILE,
    }
    sly.logger.info("Script arguments", extra=context)

    # Trigger user creation from the CSV as soon as the service starts.
    my_app.run(initial_events=[{"command": "create_user_from_csv"}])


if __name__ == "__main__":
    sly.main_wrapper("main", main)
コード例 #6
0
ファイル: main.py プロジェクト: chrissem/supervisely_old
        5: (255, 0, 0),
        6: (0, 0, 255),
        7: (127, 0, 217),
        8: (248, 248, 248)
    }
    src_datasets = read_datasets(all_ann)
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(sample_names))  # for logger
        for name in sample_names:
            src_img_path = os.path.join(all_img, name + '.png')
            inst_path = os.path.join(all_ann, name + '_mask' + '.png')
            if all((os.path.isfile(x) or (x is None)
                    for x in [src_img_path, inst_path])):
                ann = get_ann(src_img_path, inst_path, number_class,
                              pixel_color)
                ds.add_item_file(name, src_img_path, ann=ann)
            progress.iter_done_report()
    out_meta = sly.ProjectMeta(obj_classes=classes_dict)
    out_project.set_meta(out_meta)


def main():
    # Convert the PennFudan dataset, then report import completion.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('PennFudan', main)
コード例 #7
0
ファイル: main.py プロジェクト: sunnielyu/supervisely
        'AGENT_HOST_DIR',
        'SERVER_ADDRESS',
        'ACCESS_TOKEN',
        'DOCKER_LOGIN',
        'DOCKER_PASSWORD',
        'DOCKER_REGISTRY',
    ]}
    args_opt = {x: os.getenv(x, def_val) for x, def_val in [
        ('WITH_LOCAL_STORAGE', 'true'),
        ('UPLOAD_RESULT_IMAGES', 'false'),
        ('PULL_ALWAYS', 'true'),
        ('DEFAULT_TIMEOUTS', 'true'),
        ('DELETE_TASK_DIR_ON_FINISH', 'true'),
        ('DELETE_TASK_DIR_ON_FAILURE', 'false'),
        ('CHECK_VERSION_COMPATIBILITY', 'false')
    ]}
    args = {**args_opt, **args_req}
    return args


def main(args):
    """Log the (sanitized) environment config, then run the agent loop."""
    # Never write the real registry password into the logs.
    sanitized = dict(args)
    sanitized['DOCKER_PASSWORD'] = '******'
    sly.logger.info('ENVS', extra=sanitized)

    agent = Agent()
    agent.inf_loop()
    agent.wait_all()


if __name__ == '__main__':
    sly.add_default_logging_into_file(sly.logger, constants.AGENT_LOG_DIR())
    sly.main_wrapper('agent', main, parse_envs())
コード例 #8
0
ファイル: train.py プロジェクト: wpilibsuite/supervisely
        progress_dummy.iter_done_report()

        def dump_model(saver, sess, is_best, opt_data):
            self.saver = saver
            self.sess = sess
            self._save_model_snapshot(is_best, opt_data)

        train(self.tf_data_dicts,
              self.config['epochs'],
              self.config['val_every'],
              self.iters_cnt,
              self.config['validate_with_model_eval'],
              pipeline_config=self.tf_config,
              num_clones=len(device_ids),
              save_cback=dump_model,
              is_transfer_learning=(
                  self.config['weights_init_type'] == 'transfer_learning'))


def main():
    # Keep OpenCV single-threaded while training runs.
    cv2.setNumThreads(0)
    ObjectDetectionTrainer().train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE'):
        sly_logger.add_default_logging_into_file(logger,
                                                 sly.TaskPaths.DEBUG_DIR)
    sly.main_wrapper('TF_OBJECT_DETECTION_TRAIN', main)
コード例 #9
0
ファイル: servicer.py プロジェクト: yunweidashuju/supervisely
        'connection': {
            'server_address': None,
            'token': None,
            'task_id': None,
        },
    }

    new_settings = sly.json_load(sly.TaskPaths(determine_in_project=False).settings_path)
    logger.info('Input settings', extra={'settings': new_settings})
    sly.update_recursively(settings, new_settings)
    logger.info('Full settings', extra={'settings': settings})

    def model_creator():
        res = UnetV2FastApplier(settings={
            'device_id': settings['device_id']
        })
        return res

    image_cache = SimpleCache(settings['cache_limit'])
    serv_instance = AgentRPCServicer(logger=logger,
                                     model_creator=model_creator,
                                     apply_cback=single_img_pipeline,
                                     conn_settings=settings['connection'],
                                     cache=image_cache)
    serv_instance.run_inf_loop()


if __name__ == '__main__':
    # Disable OpenCV threading before serving inference requests.
    cv2.setNumThreads(0)
    sly.main_wrapper('UNET_V2_SERVICE', serve)
コード例 #10
0
ファイル: main.py プロジェクト: wsgcode/supervisely
                                exc_str),
                            exc_info=True,
                            extra={
                                'exc_str': exc_str,
                                'dataset_name': ds_name,
                                'image': src_img_path,
                            })
                else:
                    sly.logger.warning(
                        "Processing '{}' skipped because no corresponding mask found."
                        .format(src_img_filename))

                progress.iter_done_report()
            sly.logger.info(
                'Dataset "{}" samples processing is done.'.format(ds_name),
                extra={})

        out_meta = sly.ProjectMeta(obj_classes=self.obj_classes)
        out_project.set_meta(out_meta)
        sly.logger.info('Pascal VOC samples processing is done.', extra={})


def main():
    """Import Pascal VOC segmentation data and report completion."""
    ImporterPascalVOCSegm().convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('PASCAL_VOC_IMPORT', main)
コード例 #11
0
ファイル: train.py プロジェクト: zylearncoding/supervisely
            c_boxes,
            data_dict['sample_cnt'],

            vc_img_paths,
            vc_num_gt_boxes,
            vc_boxes,
            vdata_dict['sample_cnt'],

            int1D_to_p_int(device_ids),
            len(device_ids),
            self.config['data_workers']['train'],
            self.config['epochs'],
            train_steps,
            self.config.get('checkpoint_every', 1),
            self.layer_cutoff,
            1 if self.config['enable_augmentations'] else 0,  # with aug
            int(self.config['print_every_iter']),
            float(self.config['bn_momentum'])
        )


def main():
    """Construct the YOLO trainer and run training."""
    YOLOTrainer().train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE'):
        sly_logger.add_default_logging_into_file(logger, sly.TaskPaths.DEBUG_DIR)
    sly.main_wrapper('YOLO_V3_TRAIN', main)
コード例 #12
0
ファイル: main.py プロジェクト: zylearncoding/supervisely
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'JPEGImages')
    inst_dir_trainval = os.path.join(sly.TaskPaths.DATA_DIR, 'trainval')
    labels_file_path = os.path.join(sly.TaskPaths.DATA_DIR, 'labels.txt')
    number_class, pixel_color = read_colors(labels_file_path)
    out_project = sly.Project(os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']), sly.OpenMode.CREATE)
    src_datasets = read_datasets(inst_dir_trainval)
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name) #make train -> img, ann
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names)) # for logger

        for name in sample_names:
            src_img_path = os.path.join(imgs_dir, name + '.jpg')
            inst_path = os.path.join(inst_dir_trainval, name + '.mat')

            if all((os.path.isfile(x) or (x is None) for x in [src_img_path, inst_path])):
                ann = get_ann(src_img_path, inst_path, number_class, pixel_color)
                ds.add_item_file(name, src_img_path, ann=ann)
            progress.iter_done_report()

    out_meta = sly.ProjectMeta(obj_classes=classes_dict)
    out_project.set_meta(out_meta)


def main():
    """Run the Pascal Context conversion, then report import completion."""
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    # Fixed: the guard body used a 2-space indent, inconsistent with the
    # 4-space indentation used everywhere else in this file (PEP 8).
    sly.main_wrapper('PASCAL_CONTEXT_IMPORT', main)
コード例 #13
0
            if len(related_items) != 0:
                rimg_infos = []
                for img_path, meta_json in related_items:
                    rimg_infos.append({
                        ApiField.ENTITY_ID:
                        pointcloud.id,
                        ApiField.NAME:
                        meta_json[ApiField.NAME],
                        ApiField.HASH:
                        path_info_map[img_path][ApiField.HASH],
                        ApiField.META:
                        meta_json[ApiField.META],
                    })
                api.pointcloud.add_related_images(rimg_infos)

    sly.logger.info('PROJECT_CREATED',
                    extra={
                        'event_type': sly.EventType.PROJECT_CREATED,
                        'project_id': project.id
                    })
    pass


def main():
    # Import Supervisely-format point clouds, then report completion.
    add_pointclouds_to_project()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('POINTCLOUD_SLY_IMPORT', main)
コード例 #14
0
        'connection': {
            'server_address': None,
            'token': None,
            'task_id': None,
        },
    }

    new_settings = sly.json_load(sly.TaskPaths(determine_in_project=False).settings_path)
    logger.info('Input settings', extra={'settings': new_settings})
    sly.update_recursively(settings, new_settings)
    logger.info('Full settings', extra={'settings': settings})

    def model_creator():
        res = DeeplabFastApplier(settings={
            'device_id': settings['device_id'],
        })
        return res

    image_cache = SimpleCache(settings['cache_limit'])
    serv_instance = AgentRPCServicer(logger=logger,
                                     model_creator=model_creator,
                                     apply_cback=single_img_pipeline,
                                     conn_settings=settings['connection'],
                                     cache=image_cache)
    serv_instance.run_inf_loop()


if __name__ == '__main__':
    # Disable OpenCV threading before starting the inference service.
    cv2.setNumThreads(0)
    sly.main_wrapper('DEEPLAB_SERVICE', serve)
コード例 #15
0
ファイル: main.py プロジェクト: zylearncoding/supervisely
                full_mask_fp = join(ann_dir, mask_name)
                labels = read_mask_labels(full_mask_fp, classes_mapping,
                                          obj_class_collection)
                ann = ann.add_labels(labels)

            ds.add_item_file(image_name, full_img_fp, ann=ann)
        except Exception as e:
            exc_str = str(e)
            sly.logger.warn(
                'Input sample skipped due to error: {}'.format(exc_str),
                exc_info=True,
                extra={
                    'exc_str': exc_str,
                    'image': full_img_fp
                })
        progress.iter_done_report()

    if len(masks_map) > 0:
        masks_list = list(masks_map.values())
        sly.logger.warning(
            'Images for masks doesn\'t exist. Masks: {}'.format(masks_list))


def main():
    # Convert binary-mask annotations, then report import completion.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('BINARY_MASKS_IMPORT', main)
コード例 #16
0
ファイル: train.py プロジェクト: onisimchukv/supervisely
        def dump_model(saver, sess, is_best, opt_data):
            out_dir = self.helper.checkpoints_saver.get_dir_to_write()
            TrainConfigRW(out_dir).save(self.out_config)
            save_config(osp.join(out_dir, 'model.config'), self.tf_config)
            model_fpath = os.path.join(out_dir, 'model_weights', 'model.ckpt')
            saver.save(sess, model_fpath)

            self.helper.checkpoints_saver.saved(is_best, opt_data)

        train(self.tf_data_dicts,
              self.config['epochs'],
              self.config['val_every'],
              self.iters_cnt,
              self.config['validate_with_model_eval'],
              pipeline_config=self.tf_config,
              num_clones=len(device_ids),
              save_cback=dump_model)


def main():
    # Avoid OpenCV thread contention during training.
    cv2.setNumThreads(0)
    SSDTrainer().train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE'):
        sly.add_default_logging_into_file(logger, sly.TaskPaths().debug_dir)
    # Silence TensorFlow C++ log output below the error level.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    sly.main_wrapper('SSD_TRAIN', main)
コード例 #17
0
import os

import supervisely_lib as sly
from supervisely_lib.metric.precision_recall_metric import PrecisionRecallMetric
from supervisely_lib.metric.iou_metric import IOU
from supervisely_lib.metric.common import check_class_mapping, CLASSES_MAPPING
from supervisely_lib.io.json import load_json_file


def main():
    """Evaluate precision/recall between the GT and prediction projects."""
    config = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    sly.logger.info('Input settings:', extra={'config': config})

    # The IoU threshold is mandatory for matching predictions to GT objects.
    if IOU not in config:
        raise RuntimeError(
            '"{}" field is missing. Please set Intersection over Union threshold'
            .format(IOU))

    metric = PrecisionRecallMetric(config[CLASSES_MAPPING], config[IOU])
    applier = sly.MetricProjectsApplier(metric, config)
    # Fail fast if the class mapping doesn't fit both projects.
    check_class_mapping(applier.project_gt, applier.project_pred,
                        config[CLASSES_MAPPING])
    applier.run_evaluation()
    metric.log_total_metrics()


if __name__ == '__main__':
    # Optionally mirror logs into a file when DEBUG_LOG_TO_FILE is set.
    if os.getenv('DEBUG_LOG_TO_FILE', None):
        sly.add_default_logging_into_file(sly.logger, sly.TaskPaths.DEBUG_DIR)
    sly.main_wrapper('METRIC_EVALUATION', main)
コード例 #18
0
ファイル: main.py プロジェクト: wangjirui/supervisely
                    sly.fs.silent_remove(path)
                #sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
                progress.iters_done_report(len(batch_names))
            else:
                api.image.upload_hashes(ds_info.id,
                                        batch_names,
                                        batch_hashes,
                                        progress_cb=progress.iters_done_report)

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED',
                        extra={
                            'event_type': sly.EventType.PROJECT_CREATED,
                            'project_id': project_info.id
                        })
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError("{} wasn't created: 0 files were added")
    pass


def main():
    # Add the images to the project, then report that the import finished.
    add_images_to_project()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('IMPORT_IMAGES', main)
コード例 #19
0
ファイル: train.py プロジェクト: zylearncoding/supervisely
                    val_loss = metrics_values_val['loss']
                    model_is_best = val_loss < best_val_loss
                    if model_is_best:
                        best_val_loss = val_loss
                        sly.logger.info(
                            'It\'s been determined that current model is the best one for a while.'
                        )

                    self._save_model_snapshot(model_is_best,
                                              opt_data={
                                                  'epoch':
                                                  self.epoch_flt,
                                                  'val_metrics':
                                                  metrics_values_val,
                                              })

                    policy.reset_if_needed(val_loss, self.model)

            sly.logger.info("Epoch was finished",
                            extra={'epoch': self.epoch_flt})


def main():
    # PyTorch dataloaders require OpenCV's own threading to be disabled.
    cv2.setNumThreads(0)
    UnetV2Trainer().train()


if __name__ == '__main__':
    sly.main_wrapper('UNET_V2_TRAIN', main)
コード例 #20
0
                            'link': line,
                        })
            progress.iter_done_report()


def main():
    """Create a project from link files found in the task data directory."""
    global server_address, task_context, append_to_existing_project

    with open('/sly_task_data/task_settings.json') as settings_file:
        cfg = json.load(settings_file)

    server_address = cfg['server_address']
    headers['x-api-key'] = cfg['api_token']
    task_context = get_task_context(cfg['task_id'])
    append_to_existing_project = cfg['append_to_existing_project']

    project_id = create_project_api(cfg['res_names']['project'])

    data_dir = "/sly_task_data/data"
    # Every *.txt file in the data dir holds the links for one dataset.
    txt_files = [name for name in os.listdir(data_dir) if name.endswith('.txt')]
    for txt_name in txt_files:
        process_dataset_links(project_id, os.path.join(data_dir, txt_name))

    with open('/sly_task_data/results/project_info.json', 'w') as outfile:
        json.dump({'project_id': project_id}, outfile)


if __name__ == '__main__':
    sly.main_wrapper('IMAGES_ONLY_IMPORT', main)
コード例 #21
0
# coding: utf-8

import cv2

import supervisely_lib as sly
from supervisely_lib.nn.hosted.deploy import ModelDeploy
from inference import DeeplabSingleImageApplier


def main():
    """Start the DeepLab model deployment service."""
    ModelDeploy(model_applier_cls=DeeplabSingleImageApplier).run()


if __name__ == '__main__':
    # OpenCV threading must be off before the service starts.
    cv2.setNumThreads(0)
    sly.main_wrapper('DEEPLAB_SERVICE', main)
コード例 #22
0
    task_settings = json.load(open(sly.TaskPaths.SETTINGS_PATH, 'r'))
    try:
        project = sly.Project(sly.TaskPaths.DATA_DIR, sly.OpenMode.READ)
    except FileNotFoundError:
        possible_projects = sly.fs.get_subdirs(sly.TaskPaths.DATA_DIR)
        if len(possible_projects) != 1:
            raise RuntimeError(
                'Wrong input project structure, or multiple projects are passed.'
            )
        project = sly.Project(
            os.path.join(sly.TaskPaths.DATA_DIR, possible_projects[0]),
            sly.OpenMode.READ)
    except Exception as e:
        raise e

    sly.logger.info('Project info: {} dataset(s), {} images(s).'.format(
        len(project.datasets), project.total_items))
    project.validate()

    project.copy_data(sly.TaskPaths.RESULTS_DIR,
                      task_settings['res_names']['project'])


def main():
    # Validate and copy the Supervisely-format project, then report completion.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('SLY_FORMAT_IMPORT', main)
コード例 #23
0
ファイル: main.py プロジェクト: supervisely/supervisely
                        'image_name': dicom_filename,
                    })
                skipped_count += 1

            dataset_progress.iter_done_report()

    sly.logger.info('Processed.',
                    extra={
                        'samples': samples_count,
                        'skipped': skipped_count
                    })

    if out_project.total_items == 0:
        raise RuntimeError(
            'Result project is empty! All input DICOM files have unreadable format!'
        )

    out_meta = sly.ProjectMeta(tag_metas=tag_metas)
    out_project.set_meta(out_meta)


def main():
    # Convert DICOM files to images, then report import completion.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    #@TODO: for debug
    #sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
    sly.main_wrapper('DICOM_TO_IMAGES_IMPORT', main)
コード例 #24
0
ファイル: main.py プロジェクト: wangjirui/supervisely
                _ = api.video.upload_hash(ds_info.id, item_name, hash,
                                          stream_index)
        except Exception as e:
            sly.logger.warning(
                "File skipped {!r}: error occurred during processing {!r}".
                format(original_path, str(e)))

    if project_info is not None:
        sly.logger.info('PROJECT_CREATED',
                        extra={
                            'event_type': sly.EventType.PROJECT_CREATED,
                            'project_id': project_info.id
                        })
    else:
        temp_str = "Project"
        if append_to_existing_project is True:
            temp_str = "Dataset"
        raise RuntimeError(
            "{} wasn't created: 0 files with supported codecs ({}) and containers ({}). It is a limitation for Community Edition (CE)."
            .format(temp_str, _SUPPORTED_CODECS, _SUPPORTED_CONTAINERS))
    pass


def main():
    # Add the videos to the project, then report that the import finished.
    add_videos_to_project()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('VIDEO_ONLY_IMPORT', main)
コード例 #25
0
ファイル: train.py プロジェクト: yunweidashuju/supervisely
        def dump_model(saver, sess, is_best, opt_data):
            out_dir = self.helper.checkpoints_saver.get_dir_to_write()
            TrainConfigRW(out_dir).save(self.out_config)
            save_config(osp.join(out_dir, 'model.config'), self.tf_config)
            model_fpath = os.path.join(out_dir, 'model_weights', 'model.ckpt')
            saver.save(sess, model_fpath)

            self.helper.checkpoints_saver.saved(is_best, opt_data)

        train(self.tf_data_dicts,
              self.config['epochs'],
              self.config['val_every'],
              self.iters_cnt,
              self.config['validate_with_model_eval'],
              pipeline_config=self.tf_config,
              num_clones=len(device_ids),
              save_cback=dump_model)


def main():
    # Keep OpenCV single-threaded during training.
    cv2.setNumThreads(0)
    MaskRCNNTrainer().train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE'):
        sly.add_default_logging_into_file(logger, sly.TaskPaths().debug_dir)
    sly.main_wrapper('MASK_RCNN_TRAIN', main)
コード例 #26
0
ファイル: main.py プロジェクト: chrissem/supervisely_old
                     settings['res_names']['project']), sly.OpenMode.CREATE)
    src_datasets = read_datasets(inst_dir)
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)  #make train -> img, ann
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(sample_names))  # for logger
        imgs_dir_new = os.path.join(imgs_dir, ds_name)
        inst_dir_new = os.path.join(inst_dir, ds_name)
        for name in sample_names:
            src_img_path = os.path.join(imgs_dir_new, name + '.jpg')
            inst_path = os.path.join(inst_dir_new, name + '.png')

            if all((os.path.isfile(x) or (x is None)
                    for x in [src_img_path, inst_path])):
                ann = get_ann(src_img_path, inst_path, number_class,
                              pixel_color)
                ds.add_item_file(name, src_img_path, ann=ann)
            progress.iter_done_report()

    out_meta = sly.ProjectMeta(obj_classes=classes_dict)
    out_project.set_meta(out_meta)


def main():
    # Run the conversion, then report that the import finished.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('COCO_stuff', main)
コード例 #27
0
ファイル: main.py プロジェクト: zylearncoding/supervisely
                img_name = name + '.jpg'
                src_img_path = os.path.join(self._imgs_dir(ds_name), img_name)
                inst_path = os.path.join(self._inst_dir(ds_name), name + '.png')

                try:
                    ann = self._generate_annotation(src_img_path, inst_path)
                    ds.add_item_file(img_name, src_img_path, ann=ann)
                except Exception as e:
                    exc_str = str(e)
                    sly.logger.warn('Input sample skipped due to error: {}'.format(exc_str), exc_info=True, extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image': src_img_path,
                    })
                progress.iter_done_report()
            sly.logger.info("Dataset '{}' samples processing is done.".format(ds_name), extra={})

        out_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(self._class_id_to_object_class.values()))
        out_project.set_meta(out_meta)
        sly.logger.info("Mapillary samples processing is done.", extra={})


def main():
    """Run the Mapillary import and signal completion to the platform."""
    ImporterMapillary().convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('MAPILLARY_IMPORT', main)
コード例 #28
0
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    lists_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'DAVIS/ImageSets')
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'DAVIS/JPEGImages')
    inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'DAVIS/Annotations')
    src_datasets = read_datasets(lists_dir)
    out_project = sly.Project(os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
                              sly.OpenMode.CREATE)
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name) #make train -> img, ann
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names)) # for logger
        for name in sample_names:
            src_img_path = os.path.join(imgs_dir, name + '.jpg')
            inst_path = os.path.join(inst_dir, name + '.png')
            if all((os.path.isfile(x) or (x is None) for x in [src_img_path, inst_path])):
                ann = get_ann(src_img_path, inst_path)
                name = name.replace('/', '_')
                ds.add_item_file(name, src_img_path, ann=ann)
            progress.iter_done_report()
    out_meta = sly.ProjectMeta(obj_classes=classes_dict)
    out_project.set_meta(out_meta)


def main():
    """Run the DAVIS 2016 conversion, then report import completion."""
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    # Fixed: the guard body used a 2-space indent, inconsistent with the
    # 4-space indentation used everywhere else in this file (PEP 8).
    sly.main_wrapper('DAVIS_2016', main)

コード例 #29
0
        sly.TaskPaths.DATA_DIR,
        'graz50_facade_dataset/graz50_facade_dataset/labels_full')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     settings['res_names']['project']), sly.OpenMode.CREATE)
    src_datasets = read_datasets(all_ann)
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)  #make train -> img, ann
        progress = sly.Progress('Dataset: {!r}'.format(ds_name),
                                len(sample_names))  # for logger
        for name in sample_names:
            src_img_path = os.path.join(all_img, name + '.png')
            inst_path = os.path.join(all_ann, name + '.png')

            if all((os.path.isfile(x) or (x is None)
                    for x in [src_img_path, inst_path])):
                ann = get_ann(src_img_path, inst_path, default_classes_colors)
                ds.add_item_file(name, src_img_path, ann=ann)
            progress.iter_done_report()
    out_meta = sly.ProjectMeta(obj_classes=classes_dict)
    out_project.set_meta(out_meta)


def main():
    # Run the conversion, then report that the import finished.
    convert()
    sly.report_import_finished()


if __name__ == '__main__':
    sly.main_wrapper('ParisArt', main)
コード例 #30
0
ファイル: train.py プロジェクト: zylearncoding/supervisely
                samples=samples_lst,
                class_mapping=self.class_title_to_idx)
            self.datasets[the_name] = the_ds
            logger.info('Prepared dataset.',
                        extra={
                            'dataset_purpose': the_name,
                            'dataset_tag': the_tag,
                            'sample_cnt': len(samples_lst)
                        })

    def train(self):
        """Train the Mask R-CNN model on the prepared train/val datasets."""
        fit_params = dict(
            learning_rate=self.mask_rcnn_config.LEARNING_RATE,
            epochs=self.config['epochs'],
            layers=self.config['train_layers'],
            out_config=self.out_config,
            sly_checkpoints_saver=self.checkpoints_saver,
        )
        self.model.train(self.datasets['train'], self.datasets['val'],
                         **fit_params)


def main():
    """Instantiate the Matterport Mask R-CNN trainer and run training."""
    MaskRCNNTrainer().train()


if __name__ == '__main__':
    if os.getenv('DEBUG_LOG_TO_FILE'):
        sly.sly_logger.add_default_logging_into_file(logger,
                                                     sly.TaskPaths.DEBUG_DIR)
    sly.main_wrapper('MASK_RCNN_MATTERPORT_TRAIN', main)