Exemple #1
0
    def run_unfinished(max_retries, tasks_module):
        """Re-run the most recent unfinished run of every pipeline.

        For each pipeline, selects the latest scheduled run that is still
        RUNNING or FAILED and has not exceeded *max_retries*, then re-executes
        it with its original arguments via a DirectRunner.

        :param max_retries: maximum retry count a run may have to qualify
        :param tasks_module: module containing the task definitions passed
                             to DirectRunner
        """
        from runner import DirectRunner
        from filenames import fn_formatter  # user's fn_formatter
        from logs import setup_logger

        logger = setup_logger("rerunner",
                              "%s/_logs/rerunner.txt" % os.environ["PREFIX"])
        num_rerun = 0

        for p in db.Pipeline.select():
            pruns = db.PipelineRun.select()
            # BUG FIX: peewee expressions must be combined with & / |.
            # Python's `and`/`or` short-circuit on truthy Expression objects,
            # so the original `A and B` filtered only on B and `A or B`
            # filtered only on A.
            pruns = pruns.where((db.PipelineRun.pipeline_id == p.id)
                                & db.PipelineRun.scheduled)
            pruns = pruns.where((db.PipelineRun.state == RUNNING)
                                | (db.PipelineRun.state == FAILED))
            pruns = pruns.where(db.PipelineRun.retries <= max_retries)
            # Newest run first, so limit(1) below picks the latest attempt.
            pruns = pruns.order_by(db.PipelineRun.start_time.desc())

            for pr in pruns.limit(1):
                num_rerun += 1
                ppl = Pipeline(p.id, pr.run_id)
                dst_task_ids = json.loads(ppl.final_tasks)
                kw = json.loads(ppl.args)
                logger.info("re-run %s (%s) with args %s" %
                            (pr.run_id, ", ".join(dst_task_ids), ppl.args))

                runner = DirectRunner(tasks_module, kw, fn_formatter(kw))
                ppl.run(dst_task_ids, runner)

        logger.info("re-ran %i pipelines" % num_rerun)
Exemple #2
0
    # --- Hyper-parameters and dataset paths, read from config.ini ---
    config = configparser.ConfigParser()
    config.read('config.ini')
    optimizer_type = config['Parameter']['optimizer']
    lr = float(config['Parameter']['lr'])
    batch_size = int(config['Parameter']['batch_size'])
    epochs = int(config['Parameter']['epochs'])
    train_size = int(config['Parameter']['train_size'])
    # NOTE(review): 'augumentation' is misspelled, but it must match the key
    # spelling used in config.ini — do not rename one without the other.
    augumentation = config['Parameter'].getboolean('augumentation')
    clip = float(config['Parameter']['clip'])
    train_path = config['Paths']['train_path']
    test_path = config['Paths']['test_path']
    test_kvasir_path = f'{test_path}/Kvasir'
    # Per-run log file named after the model and stamped to the hour
    # (YYYYMMDDHH), e.g. logs/train_<model>_2023010112.log.
    model_name = config['Parameter']['model']
    log_file = 'logs/train_' + model_name + '_' + datetime.now().strftime(
        '%Y%m%d%H') + '.log'
    train_logger = setup_logger('train_logger', log_file)
    # NOTE(review): the 'model' config value is read above, but ResNetPD is
    # hard-coded here — confirm the config value is only meant for the log name.
    model = ResNetPD().cuda()
    print('Params: ', get_n_params(model))
    train_logger.info(f'Params: {get_n_params(model)}')
    train_logger.info(
        f'optimizer: {optimizer_type}, lr: {lr}, batch_size: {batch_size}, image_size: {train_size}'
    )
    # Optimizer selection: Adam when configured, otherwise SGD with momentum
    # and weight decay.
    params = model.parameters()
    if optimizer_type == 'Adam':
        optimizer = torch.optim.Adam(params, lr)
    else:
        optimizer = torch.optim.SGD(params,
                                    lr,
                                    weight_decay=1e-4,
                                    momentum=0.9)
Exemple #3
0
        dice = 2 * TP / (num_obj + num_pred)
        specif = TN / (TN + FP)
        fmeasure = ((2.0 * pre * recall) / (pre + recall))  # beta = 1.0

    return dice, iou


if __name__ == '__main__':
    # Evaluate saved prediction maps against ground-truth masks, per dataset.
    config = configparser.ConfigParser()
    config.read('config.ini')
    test_path = config['Paths']['test_path']
    # specify model_result_path
    eval_result_path = config['Paths']['eval_result_path']
    print(eval_result_path)
    log_file = 'logs/eval_cpd' + '.log'
    eval_logger = setup_logger('eval_logger', log_file)
    # Standard polyp-segmentation benchmark datasets.
    datasets = [
        'CVC-ClinicDB', 'CVC-ColonDB', 'ETIS-LaribPolypDB', 'Kvasir', 'CVC-300'
    ]
    # datasets = ['Kvasir', 'CVC-ClinicDB']
    print('Model: ', eval_result_path)
    for data_name in datasets:
        # Predictions live under the result path, masks under the test path.
        image_root = f'{eval_result_path}/{data_name}/'
        gt_root = f'{test_path}/{data_name}/masks/'
        eval_loader = eval_dataset(image_root, gt_root)
        # thresholds 1:0:-1/255
        # 256 evenly spaced thresholds from 1 down to 0 (one per 8-bit level).
        thresholds = np.linspace(1, 0, 256)
        # thresholds = [0.5]
        # Per-image, per-threshold accumulators for Dice and IoU scores.
        threshold_dice, threshold_IoU = np.zeros(
            (eval_loader.size, len(thresholds))), np.zeros(
                (eval_loader.size, len(thresholds)))
Exemple #4
0

if __name__ == '__main__':
    # Read training hyper-parameters and dataset locations from config.ini.
    config = configparser.ConfigParser()
    config.read('config.ini')

    parameter = config['Parameter']
    paths = config['Paths']

    optimizer_type = parameter['optimizer']
    lr = float(parameter['lr'])
    batch_size = int(parameter['batch_size'])
    epochs = int(parameter['epochs'])
    train_size = int(parameter['train_size'])
    augumentation = parameter.getboolean('augumentation')
    clip = float(parameter['clip'])

    train_path = paths['train_path']
    test_path = paths['test_path']
    test_kvasir_path = f'{test_path}/Kvasir'

    train_logger = setup_logger(
        'train_logger', 'logs/train_prahardnet_0805.log')

    # Model on GPU; optimizer is Adam when configured, else SGD with momentum.
    model = PraHarDNet().cuda()
    params = model.parameters()
    if optimizer_type == 'Adam':
        optimizer = torch.optim.Adam(params, lr)
    else:
        optimizer = torch.optim.SGD(
            params, lr, weight_decay=1e-4, momentum=0.9)

    # Training images and mask locations, then the batched loader.
    image_root = f'{train_path}/images/'
    gt_root = f'{train_path}/masks/'

    train_loader = get_loader(image_root,
                              gt_root,
                              batchsize=batch_size,
                              trainsize=train_size,
                              augmentation=augumentation)
    total_step = len(train_loader)

if __name__ == '__main__':
    # Hyper-parameters come from the [Parameter] section of config.ini,
    # dataset locations from the [Paths] section.
    config = configparser.ConfigParser()
    config.read('config.ini')
    hp = config['Parameter']
    optimizer_type = hp['optimizer']
    lr = float(hp['lr'])
    batch_size = int(hp['batch_size'])
    epochs = int(hp['epochs'])
    train_size = int(hp['train_size'])
    augumentation = hp.getboolean('augumentation')
    clip = float(hp['clip'])
    train_path = config['Paths']['train_path']
    test_path = config['Paths']['test_path']
    test_kvasir_path = f'{test_path}/Kvasir'
    train_logger = setup_logger('train_logger', 'logs/train_mseg.log')

    model = SwinCPD().cuda()
    params = model.parameters()
    # Adam when configured; otherwise SGD with momentum and weight decay.
    if optimizer_type == 'Adam':
        optimizer = torch.optim.Adam(params, lr)
    else:
        optimizer = torch.optim.SGD(params, lr,
                                    weight_decay=1e-4, momentum=0.9)

    image_root = f'{train_path}/images/'
    gt_root = f'{train_path}/masks/'

    train_loader = get_loader(image_root, gt_root,
                              batchsize=batch_size,
                              trainsize=train_size,
                              augmentation=augumentation)
    total_step = len(train_loader)

    print('#' * 20, 'Start Training', '#' * 20)
Exemple #6
0
# Command-line flags: simple membership tests on sys.argv (no argparse).
DRY_RUN_ENABLED = "--dry-run" in sys.argv
DEBUG_ENABLED = "--debug" in sys.argv

MATCH_VODS_ONLY = "--match-vods-only" in sys.argv
IGNORE_FILE_SIZE_AND_AGE = "--no-size-age" in sys.argv

# Google APIs reset quota at midnight PT
pacific_tz = pytz.timezone("America/Los_Angeles")

# Parent directory of the directory containing this file:
# abspath(__file__ + "/..") normalizes to this file's directory, and
# dirname() then strips one more level.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__ + "/.."))

# Persistent state, user configuration, and upload log, all under <root>/data.
STATE_FILE_PATH = ROOT_DIR + "/data/state.json"
CONFIG_FILE_PATH = ROOT_DIR + "/data/config.json"
UPLOAD_HISTORY_PATH = ROOT_DIR + "/data/upload_history.txt"

# Module-wide logger; verbosity controlled by the --debug flag.
logger = setup_logger(debug_enabled=DEBUG_ENABLED)


def watch_recordings_folder(google: dict):
    """
    Watches the recodings folder for new video files to show up that need to be uploaded.
    Once a Twitch VOD corresponding to a video file is found, the video is uploaded using
    the metadata from the Twitch VOD as it's own.

    Refreshes the Twitch VOD information every twitch_vod_refresh_rate seconds (specified in config.json).
    If no YouTube API quota remains, sleeps until midnight PT (+ 10 minutes to be safe).
    """

    logger.debug(f"config: {config}")

    folder_to_move_completed_uploads = config[
Exemple #7
0
if __name__ == '__main__':
    # --- Training configuration, read from config.ini ---
    config = configparser.ConfigParser()
    config.read('config.ini')
    optimizer_type = config['Parameter']['optimizer']
    lr = float(config['Parameter']['lr'])
    batch_size = int(config['Parameter']['batch_size'])
    epochs = int(config['Parameter']['epochs'])
    train_size = int(config['Parameter']['train_size'])
    # NOTE(review): 'augumentation' is misspelled but must match the key
    # spelling in config.ini.
    augumentation = config['Parameter'].getboolean('augumentation')
    clip = float(config['Parameter']['clip'])
    train_path = config['Paths']['train_path']
    test_path = config['Paths']['test_path']

    test_kvasir_path = f'{test_path}/Kvasir'
    train_logger = setup_logger('train_logger', 'logs/train_effnetv2_2704.log')

    # Resume from a saved checkpoint before moving the model to GPU.
    # NOTE(review): 'checkpoints/effnetv2scpd_' looks like a truncated
    # filename — confirm the checkpoint path is correct.
    model = EffNetV2SCPD()
    weights = torch.load('checkpoints/effnetv2scpd_')
    model.load_state_dict(weights)
    model = model.cuda()
    params = model.parameters()
    # Adam when configured, otherwise SGD with momentum and weight decay.
    if optimizer_type == 'Adam':
        optimizer = torch.optim.Adam(params, lr)
    else:
        optimizer = torch.optim.SGD(params,
                                    lr,
                                    weight_decay=1e-4,
                                    momentum=0.9)

    image_root = '{}/images/'.format(train_path)