Example #1
    def modify_tags(self):
        neptune.append_tags("tag1")
        neptune.append_tag(["tag2_to_remove", "tag3"])
        neptune.remove_tag("tag2_to_remove")
        neptune.remove_tag("tag4_remove_non_existing")

        exp = neptune.get_experiment()
        assert set(exp.get_tags()) == {
            "initial tag 1", "initial tag 2", "tag1", "tag3"
        }
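
These calls come from the legacy neptune-client API, where tags are edited on the currently active experiment. A minimal standalone sketch of the same flow, reusing the anonymous shared/colab-test-run project that appears later on this page (the experiment name is made up for illustration):

import neptune

neptune.init(project_qualified_name='shared/colab-test-run',
             api_token='ANONYMOUS')
# start an experiment that already carries two tags
neptune.create_experiment(name='tag-demo',
                          tags=['initial tag 1', 'initial tag 2'])
neptune.append_tags('tag1')                      # add a single tag
neptune.append_tag(['tag2_to_remove', 'tag3'])   # add several tags at once
neptune.remove_tag('tag2_to_remove')             # drop one of them again
print(neptune.get_experiment().get_tags())
neptune.stop()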
Example #2
def do_main():
    neptune.init('ods/wheat')
    # Create experiment with defined parameters
    neptune.create_experiment(name=model_name,
                              params=PARAMS,
                              tags=[experiment_name, experiment_tag],
                              upload_source_files=[os.path.basename(__file__)])

    neptune.append_tags(f'fold_{fold}')

    device = (torch.device(f'cuda:{gpu_number}')
              if torch.cuda.is_available() else torch.device('cpu'))
    print(device)

    print(len(train_boxes_df))
    print(len(train_images_df))

    # Keep only images that have at least one ground-truth box
    print('Leave only train images with boxes (validation)')
    with_boxes_filter = train_images_df[image_id_column].isin(
        train_boxes_df[image_id_column].unique())

    negative_images = enumerate_images(DIR_NEGATIVE)
    negative_images = [(negative_prefix + filename[:-4])
                       for filename in negative_images]
    negative_images.sort()
    # take first 100 now...
    negative_images = negative_images[:100]
    """
    spike_images = enumerate_images(DIR_SPIKE)
    spike_images = [(spike_dataset_prefix + filename[:-4]) for filename in spike_images]
    spike_images.sort()
    assert len(spike_images) > 0
    """

    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    load_weights(net,
                 '../timm-efficientdet-pytorch/efficientdet_d5-ef44aea8.pth')

    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))

    model_train = DetBenchTrain(net, config)
    model_eval = DetBenchEval(net, config)

    manager = ModelManager(model_train, model_eval, device)

    pretrained_weights_file = 'pretrained.pth'

    images_val = train_images_df.loc[(train_images_df[fold_column] == fold)
                                     & with_boxes_filter,
                                     image_id_column].values
    images_train = train_images_df.loc[(train_images_df[fold_column] != fold),
                                       image_id_column].values

    #images_train = list(images_train) + list(negative_images) + list(spike_images)
    images_train = list(images_train) + list(negative_images)
    print(len(images_train), len(images_val))

    train_dataset = WheatDataset(images_train,
                                 DIR_TRAIN,
                                 train_boxes_df,
                                 transforms=get_train_transform(),
                                 is_test=False)
    valid_dataset = WheatDataset(images_val,
                                 DIR_TRAIN,
                                 train_boxes_df,
                                 transforms=get_valid_transform(),
                                 is_test=True)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=train_batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn,
                                   drop_last=True)

    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=inf_batch_size,
                                   shuffle=False,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    weights_file = f'{experiment_name}.pth'
    if os.path.exists(pretrained_weights_file):
        # continue training
        print('Continue training, loading weights: ' + pretrained_weights_file)
        load_weights(net, pretrained_weights_file)

    manager.run_train(train_data_loader,
                      valid_data_loader,
                      n_epoches=n_epochs,
                      weights_file=weights_file,
                      factor=factor,
                      start_lr=start_lr,
                      min_lr=min_lr,
                      lr_patience=lr_patience,
                      overall_patience=overall_patience,
                      loss_delta=loss_delta)

    # log the checkpoint name (weights_file without the '.pth' extension)
    neptune.log_text('save checkpoints as', weights_file[:-4])
    neptune.stop()
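
The collate_fn passed to both DataLoaders above is not shown in the example. For detection models that take per-image target dicts, the conventional definition simply transposes the batch instead of stacking tensors; a sketch under that assumption:

def collate_fn(batch):
    # turn a list of (image, target, ...) tuples into a tuple of lists
    return tuple(zip(*batch))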
Example #3
    'start_lr': start_lr,
    'min_lr': min_lr,
    'lr_patience': lr_patience,
    'overall_patience': overall_patience,
    'loss_delta': loss_delta,
    'experiment_tag': experiment_tag,
}

# Create experiment with defined parameters
neptune.create_experiment(
    name=model_name,
    params=PARAMS,
    tags=[experiment_name, experiment_tag, f'fold_{fold}'],
    upload_source_files=['model29.py', 'src/datasets/get_transforms.py'])

neptune.append_tags(['grad_accum'])

train_boxes_df = pd.read_csv(os.path.join(DATA_DIR, 'fixed_train.csv'))
train_images_df = pd.read_csv(os.path.join(DATA_DIR, 'orig_alex_folds.csv'))
# NOTE: the second read below overrides the folds file loaded just above
train_images_df = pd.read_csv(
    os.path.join(DATA_DIR, 'alex_folds_with_negative.csv'))

train_boxes_df = preprocess_boxes(train_boxes_df)
# filter tiny boxes as well
#train_boxes_df = filter_box_size(train_boxes_df, min_size = 10)
#train_boxes_df['area'] = train_boxes_df['w'] * train_boxes_df['h']

# NOTE: this re-read replaces the preprocessed train_boxes_df from above
train_boxes_df = pd.read_csv(os.path.join(DATA_DIR, 'fixed_train.csv'))
print('boxes original: ', len(train_boxes_df))
#spike_df = pd.read_csv(os.path.join(DATA_DIR, 'spike_train.csv'))
#train_boxes_df = pd.concat((train_boxes_df, spike_df), axis=0, sort=False)
Example #4

# Initialize Neptune

import neptune

neptune.init(api_token="ANONYMOUS",
             project_qualified_name="shared/colab-test-run")

# Basic Example

neptune.create_experiment(name='basic-colab-example',
                          params={'learning_rate': 0.1})

neptune.log_metric('accuracy', 0.93)

neptune.append_tags(['basic', 'finished_successfully'])

# tests
exp = neptune.get_experiment()

neptune.stop()

# tests

if set(exp.get_logs().keys()) != {'accuracy'}:
    raise ValueError()
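
An analogous check for the tags set in this example, mirroring the assertion style of Example #1 (a sketch, not part of the original test):

if set(exp.get_tags()) != {'basic', 'finished_successfully'}:
    raise ValueError()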

# Keras classification example [Advanced]

get_ipython().system(' pip install tensorflow==2.3.1 scikit-plot==0.3.7')
Example #5
    'n_epochs': n_epochs,
    'factor': factor,
    'start_lr': start_lr,
    'min_lr': min_lr,
    'lr_patience': lr_patience,
    'overall_patience': overall_patience,
    'loss_delta': loss_delta,
}

# Create experiment with defined parameters
neptune.create_experiment(name=model_name,
                          params=PARAMS,
                          tags=[experiment_name, experiment_tag],
                          upload_source_files=['train_effdet_val.py'])

neptune.append_tags(f'fold_{fold}')

train_boxes_df = pd.read_csv(os.path.join(DATA_DIR, 'fixed_train.csv'))
train_images_df = pd.read_csv(os.path.join(DATA_DIR, 'orig_alex_folds.csv'))

train_boxes_df['x'] = -1
train_boxes_df['y'] = -1
train_boxes_df['w'] = -1
train_boxes_df['h'] = -1


def expand_bbox(x):
    # pull the four numbers out of a bbox string like "[834.0, 222.0, 56.0, 36.0]"
    r = np.array(re.findall("([0-9]+[.]?[0-9]*)", x))
    if len(r) == 0:
        r = [-1, -1, -1, -1]
    return r
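
expand_bbox is typically applied to the raw bbox strings from the competition CSV to fill the x, y, w, h columns that were just initialized to -1. A sketch of that step, assuming train_boxes_df still carries a 'bbox' column of strings such as '[834.0, 222.0, 56.0, 36.0]':

train_boxes_df[['x', 'y', 'w', 'h']] = np.stack(
    train_boxes_df['bbox'].apply(lambda s: expand_bbox(s)))
# re.findall returns strings, so cast the new columns to floats
train_boxes_df[['x', 'y', 'w', 'h']] = \
    train_boxes_df[['x', 'y', 'w', 'h']].astype(float)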
Example #6

    'lr_patience': lr_patience,
    'overall_patience': overall_patience,
    'loss_delta': loss_delta,
    'augs': 'd4'
}

# Create experiment with defined parameters
neptune.create_experiment(
    name=model_name,
    params=PARAMS,
    tags=[experiment_name, experiment_tag, f'fold_{fold}'],
    upload_source_files=[
        'train_effdet_val2.py', 'src/datasets/get_transforms.py'
    ])

neptune.append_tags(['augs_d4'])

train_boxes_df = pd.read_csv(os.path.join(DATA_DIR, 'fixed_train.csv'))
train_images_df = pd.read_csv(os.path.join(DATA_DIR, 'orig_alex_folds.csv'))

train_boxes_df = preprocess_boxes(train_boxes_df)
# filter tiny boxes as well
#train_boxes_df = filter_box_size(train_boxes_df, min_size = 10)


def train_box_callback(image_id):
    records = train_boxes_df[train_boxes_df['image_id'] == image_id]
    return records[['x', 'y', 'w', 'h']].values
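
A quick hypothetical usage of train_box_callback: look up the ground-truth boxes for a single image id:

boxes = train_box_callback(train_boxes_df['image_id'].iloc[0])
print(boxes.shape)  # (n_boxes, 4) array with columns x, y, w, h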


def split_prediction_string(str):
Example #7

def do_main():
    neptune.init('ods/wheat')
    # Create experiment with defined parameters
    neptune.create_experiment(name=model_name,
                              params=PARAMS,
                              tags=[experiment_name, experiment_tag],
                              upload_source_files=[os.path.basename(__file__)])

    neptune.append_tags(f'fold_{fold}')
    neptune.append_tags(['grad_accum'])

    device = (torch.device(f'cuda:{gpu_number}')
              if torch.cuda.is_available() else torch.device('cpu'))
    print(device)

    print(len(train_boxes_df))
    print(len(train_images_df))

    # Keep only images that have at least one ground-truth box
    print('Leave only train images with boxes (validation)')
    with_boxes_filter = train_images_df[image_id_column].isin(
        train_boxes_df[image_id_column].unique())

    # configure the train and validation model benches
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    load_weights(net,
                 '../timm-efficientdet-pytorch/efficientdet_d5-ef44aea8.pth')

    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))
    model_train = DetBenchTrain(net, config)
    model_eval = DetBenchEval(net, config)

    manager = ModelManager(model_train, model_eval, device)

    images_val = train_images_df.loc[(train_images_df[fold_column] == fold)
                                     & with_boxes_filter,
                                     image_id_column].values
    images_train = train_images_df.loc[(train_images_df[fold_column] != fold)
                                       & with_boxes_filter,
                                       image_id_column].values

    print(
        f'\nTrain images: {len(images_train)}, validation images: {len(images_val)}'
    )

    # get augs
    #augs_dict = set_augmentations(our_image_size)

    # get datasets (note: capped to the first 160 images each)
    train_dataset = WheatDataset(
        image_ids=images_train[:160],
        image_dir=DIR_TRAIN,
        boxes_df=train_boxes_df,
        transforms=get_train_transform(our_image_size),
        is_test=False)
    valid_dataset = WheatDataset(
        image_ids=images_val[:160],
        image_dir=DIR_TRAIN,
        boxes_df=train_boxes_df,
        transforms=get_valid_transform(our_image_size),
        is_test=True)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=train_batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn,
                                   drop_last=True)

    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=inf_batch_size,
                                   shuffle=False,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    weights_file = f'../checkpoints/{model_name}/{experiment_name}.pth'

    #pretrain_weights_file = f'{checkpoints_dir}/{experiment_name}.pth'
    #if os.path.exists(pretrain_weights_file):
    #    print(f'Continue training, loading weights from {pretrain_weights_file}')
    #    load_weights(net, pretrain_weights_file)

    manager.run_train(train_generator=train_data_loader,
                      val_generator=valid_data_loader,
                      n_epoches=n_epochs,
                      weights_file=weights_file,
                      factor=factor,
                      start_lr=start_lr,
                      min_lr=min_lr,
                      lr_patience=lr_patience,
                      overall_patience=overall_patience,
                      loss_delta=loss_delta)

    # log the checkpoint name (weights_file without the '.pth' extension)
    neptune.log_text('save checkpoints as', weights_file[:-4])
    neptune.stop()
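
The examples never show how do_main is invoked; presumably the script ends with the usual entry-point guard:

if __name__ == '__main__':
    do_main()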