Example no. 1
0
                    type=int,
                    help='Batch size')
# --- Example 1: training-script argument setup (fragment: `parser`, `Parser`,
# --- and the imports are defined above this excerpt) ---
parser.add_argument('-restore',
                    '--restore',
                    default='model_last.pth',
                    type=str)  # model_last.pth
parser.add_argument('-output_path', '--output_path', default='ckpts', type=str)
parser.add_argument('-prefix_path', '--prefix_path', default='', type=str)

# Directory containing this script (not used in the visible fragment).
path = os.path.dirname(__file__)

args = parser.parse_args()
# Merge settings from the config named by --cfg into the parsed args.
# NOTE(review): `Parser` is a project-local config loader, not argparse's.
args = Parser(args.cfg, log='train').add_args(args)

# Create the checkpoint directory and point `resume` at the restore file in it.
ckpts = args.makedir()
args.resume = os.path.join(ckpts, args.restore)  # specify the epoch


def main():
    """Training entry point.

    NOTE(review): this function is truncated in this excerpt — the body
    continues past the last visible line.
    """
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # CUDA is mandatory; fail fast on CPU-only machines.
    assert torch.cuda.is_available(), "Currently, we only support CUDA version"

    # Seed every RNG source so runs are reproducible.
    torch.manual_seed(args.seed)
    # torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Resolve the network class named in the config from the project's
    # `models` package and instantiate it with config-supplied kwargs.
    Network = getattr(models, args.net)  #
    model = Network(**args.net_params)
    model = torch.nn.DataParallel(model).to(device)
Example no. 2
0
# --- Example 2: evaluation-script argument setup (fragment: `parser`,
# --- `Parser`, str2bool, and the imports are defined above this excerpt) ---

# Fixed: help text said "snopshot" — user-facing typo.
parser.add_argument('-snapshot', '--snapshot', default=False, type=str2bool,
                    help='If True, saving the snapshot figure of all samples.')

# argparse.SUPPRESS: the attribute is absent unless the flag is given on the
# command line, so a value merged from the YAML config below is not clobbered
# by an argparse default.
parser.add_argument('-restore_prefix', '--restore_prefix', default=argparse.SUPPRESS, type=str,
                    help='The path to restore the model.')  # 'model_epoch_300.pth'
# Comma-separated epoch numbers whose checkpoints should be evaluated.
parser.add_argument('-restore_epoch', '--restore_epoch', default='399,499,599,699,799,899,999', type=str)
parser.add_argument('-out_dir', '--out_dir', default='/opt/ml/model/', type=str)
parser.add_argument('-prefix_path', '--prefix_path', default='', type=str)

# Directory containing this script (not used in the visible fragment).
path = os.path.dirname(__file__)

args = parser.parse_args()
# NOTE(review): `Parser` is a project-local config loader, not argparse's.
args = Parser(args.cfg, log='train').add_args(args)
# args.gpu = str(args.gpu)
# ckpts = args.makedir()
# Build one checkpoint path per requested epoch, e.g. "<prefix>399.pth".
args.resume = [args.restore_prefix + epoch + '.pth' for epoch in args.restore_epoch.split(',')]
# sample:
# CUDA_VISIBLE_DEVICES=1 python test_all.py --mode=1 --is_out=True --verbose=True --use_TTA=True --postprocess=True --snapshot=False --restore_prefix=./ckpts/DMFNet_pe_all/model_epoch_ --cfg=./ckpts/DMFNet_pe_all/cfg.yaml


def main():
    """Evaluation entry point.

    NOTE(review): this function is truncated in this excerpt — the body
    continues past the last visible line.
    """
    # setup environments and seeds
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # CUDA is mandatory; fail fast on CPU-only machines.
    assert torch.cuda.is_available(), "Currently, we only support CUDA version"

    # Seed every RNG source so runs are reproducible.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    # Resolve the network class named in the config from the `models` package.
    Network = getattr(models, args.net)  #
# --- Example 3: training-script argument setup (fragment: `parser`, `Parser`,
# --- and the imports are defined above this excerpt) ---
#parser.add_argument('-cfg', '--cfg', default='deepmedic_ce_50_50_c25_redo', type=str)
#parser.add_argument('-cfg', '--cfg', default='deepmedic_ce_50_50_all', type=str)
parser.add_argument('-gpu', '--gpu', default='0', type=str)
parser.add_argument('-out', '--out', default='', type=str)

# Directory containing this script (not used in the visible fragment).
path = os.path.dirname(__file__)

## parse arguments
args = parser.parse_args()
# NOTE(review): `Parser` is a project-local config loader, not argparse's.
args = Parser(args.cfg, log='train').add_args(args)
args.gpu = str(args.gpu)

# Resume from the newest saved model when the config did not name one and a
# previous run left model_last.tar in the checkpoint directory.
ckpts = args.makedir()
resume = os.path.join(ckpts, 'model_last.tar')
if not args.resume and os.path.exists(resume):
    args.resume = resume


def main():
    """Training entry point.

    NOTE(review): this function is truncated in this excerpt — the body
    continues past the last visible line.
    """
    # setup environments and seeds
    # Restrict CUDA to the GPUs named by --gpu before any CUDA call.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Seed every RNG source so runs are reproducible.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    # setup networks
    # Resolve the network class named in the config from the `models` package
    # and instantiate it with config-supplied kwargs.
    Network = getattr(models, args.net)
    model = Network(**args.net_params)
    model = model.cuda()
Example no. 4
0
                    help='Batch size')
# --- Example 4: training-script setup with SageMaker-style checkpoint
# --- recovery (fragment: `parser`, `Parser`, and the imports are defined
# --- above this excerpt, and the code continues past the end of it) ---

def _str2bool(value):
    """Parse a command-line string into a bool.

    Plain ``type=bool`` is an argparse pitfall: ``bool('False')`` is True,
    so any non-empty string — including ``'False'`` — used to enable the flag.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('1', 'true', 'yes', 'y', 't')


parser.add_argument('-restore', '--restore', default='', type=str)  # model_last.pth
parser.add_argument('-output_path', '--output_path', default='ckpts', type=str)
parser.add_argument('-prefix_path', '--prefix_path', default='', type=str)
# Fixed: was type=bool, which treats every non-empty string as True.
parser.add_argument('-aws', '--aws', default=False, type=_str2bool)
parser.add_argument('-finetune', '--finetune', default=False, type=_str2bool)

# Directory containing this script (not used in the visible fragment).
path = os.path.dirname(__file__)

## parse arguments
args = parser.parse_args()
# NOTE(review): `Parser` is a project-local config loader, not argparse's.
args = Parser(args.cfg, log='train').add_args(args)
# args.net_params.device_ids= [int(x) for x in (args.gpu).split(',')]
ckpts = args.makedir()

args.resume = args.restore  # specify the epoch
if not args.restore:
    # No explicit checkpoint requested: recover the newest one the platform
    # synced into /opt/ml/checkpoints (SageMaker checkpoint directory).
    local_path = '/opt/ml/checkpoints/'
    # Guard against machines without the SageMaker directory — os.listdir on a
    # missing path would raise FileNotFoundError and abort local runs.
    list_checkpoints = os.listdir(local_path) if os.path.isdir(local_path) else []
    # get last checkpoints?
    if list_checkpoints:
        for f in list_checkpoints:
            try:
                shutil.copy2(os.path.join(local_path, f), ckpts)
            except (OSError, shutil.Error):
                # Best-effort copy: skip unreadable entries instead of aborting.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                continue
        # Keep only the names of maximal length, then take the
        # lexicographically last — i.e. the highest-numbered checkpoint.
        list_checkpoints.sort(key=len)
        list_checkpoints = [checkpoint for checkpoint in list_checkpoints if len(checkpoint) == len(list_checkpoints[-1])]
        list_checkpoints = sorted(list_checkpoints)
        last_checkpoints = list_checkpoints[-1]