def main(args):
    """Evaluate an ensemble of five U-Nets on the AC17 validation data.

    Builds one U-Net per checkpoint (args.weights_unet1 .. args.weights_unet5),
    wraps each in a SegmentationModule sharing a single Loss criterion, and
    runs `evaluate` over a single-sample DataLoader.
    """
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()

    # The five ensemble members differ only in their checkpoint weights,
    # so build them in a loop instead of five copy-pasted calls.
    crit = Loss()
    modules = [
        SegmentationModule(
            builder.build_unet(num_class=args.num_class,
                               arch=args.arch_unet,
                               weights=getattr(args, 'weights_unet%d' % i)),
            crit)
        for i in range(1, 6)
    ]

    test_augs = ComposeTest([PaddingCenterCropTest(384)])

    ac17 = AC17(
            root=args.data_root,
            augmentations=test_augs,
            img_norm=args.img_norm)

    loader_val = data.DataLoader(
        ac17,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    for sm in modules:
        sm.cuda()

    # Main loop: evaluate(sm1, ..., sm5, loader_val, args) as before.
    evaluate(*modules, loader_val, args)

    print('Evaluation Done!')
# --- Code example 2 (score: 0) ---
def main(args):
    """Evaluate a single U-Net with a validation-mode dual loss on AC17."""
    torch.cuda.set_device(args.gpu)

    # Build the segmentation network from the given checkpoint.
    model_builder = ModelBuilder()
    unet = model_builder.build_unet(num_class=args.num_class,
                                    arch=args.arch_unet,
                                    weights=args.weights_unet1)

    criterion = DualLoss(mode='val')
    seg_module = SegmentationModule(criterion, unet)

    # Test-time transform: pad/center-crop every image to 256x256.
    augmentations = ComposeTest([PaddingCenterCropTest(256)])
    dataset = AC17(root=args.data_root,
                   augmentations=augmentations,
                   img_norm=args.img_norm)

    val_loader = data.DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    seg_module.cuda()

    # Run the evaluation pass.
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
# --- Code example 3 (score: 0) ---
def main(args):
    """Evaluate a U-Net (no criterion attached) on the lung test split."""
    torch.cuda.set_device(args.gpu)

    # Build the network from the given checkpoint.
    model_builder = ModelBuilder()
    unet = model_builder.build_unet(num_class=args.num_class,
                                    arch=args.arch_unet,
                                    weights=args.weights_unet)

    seg_module = SegmentationModule(None, unet)

    # Test-time transform: pad/center-crop every image to 256x256.
    augmentations = ComposeTest([PaddingCenterCropTest(256)])
    dataset = LungData(root=args.data_root,
                       split='test',
                       augmentations=augmentations)

    val_loader = data.DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=0,
                                 drop_last=True)

    seg_module.cuda()

    # Run the evaluation pass.
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
# --- Code example 4 (score: 0) ---
def main(args):
    """Evaluate a U-Net on the AC17 validation fold selected by args.k_split."""
    torch.cuda.set_device(args.gpu)

    # Build the network and pair it with its loss criterion.
    model_builder = ModelBuilder()
    unet = model_builder.build_unet(num_class=args.num_class,
                                    arch=args.arch_unet,
                                    weights=args.weights_unet)
    criterion = Loss()
    seg_module = SegmentationModule(unet, criterion)

    # Test-time transform: pad/center-crop every image to 224x224.
    augmentations = Compose([PaddingCenterCrop(224)])

    volumes = AC17(root=args.data_root,
                   split='val',
                   k_split=args.k_split,
                   augmentations=augmentations,
                   img_norm=args.img_norm)
    # Turn the 3D volumes into a 2D-slice dataset (no deformation at val time).
    slices = load2D(volumes, split='val', deform=False)

    val_loader = data.DataLoader(slices,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    seg_module.cuda()

    # Run the evaluation pass.
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
# --- Code example 5 (score: 0) — file: train.py, project: rexxxx1234/SAUNet-demo ---
def main(args):
    """Run an evaluation pass of a U-Net on the lung test split.

    Despite living in train.py, this entry point only calls `eval`; the
    optimizer construction is kept so the set-up mirrors the training script.
    """
    # Network Builders
    builder = ModelBuilder()

    unet = builder.build_unet(num_class=args.num_class,
        arch=args.unet_arch,
        weights=args.weights_unet)

    # Report which layers the builder left frozen.
    print("Froze the following layers: ")
    for name, p in unet.named_parameters():
        if not p.requires_grad:
            print(name)
    print()

    crit = DualLoss(mode="train")

    segmentation_module = SegmentationModule(crit, unet)

    test_augs = Compose([PaddingCenterCrop(256)])

    print("ready to load data")

    dataset_val = LungData(
            root=args.data_root,
            split='test',
            k_split=args.k_split,
            augmentations=test_augs)

    loader_val = data.DataLoader(
        dataset_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    print(len(loader_val))

    # load nets into gpu
    if len(args.gpus) > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers.
    # BUG FIX: the original selected (net_encoder, net_decoder, crit) when
    # args.unet was False, but neither name is defined in this function, so
    # that branch raised NameError.  Only the U-Net is ever built here.
    nets = (unet, crit)
    optimizers = create_optimizers(nets, args)

    iou, loss = eval(loader_val, segmentation_module, args, crit)
    print('Evaluation Done!')
# --- Code example 6 (score: 0) — file: train.py, project: rexxxx1234/SAUNet-demo (truncated fragment follows) ---
        args.ckpt = os.path.join(args.ckpt, args.id)
        if not os.path.isdir(args.ckpt):
            os.makedirs(args.ckpt)

        random.seed(args.seed)
        torch.manual_seed(args.seed)

        #main(args)

        ''''''
        #build model
        builder = ModelBuilder()

        unet = builder.build_unet(num_class=args.num_class,
            arch=args.unet_arch,
            weights=args.weights_unet)

        print("Froze the following layers: ")
        for name, p in unet.named_parameters():
            if p.requires_grad == False:
                print(name)
        print()

        crit = DualLoss(mode="train")

        segmentation_module = SegmentationModule(crit, unet)

        # load nets into gpu
        if len(args.gpus) > 1:
            segmentation_module = UserScatteredDataParallel(
# --- Code example 7 (score: 0) ---
def main(args):
    """Evaluate on AC17: an encoder/decoder pair or a U-Net, chosen by args.unet."""
    torch.cuda.set_device(args.gpu)

    # Network builders.
    net_builder = ModelBuilder()

    encoder = None
    decoder = None
    unet_model = None

    if args.unet == False:
        # Two-stage encoder/decoder architecture.
        encoder = net_builder.build_encoder(
            arch=args.arch_encoder,
            fc_dim=args.fc_dim,
            weights=args.weights_encoder)
        decoder = net_builder.build_decoder(
            arch=args.arch_decoder,
            fc_dim=args.fc_dim,
            num_class=args.num_class,
            weights=args.weights_decoder,
            use_softmax=True)
    else:
        # Single U-Net architecture.
        unet_model = net_builder.build_unet(num_class=args.num_class,
                                            arch=args.arch_unet,
                                            weights=args.weights_unet)

    criterion = ACLoss()

    if args.unet == False:
        seg_module = SegmentationModule(encoder, decoder, criterion)
    else:
        seg_module = SegmentationModule(encoder, decoder, criterion,
                                        is_unet=args.unet, unet=unet_model)

    # Test-time transform: pad/center-crop to 256x256.
    augmentations = Compose([PaddingCenterCrop(256)])

    volumes = AC17(root=args.data_root,
                   split='val',
                   k_split=args.k_split,
                   augmentations=augmentations,
                   img_norm=args.img_norm)
    # 2D-slice view over the 3D validation volumes.
    slices = load2D(volumes, split='val', deform=False)
    val_loader = data.DataLoader(slices,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    seg_module.cuda()

    # Run the evaluation pass.
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
# --- Code example 8 (score: 0) — file: train.py, project: kunlqt/shape-attentive-unet ---
def main(args):
    """Train a U-Net on AC17 2D slices, validating and checkpointing each epoch.

    A checkpoint is written when a per-class or mean IoU record is beaten
    (after a 15-epoch warm-up), when the validation loss drops below 0.215,
    on every 50th epoch, and at the final epoch.
    """
    # Network Builders
    builder = ModelBuilder()

    unet = builder.build_unet(num_class=args.num_class,
        arch=args.unet_arch,
        weights=args.weights_unet)

    # Report which layers the builder left frozen.
    print("Froze the following layers: ")
    for name, p in unet.named_parameters():
        if not p.requires_grad:
            print(name)
    print()

    crit = DualLoss(mode="train")

    segmentation_module = SegmentationModule(crit, unet)

    train_augs = Compose([PaddingCenterCrop(256), RandomHorizontallyFlip(), RandomVerticallyFlip(), RandomRotate(180)])
    test_augs = Compose([PaddingCenterCrop(256)])

    # Dataset and Loader
    dataset_train = AC17(  # Loads 3D volumes
            root=args.data_root,
            split='train',
            k_split=args.k_split,
            augmentations=train_augs,
            img_norm=args.img_norm)
    # Dataloader for 2D slices.  Requires the 3D loader above.
    ac17_train = load2D(dataset_train, split='train', deform=True)

    loader_train = data.DataLoader(
        ac17_train,
        batch_size=args.batch_size_per_gpu,
        shuffle=True,
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)

    dataset_val = AC17(
            root=args.data_root,
            split='val',
            k_split=args.k_split,
            augmentations=test_augs,
            img_norm=args.img_norm)

    ac17_val = load2D(dataset_val, split='val', deform=False)

    loader_val = data.DataLoader(
        ac17_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    # load nets into gpu
    if len(args.gpus) > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers.
    # BUG FIX: the original selected (net_encoder, net_decoder, crit) when
    # args.unet was False, but neither name is defined in this function, so
    # that branch raised NameError.  Only the U-Net is ever built here.
    nets = (unet, crit)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': [], 'jaccard': []}}
    best_val = {'epoch_1': 0, 'mIoU_1': 0,
                'epoch_2': 0, 'mIoU_2': 0,
                'epoch_3': 0, 'mIoU_3': 0,
                'epoch': 0, 'mIoU': 0}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, loader_train, optimizers, history, epoch, args)
        iou, loss = eval(loader_val, segmentation_module, args, crit)

        # An epoch is checkpoint-worthy when the loss is low enough or any
        # per-class / mean IoU record is beaten.
        ckpted = False
        if loss < 0.215:
            ckpted = True
        for c in range(3):
            if iou[c] > best_val['mIoU_%d' % (c + 1)]:
                best_val['epoch_%d' % (c + 1)] = epoch
                best_val['mIoU_%d' % (c + 1)] = iou[c]
                ckpted = True
        mean_iou = (iou[0] + iou[1] + iou[2]) / 3
        if mean_iou > best_val['mIoU']:
            best_val['epoch'] = epoch
            best_val['mIoU'] = mean_iou
            ckpted = True

        # Always checkpoint on 50-epoch boundaries and at the final epoch;
        # otherwise only after the 15-epoch warm-up when a record was set.
        # (A trailing print() in the original was unreachable because every
        # branch ended in `continue`; it has been removed.)
        if epoch % 50 == 0 or epoch == args.num_epoch:
            checkpoint(nets, history, args, epoch)
        elif epoch >= 15 and ckpted:
            checkpoint(nets, history, args, epoch)

    print('Training Done!')
# --- Code example 9 (score: 0) ---
def main(args):
    """Train an encoder/decoder pair or a U-Net (per args.unet) on AC17 slices.

    Validates after every epoch; a checkpoint is written when a per-class or
    mean IoU record is beaten (after a 15-epoch warm-up), when the validation
    loss drops below 0.215, on every 50th epoch, and at the final epoch.
    """
    # Network Builders
    builder = ModelBuilder()
    net_encoder = None
    net_decoder = None
    unet = None

    if args.unet == False:
        net_encoder = builder.build_encoder(
            arch=args.arch_encoder,
            fc_dim=args.fc_dim,
            weights=args.weights_encoder)
        net_decoder = builder.build_decoder(
            arch=args.arch_decoder,
            fc_dim=args.fc_dim,
            num_class=args.num_class,
            weights=args.weights_decoder)
    else:
        unet = builder.build_unet(num_class=args.num_class,
            arch=args.unet_arch,
            weights=args.weights_unet)

        # Report which layers the builder left frozen.
        print("Froze the following layers: ")
        for name, p in unet.named_parameters():
            if not p.requires_grad:
                print(name)
        print()

    crit = ACLoss(mode="train")

    if args.arch_decoder.endswith('deepsup') and args.unet == False:
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder, crit, args.deep_sup_scale)
    else:
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder, crit, is_unet=args.unet, unet=unet)

    train_augs = Compose([PaddingCenterCrop(256), RandomHorizontallyFlip(), RandomVerticallyFlip(), RandomRotate(180)])
    test_augs = Compose([PaddingCenterCrop(256)])

    # Dataset and Loader
    dataset_train = AC17(
            root=args.data_root,
            split='train',
            k_split=args.k_split,
            augmentations=train_augs,
            img_norm=args.img_norm)
    ac17_train = load2D(dataset_train, split='train', deform=True)

    loader_train = data.DataLoader(
        ac17_train,
        batch_size=args.batch_size_per_gpu,  # we have modified data_parallel
        shuffle=True,
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)

    dataset_val = AC17(
            root=args.data_root,
            split='val',
            k_split=args.k_split,
            augmentations=test_augs,
            img_norm=args.img_norm)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(
        ac17_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    # load nets into gpu
    if len(args.gpus) > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers
    nets = (net_encoder, net_decoder, crit) if args.unet == False else (unet, crit)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': [], 'jaccard': []}}
    best_val = {'epoch_1': 0, 'mIoU_1': 0,
                'epoch_2': 0, 'mIoU_2': 0,
                'epoch_3': 0, 'mIoU_3': 0,
                'epoch': 0, 'mIoU': 0}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, loader_train, optimizers, history, epoch, args)
        iou, loss = eval(loader_val, segmentation_module, args, crit)

        # An epoch is checkpoint-worthy when the loss is low enough or any
        # per-class / mean IoU record is beaten.
        ckpted = False
        if loss < 0.215:
            ckpted = True
        for c in range(3):
            if iou[c] > best_val['mIoU_%d' % (c + 1)]:
                best_val['epoch_%d' % (c + 1)] = epoch
                best_val['mIoU_%d' % (c + 1)] = iou[c]
                ckpted = True
        mean_iou = (iou[0] + iou[1] + iou[2]) / 3
        if mean_iou > best_val['mIoU']:
            best_val['epoch'] = epoch
            best_val['mIoU'] = mean_iou
            ckpted = True

        # Always checkpoint on 50-epoch boundaries and at the final epoch;
        # otherwise only after the 15-epoch warm-up when a record was set.
        # (A trailing print() in the original was unreachable because every
        # branch ended in `continue`; it has been removed.)
        if epoch % 50 == 0 or epoch == args.num_epoch:
            checkpoint(nets, history, args, epoch)
        elif epoch >= 15 and ckpted:
            checkpoint(nets, history, args, epoch)

    print('Training Done!')
# --- Code example 10 (score: 0) — file: eval.py, project: sunjesse/ResNet34-UNet-HVS16 ---
def main(args):
    """Evaluate an encoder/decoder pair or a U-Net (per args.unet) with NLL loss."""
    torch.cuda.set_device(args.gpu)

    # Network builders.
    net_builder = ModelBuilder()

    encoder = None
    decoder = None
    unet_model = None

    if args.unet == False:
        # Two-stage encoder/decoder architecture.
        encoder = net_builder.build_encoder(arch=args.arch_encoder,
                                            fc_dim=args.fc_dim,
                                            weights=args.weights_encoder)
        decoder = net_builder.build_decoder(arch=args.arch_decoder,
                                            fc_dim=args.fc_dim,
                                            num_class=args.num_class,
                                            weights=args.weights_decoder,
                                            use_softmax=True)
    else:
        # Single U-Net architecture, with softmax for inference.
        unet_model = net_builder.build_unet(num_class=args.num_class,
                                            arch=args.arch_unet,
                                            weights=args.weights_unet,
                                            use_softmax=True)

    criterion = nn.NLLLoss()

    if args.unet == False:
        seg_module = SegmentationModule(encoder, decoder, criterion)
    else:
        seg_module = SegmentationModule(encoder,
                                        decoder,
                                        criterion,
                                        is_unet=args.unet,
                                        unet=unet_model)

    # Dataset and loader over the validation list.
    val_set = ValDataset(args.list_val, args, max_sample=args.num_val)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    seg_module.cuda()

    # Run the evaluation pass.
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
# --- Code example 11 (score: 0) ---
def main(args):
    """Train an encoder/decoder pair or a U-Net (per args.unet) with NLL loss."""
    # Network builders.
    net_builder = ModelBuilder()
    encoder = None
    decoder = None
    unet_model = None

    if args.unet == False:
        encoder = net_builder.build_encoder(arch=args.arch_encoder,
                                            fc_dim=args.fc_dim,
                                            weights=args.weights_encoder)
        decoder = net_builder.build_decoder(arch=args.arch_decoder,
                                            fc_dim=args.fc_dim,
                                            num_class=args.num_class,
                                            weights=args.weights_decoder)
    else:
        unet_model = net_builder.build_unet(num_class=args.num_class,
                                            arch=args.unet_arch,
                                            weights=args.weights_unet)

        # Report which layers the builder left frozen.
        print("Froze the following layers: ")
        for layer_name, param in unet_model.named_parameters():
            if param.requires_grad == False:
                print(layer_name)

    criterion = nn.NLLLoss()

    if args.arch_decoder.endswith('deepsup') and args.unet == False:
        seg_module = SegmentationModule(encoder, decoder,
                                        criterion, args.deep_sup_scale)
    else:
        seg_module = SegmentationModule(encoder,
                                        decoder,
                                        criterion,
                                        is_unet=args.unet,
                                        unet=unet_model)

    # Training-time augmentation pipeline.
    train_augs = Compose([
        RandomSized(224),
        RandomHorizontallyFlip(),
        RandomVerticallyFlip(),
        RandomRotate(180),
        AdjustContrast(cf=0.25),
        AdjustBrightness(bf=0.25)
    ])

    # Dataset and loader.
    train_set = TrainDataset(args.list_train,
                             args,
                             batch_per_gpu=args.batch_size_per_gpu,
                             augmentations=train_augs)

    train_loader = data.DataLoader(
        train_set,
        batch_size=len(args.gpus),  # batching handled by the modified data_parallel
        shuffle=False,  # this param is unused with the custom dataset
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=False)

    print('1 Epoch = {} iters'.format(args.epoch_iters))
    # Create the loader iterator once; train() pulls batches from it.
    train_iterator = iter(train_loader)

    # Move the model onto the GPU(s).
    if len(args.gpus) > 1:
        seg_module = UserScatteredDataParallel(seg_module,
                                               device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(seg_module)
    seg_module.cuda()

    # Optimizers over whichever networks were built.
    nets = (encoder, decoder, criterion) if args.unet == False else (unet_model,
                                                                     criterion)
    optimizers = create_optimizers(nets, args)

    # Main loop: train and checkpoint every epoch.
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(seg_module, train_iterator, optimizers, history, epoch,
              args)
        checkpoint(nets, history, args, epoch)

    print('Training Done!')
# --- Code example 12 (score: 0) ---
def run_model(space):
    """Train one hyperparameter sample and return a hyperopt-style result dict.

    `space` is a mapping with at least an 'lr' entry.  Builds an AlbuNet
    U-Net, trains it for up to 30 epochs on AC17, and reports the best
    (minimum) validation value observed, plus a status and a run name.
    """
    # Network Builders
    builder = ModelBuilder()
    # Only the U-Net path is used here; the encoder/decoder slots of
    # SegmentationModule stay empty.  (A redundant `unet = None` that was
    # immediately overwritten has been removed.)
    net_encoder = None
    net_decoder = None

    unet = builder.build_unet(num_class=4, arch='AlbuNet', weights='')

    crit = ACLoss()

    segmentation_module = SegmentationModule(net_encoder,
                                             net_decoder,
                                             crit,
                                             is_unet=True,
                                             unet=unet)

    train_augs = Compose([
        PaddingCenterCrop(224),
        RandomHorizontallyFlip(),
        RandomVerticallyFlip(),
        RandomRotate(180)
    ])
    test_augs = Compose([PaddingCenterCrop(224)])

    # Dataset and Loader
    dataset_train = AC17(root=os.getenv('DATA_ROOT',
                                        '/home/rexma/Desktop/MRI_Images/AC17'),
                         split='train',
                         k_split=1,
                         augmentations=train_augs,
                         img_norm=True)
    ac17_train = load2D(dataset_train, split='train', deform=False)

    loader_train = data.DataLoader(
        ac17_train,
        batch_size=4,  # we have modified data_parallel
        shuffle=True,
        num_workers=5,
        drop_last=True,
        pin_memory=True)

    dataset_val = AC17(root=os.getenv('DATA_ROOT',
                                      '/home/rexma/Desktop/MRI_Images/AC17'),
                       split='val',
                       k_split=1,
                       augmentations=test_augs,
                       img_norm=True)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(ac17_val,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    segmentation_module.cuda()

    # Set up optimizers
    nets = (unet, crit)
    optimizers = create_optimizers(nets, space)

    val_losses = []
    train_losses = []
    # NOTE(review): STATUS_OK / STATUS_FAIL presumably come from hyperopt —
    # confirm against the file's imports.
    status = STATUS_OK
    print("Searching " + "lr: " + str(space['lr'])
          )
    # Main loop
    for epoch in range(1, 31):
        t_iou = train(segmentation_module, loader_train, optimizers, epoch,
                      space)
        v_iou = eval(loader_val, segmentation_module, crit)
        train_losses.append(t_iou)
        val_losses.append(v_iou)
        # Early bail-out: a degenerate run at epoch 3 is marked failed.
        if epoch == 3 and v_iou >= 1.0:
            status = STATUS_FAIL
            break

    # Values returned to the hyperparameter search driver.
    opt_name = 'lr' + str(
        space['lr'])
    model_dict = {'loss': min(val_losses), 'status': status, 'name': opt_name}
    return model_dict