# ----- Example 1 -----
                    help="Use cuda?")
# Hardware / runtime settings
parser.add_argument("--gpus", default=1, type=int, help="nums of gpu to use")
parser.add_argument("--threads",
                    default=8,
                    type=int,
                    help="Number of threads for data loader to use.")
# BUGFIX: this option previously took a free-form value with no type
# conversion, so any non-empty string (even "False") parsed as truthy.
# `store_true` makes it a proper on/off flag; the attribute name and the
# default of False are unchanged, so downstream reads of
# opt.fixrandomseed behave the same.
parser.add_argument("--fixrandomseed",
                    action="store_true",
                    default=False,
                    help="train with fix random seed")

# Pretrain model setting
parser.add_argument("--resume", type=str, help="Path to checkpoint.")

# Dataloader setting
parser.add_argument("--train",
                    default=["../dataset/NTIRE2018"],
                    type=str,
                    nargs='+',
                    help="path of training dataset")
parser.add_argument("--val",
                    default=["../dataset/NTIRE2018_VAL"],
                    type=str,
                    nargs='+',
                    help="path of validation dataset")

if __name__ == "__main__":
    import utils

    # Parse the command line and dump the resulting options for inspection.
    options = parser.parse_args()
    utils.details(options, None)
# ----- Example 2 -----
    # I-Haze: ground-truth and output locations.
    ihazeparser.add_argument('--gt', help='Input directory for Clear Image', type=str,
                            default="/media/disk1/EdwardLee/dataset/IndoorTrainGT")
    ihazeparser.add_argument('--output', help='Output directory', type=str,
                            default="/media/disk1/EdwardLee/IndoorTrain")

    # O-Haze: hazy input, ground truth, and output locations.
    ohazeparser = domainparser.add_parser("O-Haze")
    ohazeparser.add_argument('--hazy', help='Input directory for Hazy Image', type=str,
                            default="/media/disk1/EdwardLee/dataset/OutdoorTrainHazy")
    ohazeparser.add_argument('--gt', help='Input directory for Clear Image', type=str,
                            default="/media/disk1/EdwardLee/dataset/OutdoorTrainGT")
    ohazeparser.add_argument('--output', help='Output directory', type=str,
                            default="/media/disk1/EdwardLee/OutdoorTrain")

    args = parser.parse_args()
    utils.details(args)

    # --------------------------------------------------------- #
    # Check folders here, make the directories if don't exist.  #
    # 0.  The image                                             #
    # 1.  Root folder                                           #
    #     1.1 gt                                                #
    #     1.2 hazy                                              #
    # --------------------------------------------------------- #
    # BUGFIX: the error messages read "File doesn't not exist" (double
    # negative); corrected to "File does not exist".
    if not os.path.exists(args.hazy):
        raise IOError("File does not exist: {}".format(args.hazy))

    if not os.path.exists(args.gt):
        raise IOError("File does not exist: {}".format(args.gt))

    # 1 Root Folder
# ----- Example 3 -----
    # Validate every dataset directory up front (training paths first,
    # then validation) so we fail before any filesystem side effects.
    for path in opt.train + opt.val:
        if not os.path.exists(path):
            raise ValueError("{} doesn't exist".format(path))

    # Checkpoint storage directory, tagged with today's date.
    name = "{}_{}".format(opt.tag, date.today().strftime("%Y%m%d"))
    os.makedirs(os.path.join(opt.checkpoints, name), exist_ok=True)

    # Snapshot the model source next to both the logs and the checkpoints,
    # replacing any stale copy from an earlier run on the same day.
    detail_model = os.path.join(opt.detail, name, 'model')
    ckpt_model = os.path.join(opt.checkpoints, name, 'model')
    for stale in (detail_model, ckpt_model):
        if os.path.exists(stale):
            shutil.rmtree(stale)

    shutil.copytree('./model', detail_model)
    shutil.copytree('./model', ckpt_model)
    shutil.copyfile(__file__, os.path.join(opt.detail, name, os.path.basename(__file__)))

    # Dump the full training configuration for the record.
    print('==========> Training setting')
    utils.details(opt, os.path.join(opt.detail, name, 'args.txt'))

    # Hand off to the main training routine.
    main(opt)
# ----- Example 4 -----
        # Compute the validation loss; the criterion takes per-sample input
        # lengths and label lengths as tuples.
        # NOTE(review): log_probs / lengths / len_of_labels come from earlier
        # in the loop body, outside this view — presumably a CTC-style setup.
        labels = concat_labels.to(DEVICE)
        loss = criterion(log_probs, labels, tuple(lengths), tuple(len_of_labels))

        # Accumulate the running loss, skipping NaN batches so a single
        # diverged batch does not poison the epoch average.
        nl = loss.item()
        print("    ["+str(epoch)+","+str(vi)+"] loss: "+str(nl))
        if not math.isnan(nl):
            vloss += nl
        
        #vavdis += predictor.evaluateError(output, labels, len_of_labels)
        vtotal += output.shape[0]
        vi += 1

    _end_time = time.time()
    # Report epoch wall-clock time plus training/validation averages.
    # NOTE(review): divides by i, total, vi and vtotal — this raises
    # ZeroDivisionError if either loader yields no batches; confirm the
    # loaders are never empty.
    details((_end_time - _begin_time), ave_loss/i, avg_dis/total, vloss/vi, vavdis/vtotal)
    _begin_time = time.time()

    # Checkpoint: the filename encodes the save timestamp and the truncated
    # average validation distance.
    torch.save(model.state_dict(),"models/model-"+str(int(_begin_time))+"-"+str(int(vavdis/vtotal))+".pt")


# ------------------------------------------------------------------ #
# Inference over the test set.                                       #
# ------------------------------------------------------------------ #
result = []
model.eval()
test_loader = loader("data/transformed_test_data.npy")

# BUGFIX/PERF: run inference under torch.no_grad() — no gradients are
# needed here, and tracking them wastes memory. Also extend the result
# list in place: the old `result = result + preds` copied the whole
# accumulated list on every batch (quadratic overall).
with torch.no_grad():
    for voice, lengths in test_loader:
        output = model(voice)
        preds = predictor.predict(output)
        result.extend(preds)