# Parse command-line flags of the form "--name value".
# NOTE(review): the accepted long-option list is synthesized from argv itself
# (every other token with '--' stripped and '=' appended), so getopt accepts
# any "--flag value" pairs; assumes arguments come strictly in such pairs.
for Opt, Arg in getopt.getopt(sys.argv[1:], '', [strParameter[2:] + '=' for strParameter in sys.argv[1::2]])[0]:
    # assumes arg_Model / arg_DataRoot / arg_Thres already have defaults
    # assigned earlier in the file (not visible in this chunk) — TODO confirm
    if Opt == '--model' and Arg != '':
        arg_Model = Arg
    if Opt == '--data' and Arg != '':
        arg_DataRoot = Arg
    if Opt == '--thres' and Arg != '':
        arg_Thres = float(Arg)

# using evaluation metrics(Must have data in the croppedgt directory)
evaluation = True

# fix random seed
rng = np.random.RandomState(37148)

# create instance of HED model and move it to the GPU
net = HED()
net.cuda()

# load the weights for the model from the checkpoint given on the CLI
net.load_state_dict(torch.load(arg_Model))

# batch size
nBatch = 1

# make test list for infer
# (presumably writes 'test.lst' under arg_DataRoot; helper not visible here)
make_txt(arg_DataRoot,'test')

# create data loaders from dataset
testPath = os.path.join(arg_DataRoot, 'test.lst')
print(testPath)
import numpy as np
import pandas as pd
from PIL import Image
import skimage.io as io
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import torchvision.transforms as transforms

# import the utility functions
from model import HED
from dataproc import TestDataset

# NOTE(review): `torch` is used below but not imported in this chunk —
# presumably imported elsewhere in the file; verify.

# fix random seed
rng = np.random.RandomState(37148)

# create instance of HED model and move it to the GPU
net = HED()
net.cuda()

# load the weights for the model and switch to inference mode
net.load_state_dict(torch.load('./train/HED.pth'))
net.eval()

# batch size
nBatch = 1

# load the images dataset
dataRoot = './cartoon_portrait_clean'  # '/home/arnab/tinkering-projects/pytorch-hed/data/HED-BSDS/test'

# per-channel normalization constants
# NOTE(review): these are the standard ImageNet statistics — presumably the
# backbone was pretrained with them; confirm against the training pipeline.
std = [0.229, 0.224, 0.225]
mean = [0.485, 0.456, 0.406]

# NOTE(review): chunk is truncated here — the Compose(...) argument list
# continues beyond this view.
transform = transforms.Compose(
nBatch = 1 # load the images dataset dataRoot = '../HED-BSDS/' valPath = dataRoot + 'val_pair.lst' trainPath = dataRoot + 'train_pair.lst' # create data loaders from dataset valDataset = TrainDataset(valPath, dataRoot) trainDataset = TrainDataset(trainPath, dataRoot) valDataloader = DataLoader(valDataset, shuffle=False) trainDataloader = DataLoader(trainDataset, shuffle=False) # initialize the network net = HED(pretrained=False) net.cuda(gpuID) # define the optimizer optimizer = optim.SGD(net.parameters(), lr=1e-5, momentum=0.9, weight_decay=0.0002) # initialize trainer class trainer = Trainer(net, optimizer, trainDataloader, valDataloader, nBatch=nBatch, maxEpochs=10,
##########################################################
def test(model, opt):
    """Run HED edge detection on one image file and save the edge map.

    Loads the image at ``opt.input``, reverses the channel order
    (RGB -> BGR), scales it to [0, 1], runs it through ``model`` on the
    GPU, and writes channel 0 of the clamped output as an 8-bit image
    to ``opt.output``.
    """
    print("--------------Start Detecting---------------")
    # input=c_h_w
    # default input: w= 480,h == 320
    print("--------------load image from:", opt.input, '--------------')
    # Decode the image, flip channel order, move channels first, normalize.
    pixels = numpy.array(PIL.Image.open(opt.input))
    chwBgr = pixels[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32)
    tensorInput = torch.FloatTensor(chwBgr * (1.0 / 255.0))
    intHeight = tensorInput.size(1)
    intWidth = tensorInput.size(2)
    # Add the batch dimension and move the input to the GPU.
    tensorInput = tensorInput.cuda().view(1, 3, intHeight, intWidth)
    # Forward pass; keep channel 0, clamp to [0, 1], rescale to uint8.
    prediction = model(tensorInput)[0, :, :, :].cpu().clamp(0.0, 1.0)
    tensorOutput = (prediction.numpy().transpose(1, 2, 0)[:, :, 0] * 255.0).astype(numpy.uint8)
    PIL.Image.fromarray(tensorOutput).save(opt.output)
    print("--------------save image to:", opt.output, '--------------')


if __name__ == '__main__':
    # Parse CLI options, restore the checkpoint, and run detection once.
    opt = parser.parse_args()
    model = HED.HED()
    model.load_state_dict(torch.load(opt.model))
    print('--------------Load model from', opt.model, '--------------')
    model.cuda().eval()
    test(model, opt)
def main(args):
    """Build the HED model and data loaders, then run training.

    Resumes from ``args.model_path`` when ``args.resume`` is set and the
    checkpoint file exists; otherwise initializes a fresh model from VGG
    backbone weights plus the HED paper's constant side/fuse inits.
    """
    done_epoch = 0
    if args.resume and os.path.exists(args.model_path):
        print("resume training...")
        model = HED()
        model.load_state_dict(torch.load(args.model_path))
        # Recover how many epochs were already logged: done_epoch is the
        # index of the last line of the history CSV (line count - 1).
        # BUG FIX: the original `for i, l in enumerate(f): pass` raised
        # NameError when the history file was empty; count lines instead
        # and clamp at 0.
        with open("{0}-history.csv".format(args.expname), 'r') as f:
            line_count = sum(1 for _ in f)
        done_epoch = max(line_count - 1, 0)
    else:
        print("initialize training...")
        model = HED()
        # Seed the backbone with pretrained VGG weights, keeping any
        # HED-specific parameters that VGG does not provide.
        model_dict = model.state_dict()
        vgg_weights = get_vgg_weights()
        model_dict.update(vgg_weights)
        model.load_state_dict(model_dict)
        # HED constant initialization: fuse weights at 0.2, all side-output
        # conv weights/biases at 1.0 (replaces ten copy-pasted calls).
        nn.init.constant_(model.fuse.weight_sum.weight, 0.2)
        for side in (model.side1, model.side2, model.side3, model.side4, model.side5):
            nn.init.constant_(side.conv.weight, 1.0)
            nn.init.constant_(side.conv.bias, 1.0)
        # Create (or truncate) the history CSV for a fresh run; the logger
        # object itself is only needed for this side effect.
        logger = TrainLogger("{0}-history.csv".format(args.expname), overwrite=True)
        del logger

    # Datasets and loaders for the train/test splits.
    dataset_train = HEDDataset(csv_path=args.train_list_path, root_dir=args.train_dir, enableBatch=True)
    dataset_test = HEDDataset(csv_path=args.test_list_path, root_dir=args.test_dir, enableBatch=True)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
    test_loader = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=True)

    # Select device and move the model onto it.
    device = torch.device("cpu" if args.no_cuda else "cuda:0")
    model = model.to(device)

    # Plain SGD, as in the HED paper; `opt` is the optimizer module alias.
    sgd = opt.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    train(model=model,
          device=device,
          train_loader=train_loader,
          test_loader=test_loader,
          optimizer=sgd,
          n_epochs=args.n_epochs,
          prefix=args.expname,
          done_epoch=done_epoch)
print("Loading train dataset...")  # NOTE(review): message says "train" but this loads the BSDS *test* split
rootDirImgTest = "BSDS500/data/images/test/"
testOutput = "test/output-bsds/"
testDS = BSDS_TEST(rootDirImgTest)
test = DataLoader(testDS, shuffle=False)
os.makedirs(testOutput, exist_ok=True)

print("Loading trained network...")
networkPath = "HED.pth"
nnet = HED().cuda()
# Remap the checkpoint's weights onto this model's parameter names purely
# by position: assumes both state dicts enumerate parameters in the same
# order — TODO confirm; an ordering mismatch would silently pair the wrong
# tensors.
dic = torch.load(networkPath)
dicli = list(dic.keys())
new = {}
j = 0
for k in nnet.state_dict():
    new[k] = dic[dicli[j]]
    j += 1
nnet.load_state_dict(new)

print("Generating test results...")
# NOTE(review): chunk is truncated here — the loop body continues beyond
# this view (presumably saving sideOuts; cannot confirm from this chunk).
for j, data in enumerate(tqdm(test), 0):
    image, imgName = data
    image = Variable(image, requires_grad=False).cuda()
    sideOuts = nnet(image)