def main(args):
    """Evaluate the model under increasing noise levels and dump the
    per-level error rows to an Excel sheet.

    NOTE(review): `repeat`, `log`, and the helpers (custom_data_loader,
    custom_model, recorders, test_utils, np, xlwt) are assumed to be
    module-level names -- confirm against the rest of the file.  Unlike
    the sibling drivers, this one never creates `log` locally.
    """
    import os

    rs = []
    # Noise levels start at 1; the clean (level-0) run is intentionally skipped.
    for noise_level in range(1, repeat):
        print('doing{}'.format(noise_level))
        test_loader = custom_data_loader.benchmarkLoader(args)
        model = custom_model.buildModel(args)
        recorder = recorders.Records(args.log_dir)
        r = test_utils.test_split_rob(args, 'test', test_loader, model, log, 1,
                                      recorder, padding=16, stride=96,
                                      noise_level=noise_level)
        # Append the overall mean as an extra trailing column for this level.
        r.append(np.mean(r))
        rs.append(np.stack(r))
    print(rs)

    workbook = xlwt.Workbook(encoding='ascii')
    worksheet = workbook.add_sheet('My Worksheet', cell_overwrite_ok=True)
    # Row 0 is left free (presumably for a header); at most 10 columns written.
    for row, values in enumerate(rs):
        for col, value in zip(range(10), values):
            worksheet.write(row + 1, col, label=value)
    # Make sure the output directory exists so save() cannot fail.
    os.makedirs('result/robust', exist_ok=True)
    workbook.save('result/robust/pro.xls')
def main(args):
    """Run a single test_split evaluation (index 15) and log the mean
    error of each run to runlog.txt.

    NOTE(review): the loop index `i` is only consumed by the commented-out
    checkpoint-path code; the range(15, 16) loop runs exactly once.
    """
    rs = []
    for i in range(15, 16):
        # path="data/Training5shadow/checkp_{}.pth.tar".format(i)
        # args.retrain=path
        log = logger.Logger(args)
        test_loader = custom_data_loader.benchmarkLoader(args)
        model = custom_model.buildModel(args)
        recorder = recorders.Records(args.log_dir)
        r = test_utils.test_split(args, 'test', test_loader, model, log, 1,
                                  recorder, padding=0, stride=64)
        rs.append(np.mean(r))
    print(rs)
    # BUG FIX: np.array2string() requires an ndarray -- calling it on the
    # plain Python list `rs` raises AttributeError.  Convert first, and use
    # a context manager so the file is closed even on error.
    with open('runlog.txt', 'w') as f:
        f.write(np.array2string(np.asarray(rs)))
def main(args):
    """Sweep the number of input images (in_img_num = -4 + i) over `repeat`
    runs and export the per-run error rows to an Excel sheet.

    NOTE(review): `repeat` and the project helpers are assumed to be
    module-level names -- confirm against the rest of the file.
    """
    import os

    rs = []
    for run in range(repeat):
        args.in_img_num = -4 + run
        # path="data/Training5shadow/checkp_{}.pth.tar".format(i)
        # args.retrain=path
        log = logger.Logger(args)
        test_loader = custom_data_loader.benchmarkLoader(args)
        model = custom_model.buildModel(args)
        recorder = recorders.Records(args.log_dir)
        r = test_utils.test_split(args, 'test', test_loader, model, log, 1,
                                  recorder, padding=10, stride=120)
        rs.append(r)
    print(rs)

    workbook = xlwt.Workbook(encoding='ascii')
    worksheet = workbook.add_sheet('My Worksheet', cell_overwrite_ok=True)
    # Row 0 is left free (presumably for a header); at most 9 columns written.
    for row, values in enumerate(rs):
        for col, value in zip(range(9), values):
            worksheet.write(row + 1, col, label=value)
    # BUG FIX: the original filename used the loop variable `i` leaked out of
    # the writer loop (always repeat - 1).  Make that value explicit.
    os.makedirs('result/sparse', exist_ok=True)
    workbook.save('result/sparse/sparse_pro{}.xls'.format(repeat - 1))
def main(args):
    """Run one validation pass of the stage-0 model over the benchmark
    set and plot the recorded curves.

    NOTE(review): `log` is expected to exist at module level.
    """
    net = custom_model.buildModelStage0(args)
    loader = custom_data_loader.benchmarkLoader(args)
    records = recorders.Records(args.log_dir)
    test_utils.test(args, 'val', loader, net, log, 1, records)
    log.plotCurves(records, 'val')
def main(args):
    """Evaluate the stage-1/stage-2 model pair on the benchmark test set.

    NOTE(review): `log` is expected to exist at module level.
    """
    loader = custom_data_loader.benchmarkLoader(args)
    stage1 = custom_model.buildModel(args)
    stage2 = custom_model.buildModelStage2(args)
    records = recorders.Records(args.log_dir)
    test_utils.test(args, 'test', loader, [stage1, stage2], log, 1, records)
def main(args):
    """Validate the stage-3 model on the benchmark loader and plot the
    resulting curves.

    NOTE(review): `log` is expected to exist at module level.
    """
    net = custom_model.buildModelStage3(args)
    records = recorders.Records(args.log_dir)
    loader = custom_data_loader.benchmarkLoader(args)
    test_utils.testOnBm(args, 'val', loader, net, log, 1, records)
    log.plotCurves(records, 'val')
def main(args):
    """Run test_utils.estimate for index 5 (the range(5, 6) loop executes
    exactly once; `i` feeds the estimate call).

    Removed: dead local `rs = []` that was never used.
    """
    for i in range(5, 6):
        # path="data/Training5shadow/checkp_{}.pth.tar".format(i)
        # args.retrain=path
        log = logger.Logger(args)
        test_loader = custom_data_loader.benchmarkLoader(args)
        model = custom_model.buildModel(args)
        recorder = recorders.Records(args.log_dir)
        test_utils.estimate(args, i, test_loader, model, log, 1, recorder,
                            padding=4, stride=64, split=True)
def main(args):
    """Run the two-channel test and write the per-sample error rows to
    names.csv.

    Fixes: the CSV file is now closed via a context manager (the original
    leaked the handle if writerow raised); dead local `rs = []` removed.
    NOTE(review): `log` is expected to exist at module level.
    """
    test_loader = custom_data_loader.benchmarkLoader(args)
    model = custom_model.buildModel(args)
    recorder = recorders.Records(args.log_dir)
    errors = test_utils.test2c(args, 'test', test_loader, model, log, 1,
                               recorder)
    with open('names.csv', 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(errors)
def main(args):
    """Run a single test pass over the benchmark set, then plot the curves.

    NOTE(review): `log` is expected to exist at module level.
    """
    loader = custom_data_loader.benchmarkLoader(args)
    net = custom_model.buildModel(args)
    records = recorders.Records(args.log_dir)
    test_utils.test(args, 'test', loader, net, log, 1, records)
    log.plotCurves(records, 'test')