def predict(predict_data):
    """Run an analysis prediction for one machine over a date range.

    predict_data is a mapping expected to carry the keys 'machineId',
    'first_date' and 'last_date'; missing keys are forwarded as None.
    Returns an HTTP 200 response confirming the prediction ran.
    """
    machine_id = predict_data.get('machineId')
    period_start = predict_data.get('first_date')
    period_end = predict_data.get('last_date')
    analysis.predict(machine_id, period_start, period_end)
    return make_response("Successfully predicted on given period", 200)
def analysismenu_print(v, f, d):
    """Print the analysis menu and dispatch on the user's numeric choice.

    Choice 1 runs analysis.summary, choice 2 runs analysis.predict;
    any other value (including the advertised 3) falls through with
    no action inside this function.
    """
    menu = (
        "=============MENU=============\n"
        "1 - Regression Analysis Summary\n"
        "2 - Predict a Players Dispossession Value\n"
        "3 - Change Data"
    )
    print(menu)
    selection = int(input("============CHOICE============ \nInput - "))
    if selection == 1:
        from analysis import summary
        summary(v, f, d)
    elif selection == 2:
        from analysis import predict
        predict(v, f, d)
def predict(target_data=None, features=None, labels=None, dataset_path='models/data.csv', model_name='fraud_model'):
    """Delegate a prediction request to analysis.predict.

    Args:
        target_data: rows to predict on; defaults to an empty list.
        features: feature column names; defaults to an empty list.
        labels: label column names; defaults to an empty list.
        dataset_path: CSV file backing the model.
        model_name: identifier of the stored model to use.

    Returns:
        Whatever analysis.predict returns (its result dict/value is
        passed through unchanged).
    """
    # Fix: the original used mutable default arguments ([]), which are
    # created once and shared across all calls. Bind fresh lists per call;
    # callers relying on the old defaults still get an empty list.
    if target_data is None:
        target_data = []
    if features is None:
        features = []
    if labels is None:
        labels = []
    return analysis.predict(target_data, features, labels, dataset_path, model_name)
# Command-line setup: restrict which GPUs CUDA may use for this run.
parser.add_argument('--gpus', default='0', type=str, help='visible devices for CUDA')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

# Load the pre-cached ground-truth boxes for the selected dataset.
# NOTE(review): these use Windows backslash paths while the 'anchors/...'
# loads below use forward slashes — non-portable; confirm intended platform.
test_gts = torch.load({
    'VOC': 'truths\\gts_voc07test.pth',
    'COCO': 'truths\\gts_coco_17val.pth',
    'SHWD': 'truths\\gts_shwd_test.pth'
}[args.dataset]).float().cuda()

# Header and row format for the per-anchor-set metric table printed below.
print('name\tloss\tpower1/3\tgeomean\tmean\trecall\tpower3\tbestgts')
template = '\t%.2f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f'

# Baseline 1: SSD anchors fit on COCO.
bl = torch.load('anchors/ssd_coco_anchors.pth')
bl = bl.cuda()
_, bl_a = predict(bl, test_gts, True)
print('ssdcoco' + template % tuple(bl_a))

# Baseline 2: SSD anchors fit on VOC.
bl = torch.load('anchors/voc_baseline.pth')
bl = bl.cuda()
_, bl_a = predict(bl, test_gts, True)
print('ssdvoc' + template % tuple(bl_a))

# Baseline 3: YOLOv3 anchors (detached — presumably saved with grad history).
bl = torch.load('anchors/yolov3_anchors.pth')
bl = bl.detach().cuda()
_, bl_a = predict(bl, test_gts, True)
print('yolo' + template % tuple(bl_a))

# Parse the candidate list: ';'-separated entries of 'algo,path'.
test_lst = [tuple(o.split(',')) for o in args.test_pths.split(';')]
for i, (algo, pth) in enumerate(test_lst):
    # NOTE(review): this loop body appears truncated at the end of the
    # visible chunk — it continues beyond this point.
    if algo == 'SSD300':
        cfg = config_dict[args.dataset]
def train():
    """Optimize a set of anchor boxes against cached ground-truth boxes.

    Runs 6000 SGD iterations of MixedIOULoss over the anchors produced by
    the algorithm-specific IO adapter, with LR decay at iterations 4000 and
    5000, periodic checkpointing to ./cache/, optional TensorBoard logging,
    and a final save to args.save_pth.

    Relies on module-level state defined elsewhere in the file: args,
    dataset, writer, file_name, bl_results, test_gts, predict,
    adjust_learning_rate — TODO confirm all are initialized before calling.
    """
    # Choose the IO adapter matching the detection algorithm.
    if args.algo == 'SSD300':
        apt = IOAdapterSSD(config_dict[args.dataset], args.random_init)
    # args.algo == 'YOLOv3'
    else:
        apt = IOAdapterYOLOv3(random_range=args.random_init)
    # init params: anchors (the optimized tensor), anchor->feature-map and
    # feature-map->locations mappings, and per-group masks.
    anchs, anch2fmap, fmap2locs, msks = apt.fit_input((1,))
    # create data loader
    data_loader = BoundingBoxesLoader(dataset, None, args.batch_size, shuffle=True,
                                      drop_last=True, cache_pth=args.truths_pth)
    b_iter = iter(data_loader)
    # create optimizer
    # optimizer = optim.SGD(params + [alphas], lr=args.lr, momentum=args.momentum,
    #                       weight_decay=args.weight_decay)
    optimizer = optim.SGD([anchs], lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # create loss function
    loss_fn = MixedIOULoss(ignore_size=args.ignore_size, lambda_=args.l)
    gen_fn = AnchorsGenerator(anchs, anch2fmap, fmap2locs)
    step = 0
    # train
    for iteration in range(6000):
        # Cycle the loader indefinitely: restart the iterator on exhaustion.
        try:
            truths = next(b_iter)
        except StopIteration:
            b_iter = iter(data_loader)
            truths = next(b_iter)
        # LR decay schedule: one 0.1x step at 4000 and another at 5000.
        if iteration in (4000, 5000):
            step += 1
            adjust_learning_rate(optimizer, 0.1, step)
        truths = truths.float().cuda() if args.cuda else truths.float()
        optimizer.zero_grad()
        # Per-mask losses summed into one scalar.
        # NOTE(review): fixed size 8 assumes len(msks) <= 8 — confirm.
        loss = torch.zeros(8)
        for i, msk in enumerate(msks):
            tmp_anchs = gen_fn(msk)
            loss[i] = loss_fn(tmp_anchs, truths)
        loss = loss.sum()
        loss.backward()
        optimizer.step()
        # Keep anchors in normalized image coordinates after each step.
        with torch.no_grad():
            anchs.clamp_(0., 1.)
        if args.log:
            # NOTE(review): args.save_pth is used as the scalar tag — confirm.
            writer.add_scalar(args.save_pth, loss.item(), iteration + 1)
        if (iteration + 1) % 10 == 0:
            print('iter %d: loss=%.4f' % (iteration + 1, loss.item()))
        # Periodic checkpoint of the full anchor state.
        if (iteration + 1) % args.cache_interval == 0:
            if not os.path.exists('./cache/'):
                os.mkdir('./cache/')
            pth = './cache/%s_iter%d.pth' % (file_name, iteration + 1)
            torch.save((anchs, anch2fmap, fmap2locs, msks), pth)
            print('save cache to %s ' % pth)
            # Optionally evaluate the current anchors against the held-out
            # ground truths and print alongside the baseline results.
            if args.test_per_cache:
                print(bl_results)
                with torch.no_grad():
                    maps = []
                    for i, msk in enumerate(msks):
                        tmp_anchs = gen_fn(msk)
                        maps.append(predict(tmp_anchs, test_gts, True))
                    print('\n'.join(['%dxAnchs = %.4f [loss:%.2f|power1/3:%.4f|geo mean:%.4f|'
                                     'mean:%.4f|recall:%.4f|power3:%.4f|best gt:%.4f]'
                                     % (i + 1, o, *l) for i, (o, l) in enumerate(maps)]))
    # Move everything back to CPU before the final save.
    for fmap, locs in fmap2locs.items():
        fmap2locs[fmap] = locs.cpu()
    msks = [msk.cpu() for msk in msks]
    torch.save((anchs.detach().cpu(), anch2fmap, fmap2locs, msks), args.save_pth)
    if args.algo == 'YOLOv3':
        print(apt.fit_output())
# Derive a short run name from the save path, used for cache file naming.
file_name = get_file_name_from_path(args.save_pth)
if args.log and 'train' in modes:
    from datetime import datetime
    # One TensorBoard run directory per timestamped invocation.
    writer = SummaryWriter('runs/adaptive_priors_loss/%s/' % datetime.now().strftime("%Y%m%d-%H%M%S"))
if args.cuda:
    # All subsequently created tensors default to CUDA floats.
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
dataset = None
clamp = True
# Held-out ground-truth boxes used for baseline and periodic evaluation.
test_gts = torch.load(args.eval_pth).float().cuda()
# YOLOv3 anchors as the reference baseline; its metrics are formatted once
# and reprinted alongside later evaluations.
bl = torch.load('anchors/yolov3_anchors.pth')
bl = bl.detach().cuda()
bl_y, bl_a = predict(bl, test_gts, True)
bl_results = 'baseline = %.4f [loss:%.2f|power1/3:%.4f|geo mean:%.4f|mean:%.4f|recall:%.4f|power3:%.4f|best gt:%.4f]'\
             % (bl_y, *bl_a)


def train():
    # NOTE(review): this definition is truncated at the end of the visible
    # chunk — its body continues beyond this point.
    # Choose the IO adapter matching the detection algorithm.
    if args.algo == 'SSD300':
        apt = IOAdapterSSD(config_dict[args.dataset], args.random_init)
    # args.algo == 'YOLOv3'
    else:
        apt = IOAdapterYOLOv3(random_range=args.random_init)
    # init params
    anchs, anch2fmap, fmap2locs, msks = apt.fit_input((1,))
    # create data loader
import numpy as np
import analysis

# Location of the training data and name of the stored fraud model.
dataset_path = 'models/data.csv'
model_name = 'fraud_model'
# Feature column names, in the order the model expects.
features = [
    'userId', 'profileCompleted', 'visitedCountries', 'depositAmount',
    'totalSpendingsInt', 'spendings', 'maxDailySpending'
]
labels = []
# One sample row matching the `features` order above.
test_data = [191670, 1, 2, 2492270, 14, 82939, 20000]
# Reshape into a (1, n_features) 2-D array for a single-row prediction.
target_data = np.array(test_data).reshape(-1, len(test_data))
x = analysis.predict(target_data, features, labels, dataset_path, model_name, True)
# NOTE(review): result keys 'prediction' and 'pred' are assumed to exist in
# the returned mapping — confirm against analysis.predict; z is unused here.
y = x["prediction"]
print(y)
z = x["pred"]