import zipfile

import numpy as np
from PIL import Image

import utils


def process(plt, dir, depth, rgb):
    # extract depth map
    with zipfile.ZipFile(dir + '/depth/' + depth, 'r') as zip_ref:
        zip_ref.extractall('.')
    utils.parseData('data')

    # read rgb data
    global im_array
    if rgb:
        width = utils.getWidth()
        height = utils.getHeight()
        pil_im = Image.open(dir + '/rgb/' + rgb)
        # Image.ANTIALIAS was renamed Image.LANCZOS in newer Pillow releases
        pil_im = pil_im.resize((width, height), Image.ANTIALIAS)
        im_array = np.asarray(pil_im)
    else:
        im_array = 0

    # parse calibration
    global calibration
    calibration = utils.parseCalibration(dir + '/camera_calibration.txt')
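# Hypothetical call to process(); the directory layout and file names below are
# assumptions inferred from the paths the function builds, not project fixtures.
import matplotlib.pyplot as plt

capture_dir = './capture_01'
process(plt, capture_dir, 'frame_000.zip', 'frame_000.png')  # pass '' as rgb to skip the image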
import sys


def main():
    df = parseData(sys.argv[1])
    #print(df)
    df = processData(df)
    #print(df)
    drawFig(df, 'time')
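# Minimal sketches of the helpers main() relies on; these are assumptions for
# illustration (CSV input with a 'time' column), not the project's actual code.
import pandas as pd
import matplotlib.pyplot as plt

def parseData(path):
    # assume the input file is a CSV with one row per measurement
    return pd.read_csv(path)

def processData(df):
    # hypothetical cleanup: drop incomplete rows and sort chronologically
    return df.dropna().sort_values('time')

def drawFig(df, x_col):
    # plot every numeric column against the chosen x axis
    df.plot(x=x_col)
    plt.show()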
import connection
import utils
import csvExport

# Platform of the target app store
platform = 'android'
# Keyword (terms separated by blanks or by the logic commands OR/AND/NOT)
keyword = 'diabetes OR mellitus'
# App categories (separated by commas)
categories = 'MEDICAL, LIFESTYLE, EDUCATION, HEALTH_AND_FITNESS'
# Token of 42Matters
token = '6dc3eba38263b374e06986f69c876c3ea6cb2f9f'
# App's metadata language (all languages if the argument is left blank)
langs = 'en'

con = connection.Connection()
con.set_categories(categories)
con.set_keyword(keyword)
con.set_langs(langs)
con.set_os(platform)
con.set_token(token)
data = con.get_data()

utils = utils.Utils()
result = utils.parseData(data)

csv_file_name = 'result.csv'
csv_export = csvExport.csvExport()
csv_export.set_file_name(csv_file_name)
csv_export.write_to_csv(result)
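# A minimal sketch of the csvExport helper assumed above, built on the standard
# csv module; the project's real class may structure its rows differently.
import csv

class csvExport:
    def set_file_name(self, file_name):
        self.file_name = file_name

    def write_to_csv(self, rows):
        # rows is assumed to be a list of dicts, one per app record
        if not rows:
            return
        with open(self.file_name, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()))
            writer.writeheader()
            writer.writerows(rows)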
# optimizer constructor (Adam assumed here, since betas are configured)
optimizer = torch.optim.Adam(model.parameters(), args.init_lr,
                             betas=(args.beta_1, args.beta_2))
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones,
                                                 gamma=args.lr_decay, last_epoch=-1)

if args.masked_loss:
    print('using mask')
    criterion = utils.Criterion_mask(args)
else:
    criterion = utils.Criterion(args)

model.train()
for epoch in range(1, args.epochs + 1):
    print('---- Start Training Epoch %d: %d batches ----' % (epoch, len(train_loader)))
    scheduler.step()
    for i, sample in enumerate(train_loader):
        data = utils.parseData(args, sample, 'train')
        input = [data['input']]
        if args.in_light:
            input.append(data['l'])
        output = model(input)
        optimizer.zero_grad()
        loss = criterion.forward(output, data['tar'])
        criterion.backward()
        optimizer.step()
    print("Loss in epoch %d: %.3f" % (epoch, loss))

torch.save(model, './TrainedModels/model_new.pth.tar')
print("saved the model")
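# A minimal sketch of the Criterion wrapper the training loop relies on:
# forward() returns a scalar loss value and backward() backpropagates it.
# The cosine loss on per-pixel normals is an assumption for illustration,
# not necessarily what utils.Criterion actually implements.
import torch
import torch.nn as nn

class Criterion:
    def __init__(self, args):
        self.loss_fn = nn.CosineEmbeddingLoss()

    def forward(self, output, target):
        # flatten (N, 3, H, W) normal maps to (P, 3) and push cosine similarity to 1
        pred = output.permute(0, 2, 3, 1).reshape(-1, 3)
        tar = target.permute(0, 2, 3, 1).reshape(-1, 3)
        flag = torch.ones(pred.shape[0], device=pred.device)
        self.loss = self.loss_fn(pred, tar, flag)
        return self.loss.item()

    def backward(self):
        self.loss.backward()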
test_set = DiLiGenT_main(args, 'test')
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch,
                                          num_workers=args.workers,
                                          pin_memory=args.cuda, shuffle=False)

model.eval()
print('---- Testing for %d images - DiLiGenT Dataset ----' % (len(test_loader)))
err_mean = 0
with torch.no_grad():
    for i, sample in enumerate(test_loader):
        data = utils.parseData(args, sample, 'test')
        input = [data['input']]
        if args.in_light:
            input.append(data['l'])
        output = model(input)
        acc = utils.errorPred(data['tar'].data, output.data, data['m'].data)
        err_mean = err_mean + acc
        print('error: %.3f' % (acc))
        result = (output.data + 1) / 2
        result_masked = result * data['m'].data.expand_as(output.data)
        save_path = './Results/' + 'img8_mask_%d.png' % (i + 1)
        tv.utils.save_image(result_masked, save_path)
        print('saved image %d' % (i + 1))
# mean of the per-batch errors (averaging over the number of test batches is assumed)
print('------------ mean error: %.3f ------------' % (err_mean / len(test_loader)))
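# A possible implementation of utils.errorPred as the mean angular error (in
# degrees) between predicted and ground-truth normals over masked pixels; this
# is an assumption for illustration, not the project's definition.
import torch
import torch.nn.functional as F

def errorPred(target, pred, mask):
    # normalise both normal maps, take the per-pixel angle between them,
    # and average over the foreground pixels selected by the mask
    dot = (F.normalize(pred, dim=1) * F.normalize(target, dim=1)).sum(dim=1, keepdim=True)
    angles = torch.rad2deg(torch.acos(dot.clamp(-1.0, 1.0)))
    valid = mask.narrow(1, 0, 1) > 0.5
    return angles[valid].mean().item()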
from flask import Flask

#import src.api.utils as utils
import config
import utils

app = Flask(__name__)
logger = utils.setup_logger()


@app.route('/')
def hello_world():
    return 'Running Correctly!'


if __name__ == '__main__':
    if config.STARTUP["DOWNLOAD"]:
        utils.downloadAllData()
    if config.STARTUP["EXTRACT"]:
        utils.extractData()
    if config.STARTUP["PARSE"]:
        data = utils.parseData()
    if config.STARTUP["REBUILD_DB"]:
        utils.buildDB(data)
    #app.run(debug=config.DEBUG, host = config.HOST)
    app.run(host=config.HOST)
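# A minimal config.py matching what the startup block above reads; the key
# names come from the code, the values and HOST are placeholder assumptions.
STARTUP = {
    "DOWNLOAD": False,     # fetch the raw data archives
    "EXTRACT": False,      # unpack the downloaded archives
    "PARSE": True,         # parse the raw files into records
    "REBUILD_DB": True,    # rebuild the database from the parsed records
}

DEBUG = False
HOST = "0.0.0.0"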