def main(args):
    """ Main running script """
    # Get the config file
    config = util.get_config(args.config)
    root_dir = config['ROOT_DIR']

    # fill out initial folders
    if not os.path.isdir('{}/metadata'.format(root_dir)):
        os.mkdir('{}/metadata'.format(root_dir))
        print('created metadata dir')
    for key in ('OBS_ROOT', 'ESTIMATORS_ROOT', 'PREDICTIONS_ROOT',
                'QAQC_ROOT', 'PLOT_ROOT'):
        if not os.path.isdir(config[key]):
            os.mkdir(config[key])
            print('created {} dir'.format(key[:-len('_ROOT')]))

    # --- clean database ---
    if args.clean:
        clean.main(config)
    else:
        print('skipping database cleaning')

    # --- download data ---
    if args.download:
        download.main(config)
    else:
        print('skipping download of new data')

    # --- train models ---
    if args.train:
        train.main(config)
    else:
        print('skipping training')

    # --- make predictions ---
    if args.predict:
        predict.main(config)
    else:
        print('skipping predictions')

    # --- run qaqc checks ---
    if args.qaqc:
        qaqc.main(config)
    else:
        print('skipping qaqc')

    # --- plot ---
    if args.plot:
        plot.main(config)
    else:
        print('skipping plots')

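# A minimal sketch (not from the original source) of the argparse driver that
# would produce the `args` object main(args) consumes above; the flag names
# are inferred from the attributes used and should be treated as assumptions.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Run the pipeline stages.')
    parser.add_argument('--config', required=True, help='path to the config file')
    for stage in ('clean', 'download', 'train', 'predict', 'qaqc', 'plot'):
        parser.add_argument('--' + stage, action='store_true',
                            help='run the {} stage'.format(stage))
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())
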
def main():
    logger.info("Running main ...")
    train.main()
    predict.main()
    logger.info("Run complete ...")
    return

def main():
    print('TRAINING')
    train.main()
    print('PREDICTING')
    predict.main()
    print('EVALUATING')
    evaluate.main()

def main(): print "TRAINING" train.main() print "PREDICTING" predict.main() print "EVAlUATING" evaluate.main()
def main1():
    trainimgpath = '/home/zhaojin/data/TacomaBridge/segdata/train/img/'
    checkpointsavepath = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/'
    checkpointreadpath = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/weight_logloss_softmax/CP30.pth'
    predictimgname = '/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png'
    predictimgpath = '/home/zhaojin/data/TacomaBridge/capture/high-reso-clip2_rename/'
    predictlblpath = '/home/zhaojin/data/TacomaBridge/segdata/predict/'
    predictlblvizpath = predictlblpath.rstrip('/') + "_viz"
    predict.main(['--model', checkpointreadpath,
                  '--input', predictimgname,
                  '--viz'])

def main():
    sqlite.main()
    createtable.main()
    myObj.main()
    rough.main()
    ratinggen.main()
    predict.main()
    # jsut.main()
    piechart.main()
    tocsv.main()
    predweek.main()

def test_house(self):
    # svr
    result = predict.main(f'{TEST_PATH}/house/test_config.json')
    """
    assert_frame_equal(
        result['Y_pred_df'].round(0),
        pd.read_csv(f'{TEST_PATH}/house/output.csv').round(0))
    """
    # keras
    result = predict.main(f'{TEST_PATH}/house/test_config2.json')
    # ensemble(vote)
    result = predict.main(f'{TEST_PATH}/house/test_config3.json')
    # tabnet
    result = predict.main(f'{TEST_PATH}/house/test_config4.json')

def main():
    feature.main()
    # Select parameters via grid search; this is slow and can be skipped.
    # params.main()
    # Test the model on the final week of data.
    test.main()
    # Generate the predictions.
    predict.main()

def main():
    tstart = dt.now()
    print('\nStarting the program\n')
    model_training.main()
    print('Finished training model in:\n', dt.now() - tstart)
    print('Predicting outcome of test set:\n')
    bool_ = input('start predicting? Y/N?\n')
    if bool_ == 'Y':
        tstart2 = dt.now()
        predict.main()
        print('\nFinished predicting in:\n', dt.now() - tstart2)
    print('\nFinished whole program in:\n', dt.now() - tstart)
    print('Predictions can be found in:',
          Data_importer.location_lookup()["submission_path"])

def main(test_user, max_reco):
    ind = retrieve_pickle('review_index.txt')
    idf = calc_idf(ind, test_user)
    similarity_score = find_similarity(ind, test_user)
    # sim_score = take_max(similarity_score)
    sim_score = take_avg(similarity_score)
    sim_score = sorted(sim_score.items(), key=operator.itemgetter(1),
                       reverse=True)
    best_users = select_no_of_users(sim_score, 100)
    predict.main(ind, best_users, test_user, max_reco)

def getData(roadName):
    global tempData
    road = roadName[:-2]
    url = ""
    if '1000_E' in roadName:
        url = up_1000
    elif '1000_S' in roadName:
        url = down_1000
    elif '1200_E' in roadName:
        url = up_1200
    elif '1200_S' in roadName:
        url = down_1200
    elif '0150_E' in roadName:
        url = up_0150
    elif '0150_S' in roadName:
        url = down_0150
    elif '0600_E' in roadName:
        url = up_0600
    elif '0600_S' in roadName:
        url = down_0600  # was up_0600 in the original, which broke the E/S pattern
    else:
        print('\t[+] ERROR in roadName request - {}'.format(roadName))
        return
    parsedList = parseURL(url)
    data = formatData(roadName[-1], parsedList, road)
    data = predict.main(data)
    return json.dumps(data)

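# Sketch (not from the original source): the elif chain above could be replaced
# by a table lookup; the up_*/down_* URL variables are the same module-level
# names the original relies on.
ROAD_URLS = {
    '1000_E': up_1000, '1000_S': down_1000,
    '1200_E': up_1200, '1200_S': down_1200,
    '0150_E': up_0150, '0150_S': down_0150,
    '0600_E': up_0600, '0600_S': down_0600,
}

def lookup_url(roadName):
    # first key contained in roadName wins, mirroring the substring checks above
    for key, url in ROAD_URLS.items():
        if key in roadName:
            return url
    return None
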
def eval():
    # sys.argv[1:] = [e2.get()]
    sys.argv[1:] = [e2.get(), e3.get()]
    print(e2.get())
    print(e3.get())
    dic, time1 = predict.main(predict.parse_arguments(sys.argv[1:]))
    var7.set(dic)
    var8.set(time1)

def uploaded_file(filename):
    predict.FilePaths.fnInfer = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    result = predict.main()
    return render_template('second.html',
                           text=result['Recognized'],
                           prob=result['Probability'],
                           accu=result['Accuracy'])

def textres(request):
    if request.method == "POST":
        x = request.POST.get('content')
        temp = get_template('textres.html')
        new_x = str(x)
        val = predict.main(new_x)
        cont = RequestContext(request, {'string': x, 'sentiment': val})
        return HttpResponse(temp.render(cont))

def get_caption(filename):
    os.chdir('../ImageCaptionGenerator')
    src = os.path.join('../image_autocaption/static/images', filename)
    dst = os.path.join('image/pred', filename)
    shutil.copyfile(src, dst)
    caption = predict.main()
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    return caption

def main():
    import json
    import requests
    from bs4 import BeautifulSoup
    import predict

    title = input('Search by title: ')
    year = input('Year of title (Optional): ')
    omdbapi_url = 'http://www.omdbapi.com/?t=' + title + '&y=' + year + '&apikey=1fb69a60'
    s = requests.session()
    r = s.get(omdbapi_url)
    omdbapi_response = json.loads(r.text)
    reviews = []
    if omdbapi_response['Response'] == 'True':
        imdbID = omdbapi_response['imdbID']
        title = omdbapi_response['Title']
        year = omdbapi_response['Year']
    elif omdbapi_response['Response'] == 'False':
        print(omdbapi_response['Error'])
        input('No movie is found.\nPress Enter to continue...')
        return reviews
    imdb_url = 'https://www.imdb.com/title/' + imdbID + '/reviews/_ajax?'
    processed = 0
    while True:
        r = s.get(imdb_url)
        soup = BeautifulSoup(r.text, 'html.parser')
        for div in soup.findAll('div', 'text show-more__control'):
            reviews.append(div.text)
            processed += 1
            print(processed, ' reviews retrieved.', end='\r')
        load_more_data = soup.findAll('div', 'load-more-data')
        if load_more_data == []:
            break
        else:
            imdb_url = ('http://www.imdb.com/title/' + imdbID +
                        '/reviews/_ajax?paginationKey=' +
                        load_more_data[0]['data-key'])
    print('\nAll reviews retrieved.')
    predict.main(reviews, title=title, year=year)

def main():
    # 'pi' here is presumably the pickle module imported as `pi` elsewhere
    with open('pi', 'rb') as f:
        data = pi.load(f)
    lt, count = [], 0
    print('Select any one of the following to visualize graph')
    for i in data:
        lt.append(i)
        print(count, i)
        count += 1
    print('Press any other key to visualize all graphs')
    try:
        r = int(input('Enter number:-'))
    except ValueError:
        print('Only integers are allowed.')
        exit()
    if 0 <= r < count:
        predict.main(True, lt[r])
    else:
        predict.main(False, 'No')

def test(name=None):
    if request.method == 'POST':
        result = request.form
        result = pd.Series(result)
        query_input = str(result[0])
        predictions = predict.main(query_input)
        predictions = json.dumps(predictions[0])
        return render_template('test.html', userinput=predictions)
    else:
        return render_template('test.html')

def startLoad(self):
    self.startLoading.setEnabled(False)
    self.statusLabel.setText("Scanning Images.")
    scanner.main(MainWindow.form_id)
    self.statusLabel.setText("Images Scanned. Cropping Images.")
    cropper.main(MainWindow.form_id)
    self.statusLabel.setText("Images Cropped. Finding Tags.")
    PatternLocator.main(MainWindow.form_id)
    self.statusLabel.setText("Tags Located. Extracting Data.")
    predict.main(MainWindow.form_id)
    self.statusLabel.setText("Done.")
    self.startVerifying.setEnabled(True)
    temp = list(np.load("results.npy"))
    for each in temp:
        MainWindow.result.append(list(each))
    MainWindow.x, MainWindow.y = len(MainWindow.result), len(MainWindow.result[1])

def result():
    file = request.files['dataset']
    interval = request.form['interval']
    actualValue, lPredictedValue, sPredictedValue = main(file, interval)
    return render_template('result.html',
                           a=actualValue,
                           lp=lPredictedValue,
                           sp=sPredictedValue,
                           len=len(actualValue))

def index_get():
    if request.method == 'GET':
        return render_template('index.html')
    elif request.method == 'POST':
        img = request.files['file']
        img = img.stream.read()
        bin_data = io.BytesIO(img)
        file_bytes = np.asarray(bytearray(bin_data.read()), dtype=np.uint8)
        img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        result = main(img)
        print(result)
        return render_template('result.html', result=result)

def result():
    global x, y, z
    file = request.files['dataset']
    interval = request.form['interval']
    destination = 'static/dataset.csv'
    file.save(destination)
    linear, rbf, date, a1, a2, coef, const = main(str(interval), destination)
    x = date
    y = linear
    z = rbf
    return render_template('result.html',
                           valv=[linear, rbf, date, a1, a2, coef, const,
                                 len(linear)])

def main():
    commands = ['train', 'predict', 'evaluate']
    help_msg = '''
        Usage: cliner [OPTIONS] COMMAND [ARGS]...

        Options:
            --help     Show this message and exit.

        Commands:
            %s
    ''' % '\n            '.join(commands)

    # Is the argument correct?
    if len(sys.argv) < 2 or sys.argv[1] not in commands or sys.argv[1] == '--help':
        sys.stderr.write('%s\n\n' % help_msg)
        exit(1)

    # select appropriate sub-command
    subcmd = sys.argv[1]
    del sys.argv[1]

    # Where to import code from
    homedir = os.path.dirname(os.path.abspath(__file__))
    codedir = os.path.join(homedir, 'code')
    if codedir not in sys.path:
        sys.path.append(codedir)

    # Call the appropriate sub-command
    if subcmd == 'train':
        import train
        train.main()
    elif subcmd == 'predict':
        import predict
        predict.main()
    elif subcmd == 'evaluate':
        import evaluate
        evaluate.main()

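# Usage sketch (illustrative, not from the source): the dispatcher strips the
# sub-command from sys.argv, so each sub-module parses only its own arguments.
# The flags below are hypothetical; every sub-module defines its own parser.
#
#   python cliner train    --txt data/train.txt --model models/run.model
#   python cliner predict  --txt data/test.txt  --model models/run.model
#   python cliner evaluate --predictions out/   --gold gold/
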
def run_dir(in_path, out_path):
    f = open('error.txt', 'w')
    cwd = os.getcwd()
    for item in os.listdir(in_path):
        os.chdir(cwd)
        e = True
        if item.endswith('.wav'):
            out_file_path = out_path + item.replace('.wav', '.TextGrid')
            if not os.path.exists(out_file_path):
                e = main(in_path + item, out_file_path)
        if not e:
            f.write(item + '\n')
            print('ERROR with file: ' + item)
    f.close()

def model_predict(img):
    """
    Returns severity of diagnosis
    :returns : prediction
    """
    print('Loading Model...')
    # Getting the score of the diagnosis
    score = predict.main()
    print('Model loaded. Check http://127.0.0.1:5000/')
    sev_diag = severity(score)
    return sev_diag

def get_video_stream():
    """
    Here is where we receive streamed images from the Kafka Server and convert
    them to a Flask-readable format.
    """
    for msg in consumer:
        # print(type(msg.value))  # class <bytes>
        # kafka msg.value arrives as bytes; wrap it so PIL can read the binary image
        imageStream = io.BytesIO(msg.value)
        # imageStream holds the image's binary data as a file-like object
        imageFile = Image.open(imageStream)
        b = io.BytesIO()
        filename = "test.jpeg"
        imageFile.save(filename, format="jpeg")
        # test = imageFile.save(imageStream, format="jpeg")
        # print(type(test))
        predict.main()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpg\r\n\r\n' + msg.value + b'\r\n\r\n')

def stacked_dataset(members):
    stackX = None
    for i in members:
        yhat = main(i)
        print(yhat.size())
        # stack predictions into [rows, members, probabilities]
        if stackX is None:
            stackX = yhat
        else:
            stackX = dstack((stackX, yhat))
    # flatten predictions to [rows, members x probabilities]
    print(type(stackX))
    stackX = stackX.reshape(
        (stackX.shape[0], stackX.shape[1] * stackX.shape[2]))
    return stackX

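# A minimal sketch (an assumption, not part of the original) of the usual next
# step in stacked generalization: fit a meta-learner on the flattened member
# predictions. `members` and the validation labels `y_val` are assumed inputs.
import numpy as np
from sklearn.linear_model import LogisticRegression

def fit_stacked_model(members, y_val):
    # flattened member predictions, shape [rows, members * probabilities]
    stackX = np.asarray(stacked_dataset(members))
    meta = LogisticRegression(max_iter=1000)
    meta.fit(stackX, y_val)  # learn how to weight each member's output
    return meta
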
def lips_to_text():
    # try:
    start = time.time()
    video = request.files.get("video")
    if video is None:
        return Response("No video received", status=400)
    vname = "video." + video.filename.split(".")[-1]
    start0 = time.time()
    video.save(vname)
    print("video saving ", time.time() - start0)
    result = predict.main(vname)
    print("request time ", time.time() - start)
    return Response(result)

def setPredictWindow(conn):
    print('predictwindow!!!!')
    # title, id, url, channel, duration, viewcount
    # After prediction, send the exercise name and then the video list, in order.
    exercise_name = Predict.main()
    conn.sendall(
        pack('i', len(exercise_name.encode('utf-8'))) +
        exercise_name.encode('utf-8'))
    setSelectWindow(exercise_name, conn)
    os.system(
        'cd C:\\Users\\MBM\\Desktop\\TRAIN\\video_classification\\data\\predict'
    )
    os.system('del *.jpg')

def getAnswers(self):
    res = []
    # import predict
    # TODO: pass the input image path and question: predict.main(img, question)
    # get answers from the model's eval forward pass
    ##### ASK QUESTION HERE
    # for i in range(len(self.answers)):
    #     self.answers[i].set(res[i])
    ans_map, answer_probab_tuples = predict.main(self.filename,
                                                 self.getQuestion())
    for i in range(5):
        res.append(ans_map[answer_probab_tuples[i][1]] + "," +
                   "%0.2f" % (-100 * answer_probab_tuples[i][0]) + "%")
        self.answers[i].set(res[i])

def detection():
    if request.method == 'POST':
        image_b64 = request.values['image']  # get the image from the HTML form as base64 data
        image_b64 = re.sub('^data:image/.+;base64,', '', image_b64)  # strip the metadata prefix
        image_data = BytesIO(base64.b64decode(image_b64))  # decode the base64 data
        image = Image.open(image_data)
        predictions = main(image_data)  # hand the frame to the Custom Vision model
        image = draw_boxes(image, predictions)  # draw the bounding boxes
        # if no or too few objects are found, send the user back
        if image is None:
            return render_template('camera.html',
                                   message="Nenalezeno dost objektů, zkuste to prosím znovu.")
        # save the frame with the boxes drawn in
        image = image.save(os.path.join(app.config['IMAGE_UPLOADS'],
                                        '{}.png'.format(predictions[0]['probability'])))
        return render_template('choice.html',
                               user_image=os.path.join(app.config['IMAGE_UPLOADS'],
                                                       '{}.png'.format(predictions[0]['probability'])),
                               port1=predictions[0]['tagName'],
                               port2=predictions[1]['tagName'])

def index():
    # Parse the incoming JSON request body
    content = request.get_json(silent=True)
    print(content)
    n = json.dumps(content)
    j = json.loads(n)
    print(j['question'])
    question = j['question']
    answer = predict.main(question)  # os.system('predict.py')
    results = [
        {'param': 'answer', 'val': answer},
    ]
    # jsonify does all the work for us, returning the
    # data structure above as JSON
    return jsonify(results=results)

print "Creating a development set for cross-validation" dev = defaultdict(list) for i in range(DEV_SIZE/2): dev['pos'].append(data.train['pos'].pop()) dev['neg'].append(data.train['neg'].pop()) pos = data.train['pos'][:] neg = data.train['neg'][:] print "Batch loop - running train and predict" for i in range(BATCHES): print "****** Batch #%d ******" % (i+1) # Build training set t = defaultdict(list) for j in range(BATCH_SIZE/2): t['pos'].append(pos.pop()) t['neg'].append(neg.pop()) # train main(t) with open(os.path.join(TMP_DIR, 'batch_%d.txt' % (i)), 'w') as f: # predict predict.main(dev, f)
def run_dir(in_path, out_path):
    for item in os.listdir(in_path):
        if item.endswith('.wav'):
            out_file_path = out_path + item.replace('.wav', '.TextGrid')
            main(in_path + item, out_file_path)

def main():
    parser = argparse.ArgumentParser(
        description="CliRel (Clinical Relation) extractor - trains a "
                    "classifier able to determine the type of relation "
                    "between two medical concepts in a sentence.")
    parser.add_argument("--train", nargs=3,
                        metavar=("train_dir", "model_file", "model_type"),
                        type=str, default=None,
                        help="Directory should contain three subdirs (txt, "
                             "concept, rel) containing .txt, .con, .rel files. "
                             "Will train a classifier on this data. The trained "
                             "model will be written to the specified model file. "
                             "Current model types: [svm-spt, svm-insert, svm-suffix]")
    parser.add_argument("--predict", nargs=3,
                        metavar=("test_dir", "model_file", "results_dir"),
                        type=str, default=None,
                        help="Directory contains concept and text files that "
                             "the specified (or default) model will predict. "
                             "Resulting relation files will be written to the "
                             "specified results directory.")
    parser.add_argument("--evaluate", nargs=3,
                        metavar=("test_dir", "gold_dir", "eval_file"),
                        type=str, default=None,
                        help="Evaluate the relation files in the test directory "
                             "in comparison with those in the gold directory. "
                             "The results will be written to the evaluation file.")
    parser.add_argument("--verbose", action="store_true", default=False,
                        help="Show debugging info.")

    args = parser.parse_args()

    if not args.predict and not args.train and not args.evaluate:
        sys.stderr.write("ERROR: No valid flag specified.\n")
        parser.print_help()
        sys.exit(1)

    if args.train:
        checkDir(args.train[0])
        checkDir(os.path.dirname(args.train[1]))
        if os.path.isdir(args.train[1]):
            sys.stderr.write("ERROR: Model expected to be a file, "
                             "%s is a directory\n" % args.train[1])
            sys.exit(1)
        train.main(args.train[0], args.train[1], args.train[2], args.verbose)

    if args.predict:
        checkDir(args.predict[0])
        checkFile(args.predict[1])
        checkDir(args.predict[2])
        predict.main(args.predict[0], args.predict[1], args.predict[2],
                     args.verbose)

    if args.evaluate:
        checkDir(args.evaluate[0])
        checkDir(args.evaluate[1])
        checkDir(os.path.dirname(args.evaluate[2]))
        if os.path.isdir(args.evaluate[2]):
            sys.stderr.write("ERROR: eval_file expected to be a file, "
                             "%s is a directory\n" % args.evaluate[2])
            sys.exit(1)
        evaluate.main(args.evaluate[0], args.evaluate[1], args.evaluate[2],
                      args.verbose)

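# Usage sketch (illustrative; the script name clirel.py is an assumption):
#
#   python clirel.py --train    train_dir/ model.bin svm-spt
#   python clirel.py --predict  test_dir/  model.bin results/
#   python clirel.py --evaluate results/   gold/     eval.txt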