def run_caption(parser_cap):
    args = parser_cap.parse_args()
    args = vars(args)
    opt_cap = json.load(open(args["recover_opt"]))
    for k, v in args.items():
        opt_cap[k] = v
    os.environ['CUDA_VISIBLE_DEVICES'] = opt_cap["gpu"]
    eval.main(opt_cap)
    return 0
def test_build_graph(self, eval_real_images, conditional_eval):
    FLAGS.eval_real_images = eval_real_images
    FLAGS.conditional_eval = conditional_eval
    # Mock `frechet_inception_distance` and `inception_score`, which are
    # expensive.
    with mock.patch.object(
            eval.util, 'get_frechet_inception_distance') as mock_fid:
        with mock.patch.object(
                eval.util, 'get_inception_scores') as mock_iscore:
            mock_fid.return_value = 1.0
            mock_iscore.return_value = 1.0
            eval.main(None, run_eval_loop=False)
def random():
    url = request.data.decode('UTF-8')
    # print("URL printing: ", url)
    useful = url.split("/o/")[1]
    uid, transid = useful.split("%2F")[0:2]
    print(uid, transid)
    name = uid + "_" + transid + ".png"
    os.system("wget \"{}\" -O static/uploads/{}".format(url, name))
    json = main(name)
    json["Link"] = url
    json["Status"] = 0
    json["Category"] = "Misc"
    '''
    json = {
        "Address": "Ground Floor, Shop no. 12 13 20 21& 22",
        "Amount": "99.76",
        "Category": "Shopping",
        "Company": "TAX INVOCE",
        "Date": "2/26/2020",
        "Items": "1 Reg HT PM Paneer",
        "Link": "https://firebasestorage.googleapis.com/v0/b/expense-tracker-7e30c.appspot.com/o/UUNs2qVregW6zrFJDQd7OEaKNV72%2F17%2FJPEG_20200331_182936.jpg?alt=media&token=ffabed8d-e1fd-43cf-b39f-5a07f1b86f8e",
        "Status": "0"
    }
    '''
    # print(json)
    result = firebase.put('/Bills/{}/'.format(uid), transid, json)
    return "Job done!"
def upload_and_crop():
    if request.method == "POST":
        f = request.files["image"]
        uid = request.form.get("uid")
        billid = request.form.get("billid")
        image_name = uid + "_" + billid
        basedir = os.path.abspath(os.path.dirname(__file__))
        f.save(os.path.join(basedir, "static/uploads/", image_name + ".png"))
        option = request.form.get("option")
        option = int(option.split(".")[0])
        # print(request.form.get("checkbox"))
        if option == 1:
            return render_template("crop.html", image_name=image_name)
        if option == 2:
            json = main(
                os.path.join(basedir, "static/uploads/", image_name + ".png"))
            print(json, type(json))
            return render_template("result.html",
                                   image_name=image_name,
                                   json=json,
                                   uid=image_name.split("_")[0],
                                   billid=image_name.split("_")[1])
def submit():
    if request.method == "POST":
        type = request.form.get("traffic_type")
        expected = type
        type = type.lower()
        print(type)
        attacks = ["normal", "dos", "r2l", "u2r", "probe"]
        if type not in attacks:
            return render_template("index.html")
        pred, prob = main(type)
        dict = {"expected": expected,
                "predictions": attacks[pred],
                "normal": prob[0],
                "dos": prob[1],
                "u2r": prob[3],
                "r2l": prob[2],
                "probe": prob[4]}
        return render_template("result.html", dict=dict)

# Commands to run
# -------------------------------------------------------------------------------------------
# export FLASK_APP=server.py
# export FLASK_DEBUG=1
# python -m flask run --host 0.0.0.0 --port 5000
def crop_and_result():
    id, x1, y1, x2, y2 = request.form["id"], int(request.form["x1"]), int(
        request.form["y1"]), int(request.form["x2"]), int(request.form["y2"])
    print(id, x1, y1, x2, y2)
    img = cv2.imread("./static/uploads/{}.png".format(id))
    yo, xo, ch = img.shape
    scale = yo / 650
    y1_new = scale * y1
    x1_new = scale * x1
    y2_new = scale * y2
    x2_new = scale * x2
    img_crop = img[int(y1_new):int(y2_new), int(x1_new):int(x2_new)]
    cv2.imwrite("./static/uploads/{}_crop.png".format(id), img_crop)
    time.sleep(1)
    basedir = os.path.abspath(os.path.dirname(__file__))
    json = main(os.path.join(basedir, "static/uploads/", id + "_crop.png"))
    return render_template("result.html",
                           image_name=id + "_crop",
                           json=json,
                           uid=id.split("_")[0],
                           billid=id.split("_")[1])
    ],
    'Dataset30_2019-05-29_22-35':
    ['Dataset30', 'Dataset31', 'Dataset32', 'Dataset33', 'Dataset34']
}

pascal_dir = 'pascal_2019-05-29_08-20'
cuda = False

p = params.get_params()
p.add('--in-dir')
p.add('--root-dir')
cfg = p.parse_args()
cfg.root_dir = root_dir

for run_dir in dirs.keys():
    for s in dirs[run_dir]:
        cfg.in_dir = pjoin(root_dir, s)
        cfg.cuda = cuda
        cfg.csv_loc_file = pjoin(root_dir, s, 'gaze-measurements', 'video1.csv')

        # with pascal trained
        cfg.run_dir = pjoin(root_dir, 'unet_region', 'runs', pascal_dir)
        cfg.in_dir = pjoin(root_dir, s)
        eval.main(cfg)

        # with self trained
        cfg.run_dir = pjoin(root_dir, 'unet_region', 'runs', run_dir)
        eval.main(cfg)
def test_build_graph(self, eval_real_images):
    flags.FLAGS.eval_real_images = eval_real_images
    eval.main(None, run_eval_loop=False)
import sys

import extract as ex
import eval as ev

train_file, annotation_file, output_file = sys.argv[1:]
ex.main(train_file, output_file)
ev.main(annotation_file, output_file)
    plt.title('Scores')
    plt.show()


def plot_score_difference(score_file1, score_file2):
    plt.figure()
    legend = []
    score_file1_obj = score_file_to_object(score_file1)
    score_file2_obj = score_file_to_object(score_file2)
    # sorted_dict = sorted(score_file_obj, key=lambda key: score_file_obj[key])
    x = list(score_file1_obj.keys())
    y1 = list(score_file1_obj.values())
    y2 = list(score_file2_obj.values())
    y = list(np.array(y2) - np.array(y1))
    plt.plot(x, y)
    plt.axhline(0, color='black')
    legend.append('y = Comparing {0} against {1}'.format(score_file2, score_file1))
    fontP = FontProperties()
    fontP.set_size('small')
    plt.legend(legend, "title", prop=fontP)
    plt.ylabel('Score difference')
    plt.xlabel('Query ID')
    plt.title('Scores')
    plt.show()


if __name__ == '__main__':
    tobeEval = [gt, output]
    main(tobeEval)
    # score_files = ['../output/baseline_tfidf_score.out', '../output/baseline_bm25_score.out']
    # plot_graph(score_files)
    # plot_score_difference(score_files[0], score_files[1])
    # python eval.py data/qrels.txt output/baseline.out
def test_build_graph(self):
    eval.main(None, run_eval_loop=False)
import configargparse
import distutils.util

from eval import main

parser = configargparse.ArgumentParser(description='Eval network for few-shot learning')
parser.add_argument('--model.model_path', type=str, metavar='PATH',
                    help="location of pretrained model to evaluate")
parser.add_argument('--data.test_way', type=int, metavar='N', default=0,
                    help="number of classes per episode in test")
parser.add_argument('--data.test_shot', type=int, metavar='N', default=0,
                    help="number of support examples per class in test")
parser.add_argument('--data.test_query', type=int, metavar='N', default=0,
                    help="number of query examples per class in test")
parser.add_argument('--data.test_episodes', type=int, metavar='N', default=1000,
                    help="number of test episodes per epoch")
parser.add('--data.cuda', type=distutils.util.strtobool, metavar='BOOL',
           help="run in CUDA mode", default=True)
parser.add('--data.gpu', type=int, metavar='N',
           help="GPU device (starts from 0)", default=0)

opts = vars(parser.parse_args())
main(opts)
import eval
import extract

if __name__ == '__main__':
    extract.main('data/Corpus.DEV.txt', 'data/Corpus.TRAIN.txt',
                 'data/TRAIN.annotations', 'output.dev.txt')
    eval.main('data/DEV.annotations', 'output.dev.txt')
def graphData(stock, MA1, MA2, interval):
    fig.clf()
    '''
    Use this to dynamically pull a stock:
    '''
    # try:
    print('Currently Pulling', stock)
    data, meta_data = ts.get_intraday(symbol=stock,
                                      interval=str(interval) + 'min')
    # data.to_csv('data/NSEI1min.csv')
    # data = data.iloc[::-1]
    data['date'] = data.index
    data['date'] = data['date'].map(mdates.date2num)
    # print(data)
    global df_train
    scores = news2sentiment()
    df_train, minmax_for = preprocess_data(data, scores)
    # print('Data Frame:-', df_train)
    # except Exception as e:
    #     print(str(e), 'failed to pull pricing data')

    # preparation for the candlestick plot
    date, openp, highp, lowp, closep, volume = (data['date'].tolist(),
                                                data['1. open'].tolist(),
                                                data['2. high'].tolist(),
                                                data['3. low'].tolist(),
                                                data['4. close'].tolist(),
                                                data['5. volume'].tolist())

    x = 0
    y = len(date)
    newAr = []
    while x < y:
        appendLine = date[x], openp[x], highp[x], lowp[x], closep[x], volume[x]
        newAr.append(appendLine)  # contains data for the candlestick OHLC plot
        x += 1

    global count
    global results_backup
    global results_backup_lstm
    global results_backup_gru
    global date2add
    global prev_date

    # main axis in the figure
    ax1 = plt.subplot2grid((6, 4), (1, 0), rowspan=4, colspan=4)
    # candlestick OHLC plot from mplfinance
    candlestick_ohlc(ax1, newAr, width=.0005, colorup='#53c156')

    # workaround for updating the plot after each interval without disturbing
    # the forecast plot
    if count == -1:
        results = []
        results_LSTM = []
        results_GRU = []
        temp = []
        results_LSTM.append(forecast_LSTM(df_train, minmax_for, data))
        results_GRU.append(forecast_GRU(df_train, minmax_for, data))
        for i in range(len(results_LSTM[0])):
            temp.append((results_LSTM[0][i] + results_GRU[0][i]) / 2)
        results.append(temp)
        results_backup = results
        # results_backup_lstm = results_LSTM
        # results_backup_gru = results_GRU
        manual_run = False
        main(temp, 10, "model_noisynstepperdddqn_20", True, manual_run)
        # print('Results Leng:-', len(results[0]))
        prev_date = date
        ax1.axvline(x=date[-1], color='r', linewidth=2)
        date2add = [date[-1]]
        # print(date)
        for i in range(test_size):
            date2add.append(date2add[-1] + (0.0006944444 * interval))
        # for no, r in enumerate(results_LSTM):
        #     ax1.plot(date + date2add[1:], r, label='LSTM', linewidth=2, alpha=0.5)
        # for no, r in enumerate(results_GRU):
        #     ax1.plot(date + date2add[1:], r, label='GRU', linewidth=2, alpha=0.5)
        for no, r in enumerate(results):
            ax1.plot(date + date2add[1:], r, label='LSTM+GRU', linewidth=2)
        count += 1
    else:
        if count == test_size - 1:
            ax1.axvline(x=prev_date[-1], color='r', linewidth=2)
            # for no, r in enumerate(results_backup_lstm):
            #     ax1.plot(date + date2add[1:], r, label='LSTM', linewidth=2, alpha=0.5)
            # for no, r in enumerate(results_backup_gru):
            #     ax1.plot(date + date2add[1:], r, label='GRU', linewidth=2, alpha=0.5)
            for no, r in enumerate(results_backup):
                ax1.plot(prev_date + date2add[1:], r, label='LSTM+GRU', linewidth=2)
            count = -1
        elif count != test_size:
            ax1.axvline(x=prev_date[-1], color='r', linewidth=2)
            # for no, r in enumerate(results_backup_lstm):
            #     ax1.plot(date + date2add[1:], r, label='LSTM', linewidth=2, alpha=0.5)
            # for no, r in enumerate(results_backup_gru):
            #     ax1.plot(date + date2add[1:], r, label='GRU', linewidth=2, alpha=0.5)
            for no, r in enumerate(results_backup):
                ax1.plot(prev_date + date2add[1:], r, label='LSTM+GRU', linewidth=2)
            count += 1

    # Plot the close price on top of the candlestick OHLC chart
    ax1.plot(date, closep, color='#e75480', label='Closing Price', linewidth=2)
    ax1.xaxis.set_major_locator(mticker.MaxNLocator(20))
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
    plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
    plt.ylabel('Stock price and Volume')

    maLeg = plt.legend(loc=9,
                       ncol=2,
                       prop={'size': 10},
                       fancybox=True,
                       borderaxespad=0.)
    maLeg.get_frame().set_alpha(0.4)
    textEd = pylab.gca().get_legend().get_texts()
    pylab.setp(textEd[0:5])

    volumeMin = 0

    ax0 = plt.subplot2grid((6, 4), (0, 0), sharex=ax1, rowspan=1, colspan=4)
    rsi = rsiFunc(closep)
    posCol = '#386d13'
    negCol = '#8f2020'
    plt.title(stock.upper())
    ax0.plot(date, rsi, linewidth=1.5)
    ax0.axhline(70, color=negCol)
    ax0.axhline(30, color=posCol)
    ax0.fill_between(date, rsi, 70, where=(rsi >= 70), alpha=0.5)
    ax0.fill_between(date, rsi, 30, where=(rsi <= 30), alpha=0.5)
    ax0.set_yticks([30, 70])
    plt.ylabel('RSI')

    ax1v = ax1.twinx()
    ax1v.fill_between(date, volumeMin, volume, facecolor='#ffd700', alpha=.5)
    ax1v.axes.yaxis.set_ticklabels([])
    ax1v.grid(False)
    # Edit this to 3, so it's a bit larger
    ax1v.set_ylim(0, 3 * max(volume))

    ax2 = plt.subplot2grid((6, 4), (5, 0), sharex=ax1, rowspan=1, colspan=4)
    nslow = 26
    nfast = 12
    nema = 9
    emaslow, emafast, macd = computeMACD(closep)
    ema9 = ExpMovingAverage(macd, nema)
    ax2.plot(date, macd, lw=2)
    ax2.plot(date, ema9, lw=1)
    ax2.fill_between(date, macd - ema9, 0, alpha=0.5)
    plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
    plt.ylabel('MACD')
    ax2.yaxis.set_major_locator(mticker.MaxNLocator(nbins=5, prune='upper'))

    plt.xticks(rotation=45)
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.tight_layout()

    datetimeobj = datetime.now()
    if interval == 1:
        print('Wait for ' + str(interval) + ' minute')
    else:
        print('Wait for ' + str(interval) + ' minutes')
    # plt.show()

    # To save every plot
    fig.savefig('test_image/example' + str(datetimeobj.hour) + '_' +
                str(datetimeobj.minute) + '_' + str(datetimeobj.second) + '.png',
                facecolor=fig.get_facecolor())
def my_main():
    # fix some flags needed for EAST
    fix_flags()

    # rotate images
    for image_name in os.listdir(INPUT_PATH):
        if is_image(image_name):
            preprocess_image(image_name)

    # EAST algorithm
    eval.main()

    # Output texts from EAST boxes
    for image_name in os.listdir(OUTPUT_PATH):
        if is_image(image_name):
            print("image name: " + str(image_name))
            print("Num book: ")
            num_book = int(input())

            # compute the clusters and the text they contain
            clusters = text_recognition(image_name)

            # filter out clusters with only one element
            if REMOVE_CLUSTER_WITH_1_ELEMENT:
                to_remove = []
                for clus in clusters:
                    if len(clus.texts) == 1:
                        to_remove.append(clus)
                for r in to_remove:
                    clusters.remove(r)

            # get the excel file of the library database
            excel_file = EXCEL_FILE
            text_analyser = TextAnalysis(excel_file)

            # voting system
            votation_system(clusters, text_analyser)

            # unite all barcodes with their confidence value to decide which
            # set of books is the most probable
            all_barcodes = {
                i[0]: 0
                for i in text_analyser.library_manager_tool.data[:, 0]
            }
            for cluster in clusters:
                for barcode in cluster.get_best_barcodes_match():
                    all_barcodes[barcode] += cluster.get_confidence_value()

            print("image name: " + str(image_name))
            print("Num book: " + str(num_book))

            # plot the histogram of all barcodes and their confidence value
            # plot_barcode_histrogramm(all_barcodes)

            # find the most probable start and end barcodes from the histogram
            dict_conversion = {}
            count = 0
            for i in all_barcodes.keys():
                dict_conversion[count] = i
                count += 1
            li = list(all_barcodes.values())
            slices = (li[x:x + num_book] for x in range(len(li) - num_book + 1))
            slices = [sum(x) for x in slices]
            mx = max(slices)
            index_of_largest = slices.index(mx)
            start_barcode = dict_conversion[index_of_largest]
            end_barcode = dict_conversion[index_of_largest + num_book - 1]
            id_start = index_of_largest
            id_end = index_of_largest + num_book - 1
            print("Most probable start and end barcodes are: " +
                  str(start_barcode) + " | " + str(end_barcode))

            # get all records between the estimated start and end barcodes
            # from the library database
            barcodes_in_shelf = []
            records_in_shelf = []
            for i in range(num_book):
                barcodes_in_shelf.append(dict_conversion[index_of_largest + i])
            df = pd.read_excel(EXCEL_FILE)
            for i in barcodes_in_shelf:
                records_in_shelf.append(
                    text_analyser.library_manager_tool.get_record_by_barcode(i))

            # find the representative color of the image from the middle
            # book's collocation
            medium_collocation = float(
                ((df.loc[df['barcode'] == dict_conversion[
                    index_of_largest + int(num_book / 2)]]
                  )["collocation"].values[0]).split()[0])
            color_code_shelf = collocation_2_color(medium_collocation)
            print("Most probable color is: " + color_code_shelf)

            # try to find books that are out of the estimated id bounds
            list_warnings_out_of_ids3 = []
            for clus in clusters:
                if len(clus.get_texts()) > 1:
                    in_range_sum = 0
                    for barcode in clus.get_best_barcodes_match():
                        num = text_analyser.library_manager_tool.get_number_by_barcode(
                            barcode)
                        if id_start < num < id_end:
                            in_range_sum += clus.get_confidence_value()
                    if clus.get_confidence_value() > in_range_sum:
                        list_warnings_out_of_ids3.append(clus)
            # print("These books are probably out of the id range in the image:")
            # print(list_warnings_out_of_ids3)
            print("Number of books probably out of the id range: " +
                  str(len(list_warnings_out_of_ids3)))
            image = cv2.imread(CLUSTERED_PATH + image_name)
            image_name_out_of_ids = append_id(image_name, "out_id")
            cv2.imwrite(CLUSTERED_PATH + image_name_out_of_ids, image.copy())
            for clus in list_warnings_out_of_ids3:
                draw_word_in_cluster(image_name_out_of_ids,
                                     clus,
                                     "OUT_ID_",
                                     offset_x=-500,
                                     offset_y=0,
                                     color=(0, 0, 255))

            # try to find books whose color is out of the shelf color bounds
            list_warnings_out_of_color3 = []
            for clus in clusters:
                if len(clus.get_texts()) > 1:
                    in_range_sum = 0
                    for barcode in clus.get_best_barcodes_match():
                        collocation = get_collocation_from_barcode(
                            df, barcode).number
                        if collocation_2_color(collocation) == color_code_shelf:
                            in_range_sum += clus.get_confidence_value()
                    if clus.get_confidence_value() > in_range_sum:
                        list_warnings_out_of_color3.append(clus)
            # print("These books are probably out of the shelf color:")
            # print(list_warnings_out_of_color3)
            print("Number of books probably out of color: " +
                  str(len(list_warnings_out_of_color3)))
            image = cv2.imread(CLUSTERED_PATH + image_name)
            image_name_out_of_col = append_id(image_name, "out_col")
            cv2.imwrite(CLUSTERED_PATH + image_name_out_of_col, image.copy())
            for clus in list_warnings_out_of_color3:
                draw_word_in_cluster(image_name_out_of_col,
                                     clus,
                                     "OUT_COL",
                                     offset_x=-500,
                                     offset_y=80,
                                     color=(0, 0, 255))

            # create a list of BookFieldProposal from the records in the bounds
            list_book_field_proposal2 = []
            # recursively assign each BFP to its most probable cluster to get
            # a 1-to-1 match
            for rec in records_in_shelf:
                barcode = rec[0][0]
                tmp_list = []
                for num_col, field in enumerate(rec):
                    if num_col == 1 or num_col == 2:
                        tmp_list += field
                book_field_proposal = BookFieldProposal(barcode, tmp_list)
                list_book_field_proposal2.append(book_field_proposal)
            for book_f in list_book_field_proposal2:
                match_clusters_kook_proposals(book_f, list_book_field_proposal2,
                                              clusters)
            draw_assigmente_bfp_outcome(list_book_field_proposal2,
                                        image_name,
                                        df,
                                        specify_name="V2_")

            # filter out BFPs that were not assigned
            restricted_list_book_field_proposal2 = []
            for i in range(0, len(list_book_field_proposal2)):
                bfp_title_and_author = list_book_field_proposal2[i]
                if bfp_title_and_author.get_cluster() is not None:
                    restricted_list_book_field_proposal2.append(
                        bfp_title_and_author)

            # find which books are probably out of order in the image
            order_warning_ = []
            restricted_list_book_field_proposal2.sort(
                key=lambda x: x.get_cluster().calc_cluster_center()[1],
                reverse=True)
            # print(restricted_list_book_field_proposal2)
            for bfp_id in range(1, len(restricted_list_book_field_proposal2) - 1):
                precedent = restricted_list_book_field_proposal2[bfp_id - 1]
                precedent_collocation = get_collocation_from_barcode(
                    df, precedent.get_barcode())
                current = restricted_list_book_field_proposal2[bfp_id]
                current_collocation = get_collocation_from_barcode(
                    df, current.get_barcode())
                next = restricted_list_book_field_proposal2[bfp_id + 1]
                next_collocation = get_collocation_from_barcode(
                    df, next.get_barcode())
                if precedent_collocation <= current_collocation:
                    if current_collocation > next_collocation:
                        if precedent_collocation <= next_collocation:
                            order_warning_.append(current)
                        else:
                            order_warning_.append(next)
                else:
                    if precedent_collocation <= next_collocation:
                        order_warning_.append(current)
                    else:
                        order_warning_.append(precedent)
            order_warning_ = list(set(order_warning_))
            # print(order_warning_)
            print("Number of order errors found: " + str(len(order_warning_)))
            image = cv2.imread(CLUSTERED_PATH + image_name)
            image_name_out_of_order = append_id(image_name, "out_order")
            cv2.imwrite(CLUSTERED_PATH + image_name_out_of_order, image.copy())
            for order_war_bfp in order_warning_:
                clus = order_war_bfp.get_cluster()
                draw_word_in_cluster(image_name_out_of_order,
                                     clus,
                                     "OUT_ORDER",
                                     offset_x=0,
                                     offset_y=100,
                                     color=(0, 165, 255))

    print("Finished")
    remove_tmp_files()
#######################
# Training Parameters #
#######################
parser.add_argument('--mark_phi', type=int, default=1, help='mark phi')
parser.add_argument('--batch_size', type=int, default=4, help='mini batch size')
parser.add_argument('--epoch', type=int, default=50, help='number of epochs to train')
parser.add_argument('--opt', default='adam', help='optimization method')
parser.add_argument('--lr', type=float, default=0.0075, help='learning rate')
parser.add_argument('--reg', type=float, default=0.0001, help='regularization coefficient')
parser.add_argument('--init_emb', default=None, help='initial embedding to be loaded')
parser.add_argument('--res', type=int, default=1, help='residual connections')

argv = parser.parse_args()
print
print argv
print

########
# Mode #
########
if argv.mode == 'train':
    import train
    train.main(argv)
elif argv.mode == 'test':
    import test
    test.main(argv)
else:
    import eval
    eval.main(argv)
def eval_model(model_path):
    args = {'model.model_path': model_path}
    main(args)
def index():
    # Make a folder named 'images' where uploaded images will be stored
    target = os.path.join(APP_ROOT, 'images/')
    if not os.path.isdir(target):
        os.mkdir(target)
    else:
        shutil.rmtree(target)
        os.mkdir(target)

    # Stores the captions, i.e. the final text extracted from the uploaded images
    images_captions = []
    try:
        image_names = []  # Stores the image names that will be uploaded
        # caption_names = []  # Stores the corresponding text we want as caption below the image when it's clicked

        # Get the names of the images uploaded through forms and request
        upload_image_list = request.files.getlist('file')
        for image in upload_image_list:
            filename = image.filename
            image_names.append(filename)  # This list will be passed through render_template
            # File location for storing the image in the 'images' folder
            destination = '/'.join([target, filename])
            image.save(destination)  # Save the image in the images folder

        # ===============================================================
        eval.main()  # Generates the border_image and txt of 8 coordinates
        # ===============================================================

        # Generate the list that stores all word-box coordinates by reading
        # the 4-coordinate txt
        img_with_noise_path = os.path.join(APP_ROOT, 'images/')
        plst_word_box_coordinates_path = os.path.join(APP_ROOT, 'output_label/')
        uploaded_images = os.listdir(img_with_noise_path)
        pstr_cropped_images_folder_path = os.path.join(APP_ROOT, 'output_crop')
        pstr_intermediate_output_folder_path = os.path.join(
            APP_ROOT, 'intermediate')
        inp = 'input.txt'  # Hard-coded
        out = 'output'  # Hard-coded

        # Call the OCR on each image, generate the image and caption, and
        # store them in 'images_captions' with each entry as a tuple
        for image in uploaded_images:
            word_box_coordinates = []
            # The code will fail if the image file name contains another '.'
            pth = plst_word_box_coordinates_path + image.split('.')[0] + '.txt'
            with open(pth) as f:
                for line in f:
                    temp = line.split(' ')
                    box = []
                    for i in temp:
                        box.append(int(i))
                    word_box_coordinates.append(box)
            img_path = img_with_noise_path + image
            image_temp, caption_temp = image, ocr_image_and_split_multiwords(
                word_box_coordinates, img_path, pstr_cropped_images_folder_path,
                pstr_intermediate_output_folder_path, inp, out)
            images_captions.append((image_temp, caption_temp))

        return render_template("home_2.html", image_caption=images_captions)
    except:
        # When no image is selected and the send button is clicked
        return render_template('save_upload.html')
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cudnn.benchmark = True

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(reduction='sum')

    print("===> Using GPU %d" % opt.gpu)
    torch.cuda.set_device(opt.gpu)
    model = model.cuda(opt.gpu)
    criterion = criterion.cuda(opt.gpu)

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            model.load_state_dict(checkpoint["model"].state_dict(), strict=False)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    if opt.quant:
        if os.path.isfile(opt.quant_param):
            model.quantize_from(opt.quant_param)
            print('model quantized from ' + opt.quant_param)
        else:
            print("=> no quantize checkpoint found at '{}'".format(
                opt.quant_param))
            exit(1)

    if opt.blu:
        model.load_blu('blu_train.data')
        print('loaded blu from ' + 'blu_train.data')

    print("===> Setting Optimizer")
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum,
    #                       weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters(), lr=opt.lr,
                           weight_decay=opt.weight_decay)

    print("===> Loading datasets")
    # train_set = DatasetFromHdf5("data/train.h5")
    # training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
    #                                   batch_size=opt.batchSize, shuffle=True)
    training_data_loader = ndarrayLoader('data\\input.data',
                                         'data\\target.data',
                                         shuffle=True,
                                         batch_size=opt.batchSize)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch, opt)
        result = eval.main(model, opt.blu, 'Set5')
        with open('result.txt', 'a') as f:
            f.write('epoch:%d\n' % epoch)
            f.write(result)
        save_checkpoint(model, epoch)
def _test_build_graph_helper(self, eval_real_images):
    tf.flags.FLAGS.eval_real_images = eval_real_images
    eval.main(None, run_eval_loop=False)
    help="number of support examples per class in test. 0 means same as model's data.shot (default: 0)")
parser.add_argument(
    '--data.test_query',
    type=int,
    default=0,
    metavar='TESTQUERY',
    help="number of query examples per class in test. 0 means same as model's data.query (default: 0)")
parser.add_argument('--data.test_episodes',
                    type=int,
                    default=1000,
                    metavar='NTEST',
                    help="number of test episodes per epoch (default: 1000)")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--augment_stn',
                    type=int,
                    required=True,
                    help='Augment outputs with STN')

args = vars(parser.parse_args())
aug = bool(args['augment_stn'])
print(aug)
main(args, aug)
import preprocess
import train
import eval

print('start preprocessing...')
# preprocess.main()

print('start training...')
# train.main()

print('start eval...')
eval.main()
from eval import main

__author__ = 'HELGJO'

main(["E:/Github/assignment-2_helgjo/training.gt",
      "E:/Github/assignment-2_helgjo/training.test"])

# eval.py "E:/Github/assignment-2_helgjo/training.gt" "E:/Github/assignment-2_helgjo/training.test"
    type=int,
    default=0,
    metavar='TESTWAY',
    help="number of classes per episode in test. 0 means same as model's data.test_way (default: 0)")
parser.add_argument(
    '--data.test_shot',
    type=int,
    default=0,
    metavar='TESTSHOT',
    help="number of support examples per class in test. 0 means same as model's data.shot (default: 0)")
parser.add_argument(
    '--data.test_query',
    type=int,
    default=0,
    metavar='TESTQUERY',
    help="number of query examples per class in test. 0 means same as model's data.query (default: 0)")
parser.add_argument('--data.test_episodes',
                    type=int,
                    default=1000,
                    metavar='NTEST',
                    help="number of test episodes per epoch (default: 1000)")

args = vars(parser.parse_args())
main(args)