def csv_upload_visualization(filename=''):
    """Render the visualization results page for a CSV already on the server.

    Reads the row/column offsets the user submitted in the form, asks
    util.getItemName to collect the item (column) names from the CSV under
    tempData/, and renders the results template with those names.

    :param filename: name of the CSV file stored under tempData/
    """
    # TODO
    # choose a file stored in the server
    # grab the url of the csv file

    # Offsets come straight from the submitted form (left as strings,
    # exactly as the downstream helper has always received them).
    row_offset = request.form['row_offset']
    column_offset = request.form['column_offset']

    items = []

    # Location of this module (app/visualization); temp CSV data lives
    # in a sibling tempData/ directory.
    module_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = module_dir + '/tempData/'

    # util.getItemName(folder, file name, output item list,
    #                  first row offset, second row offset, column offset)
    util.getItemName(data_dir, filename, items,
                     row_offset, 0, column_offset)

    return render_template('visualization/csv_visualization_results.html',
                           item_name_list=items,
                           input_filename='temp' + filename)
def csv_new_upload_visualization(filename=''):
    """Visualize an uploaded CSV by plotting its columns in parallel.

    The CSV (stored under templates/UPLOAD_FOLDER/ with a 'temp' prefix) is
    split into roughly equal chunks, each chunk is plotted by a worker
    process via util.plot, and the per-chunk images are stitched into one
    result image that the rendered template displays.

    Fixes vs. the previous revision:
    * chunk sizing used true division, which yields a float chunksize on
      Python 3 (pd.read_csv rejects it) and could produce MORE chunks than
      preallocated slots (IndexError) or leave None padding entries that
      were handed to worker processes; the chunk list is now built directly
      from the reader with a ceiling-division chunk size.

    :param filename: name of the uploaded CSV file (must end in '.csv')
    """
    # obtain row_offset and column_offset from the submitted form
    csv_row_offset = request.form['row_offset']
    csv_column_offset = request.form['column_offset']
    item_name_list = []

    # current app location: app/visualization
    app_root = os.path.dirname(os.path.abspath(__file__))
    download_dir = app_root + '/templates/UPLOAD_FOLDER/'

    # util.getItemName(folder, file name, item list, first row offset,
    #                  second row offset, column offset)
    util.getItemName(download_dir, filename, item_name_list,
                     csv_row_offset, 0, csv_column_offset)

    # remove .csv from the end of file name
    file_origin_name = filename[:-4]
    input_filename = 'temp' + filename
    file_name = filename
    file_path = download_dir + input_filename

    cpu_num = multiprocessing.cpu_count()

    csv_handle = pd.read_csv(file_path)
    csv_len = len(csv_handle)

    # Ceiling division guarantees an integer chunksize and at most cpu_num
    # chunks; max(1, ...) keeps pd.read_csv happy for tiny/empty files.
    chunk_size = max(1, -(-csv_len // cpu_num))
    chunks = list(pd.read_csv(file_path, chunksize=chunk_size))

    column_bool = [1] * len(csv_handle.columns)
    x_scale = util.x_date_max_min_of_csv(file_path)
    y_scale = util.y_max_min_of_csv(file_path, column_bool)

    # One work item per chunk actually produced (may be fewer than cpu_num
    # for small files); the chunk index i keys the temp image file names.
    input_objects = [
        CSVChunkInformation(file_origin_name, i, piece,
                            x_scale, y_scale, column_bool)
        for i, piece in enumerate(chunks)
    ]

    # separate work across the available cores
    pool = Pool(cpu_num)
    pool.map(util.plot, input_objects)
    pool.close()
    pool.join()

    # combine temp imgs and delete temp imgs
    temp_img_dir = app_root + '/data/img/'
    result_img_name = file_origin_name + '_result.png'
    result_img_path = temp_img_dir + '../' + result_img_name
    util.combine_delete_image(temp_img_dir, result_img_path)

    return render_template('csv_new_visualization_results.html',
                           item_name_list=item_name_list,
                           input_filename=result_img_name,
                           filename=file_name)