def upload_csv_files(files, upload_folder):
    """ get content of the uploaded files """
    if 'Decision Matrix' not in files or 'Criteria Details' not in files:
        raise Exception(
            "Both the Decision Matrix file and the Criteria Details file are required"
        )

    decision_matrix_file = files['Decision Matrix']
    criteria_details_file = files['Criteria Details']

    # check for uploaded files
    if decision_matrix_file.filename == '' or criteria_details_file.filename == '':
        raise Exception("Select files for upload")

    # check file type of decision matrix file and save file
    decision_matrix_file_path = save_file(decision_matrix_file, upload_folder)
    if decision_matrix_file_path == '':
        raise Exception("Allowed file type is csv")

    # check file type of criteria details file and save file
    criteria_details_file_path = save_file(criteria_details_file, upload_folder)
    if criteria_details_file_path == '':
        raise Exception("Allowed file type is csv")

    with open(decision_matrix_file_path, 'r', encoding='utf-8-sig') as csvfile:
        decision_matrix = list(csv.reader(csvfile, delimiter=';'))
        decision_matrix = np.array(decision_matrix)

    with open(criteria_details_file_path, 'r', encoding='utf-8-sig') as csvfile:
        criteria_details = list(csv.reader(csvfile, delimiter=';'))
        criteria_details = np.array(criteria_details)

    # delete uploaded csv files
    delete_file(decision_matrix_file_path)
    delete_file(criteria_details_file_path)

    return decision_matrix, criteria_details
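# Note: upload_csv_files relies on save_file/delete_file helpers defined elsewhere
# in the project. A minimal sketch of the contract inferred from the call sites,
# assuming Flask/Werkzeug uploads; this is hypothetical, not the project's actual
# implementation: save_file validates the extension and returns the stored path,
# or '' when the file is not a CSV.
import os
from werkzeug.utils import secure_filename

ALLOWED_EXTENSIONS = {'csv'}  # assumption: only CSV uploads are accepted


def save_file(file_storage, upload_folder):
    """Save an uploaded file and return its path, or '' if the extension is not allowed."""
    filename = secure_filename(file_storage.filename)
    if '.' not in filename or filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:
        return ''
    path = os.path.join(upload_folder, filename)
    file_storage.save(path)
    return path


def delete_file(path):
    """Remove a previously saved upload, ignoring files that are already gone."""
    if os.path.exists(path):
        os.remove(path)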
def upload():
    file = request.files.get("file")
    if request.method == 'POST' and file:
        new_filename = str(uuid.uuid4())
        fullname = os.path.join(config.UPLOAD_FOLDER, new_filename)
        try:
            helpers.save_file(file, new_filename)
            resp = midi_parser.get_json_for_file(fullname, file.filename)
        finally:
            helpers.delete_file(fullname)
        return resp
    return jsonify({'status': 'error', 'message': 'Cannot save file'})
def download_book(book_page_url: str, book_path: str, image_path: str) -> Optional[Dict]:
    try:
        book_context = get_book_context(book_page_url)
    except NoFileAvaliable:
        return None

    book_file_name, book_content = get_book_content(book_context.get('book_file_download_url'))
    book_local_file_name = save_file(book_content, book_file_name, book_path)

    image_file_name, image_content = get_image_content(book_context.get('book_image_download_url'))
    image_local_file_name = save_file(image_content, image_file_name, image_path)

    book_context.update(
        book_path=book_local_file_name,
        img_src=image_local_file_name,
    )
    return book_context
def main(credentials, to_retrieve):
    """Entry point."""
    contar = ContAR(credentials)
    if to_retrieve == 'all':
        channels = [ch for ch in contar.channels]
    else:
        channels = [
            ch for ch in contar.channels if ch['name'].lower() in to_retrieve
        ]
    if len(to_retrieve) < 1 or (len(channels) != len(to_retrieve) and to_retrieve != 'all'):
        print("No appropriate channels selected!\nSee --help for the list of channels.")
        exit()

    all_data = []
    for channel in channels:
        channel_data = contar.get_all_listing_data(channel)
        all_data.extend(channel_data)

    helpers.save_file("contar-v01", all_data)
def main():
    """Entry point."""
    all_data = get_all_data()
    helpers.save_file("encuentro-v05", all_data)
def call(self, data):
    filename = value_or_callable(self.filename, data)
    content = value_or_callable(self.content, data)
    save_file(filename, content)
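# value_or_callable is assumed to resolve either a literal value or a callable
# that receives the current data. A minimal sketch of that helper, inferred from
# how it is called above (hypothetical, not the project's actual code):
def value_or_callable(value, data):
    """Return value(data) when value is callable, otherwise return value unchanged."""
    return value(data) if callable(value) else value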
parser = argparse.ArgumentParser(
    description='Download all files from links on a website', prog='main.py')
parser.add_argument('--url', help='url for search', dest='url', required=True)
parser.add_argument('--output-folder',
                    dest='output_folder',
                    help='folder where files will be stored',
                    required=False,
                    default='output')
args = parser.parse_args()
url = args.url
output_folder = args.output_folder

if __name__ == '__main__':
    webpage = BeautifulSoup(
        requests.get(url, allow_redirects=True).content, "html.parser")
    for a_tag in webpage.findAll("a"):
        link = a_tag.attrs.get("href")
        if link:  # skip missing or empty hrefs
            downloaded_file = requests.get(f'{url}{link}')
            if (downloaded_file.headers['Content-Type']
                    in ['application/pdf', 'text/plain']):
                print(f'File to be downloaded: {link}')
                save_file(downloaded_file.content, f'{output_folder}/{link}')
                print('File downloaded successfully')
            else:
                print('This link cannot be downloaded. Only pdf and txt are allowed.')
        else:
            print("The link is empty")
def main():
    """Entry point."""
    all_data = get_all_data()
    helpers.save_file("conectar-v05", all_data)
def main():
    """Entry point."""
    all_data = get_all_data()
    helpers.save_file("bacua-v05", all_data)
def get_migrations(university, univercity_city, is_schools):
    result_dict = defaultdict(int)
    path = Path('datasets') / (university + '.csv')
    df = pd.read_csv(path, sep=';', encoding='ansi', low_memory=False)
    df = df.loc[:, ['city', 'home_town', 'schools', 'universities']]
    cols = len(df.columns) - 1
    df[df.columns[[0, cols]]] = df.iloc[:, [0, cols]].astype(str)

    from_before = len(df.schools.values) if is_schools else len(df.universities.values)
    to_before = len(df.universities.values) if is_schools else len(df.city.values)

    if is_schools:
        df = df[(df['schools'] != '[]') & (df['schools'] != "nan")
                & (df['universities'] != "nan") & (df['universities'] != '[]')]
        df['schools'] = df['schools'].apply(format_json)
        df['universities'] = df['universities'].apply(format_json)
        from_after = len(df[df.schools != ''].schools.values)
        to_after = len(df[df.universities != ''].universities.values)
    else:
        df = df[(df['city'] != "nan") & (df['universities'] != "nan")
                & (df['universities'] != '[]')]
        df['universities'] = df['universities'].apply(format_json)
        from_after = len(df[df.universities != ''].universities.values)
        to_after = len(df[df.city != ''].city.values)

    stayed = income = size = 0
    for index, row in df.iterrows():
        sch = row['schools']
        uni = row['universities']
        city = row['city']
        if (is_schools and sch == uni) or (not is_schools and uni == city):
            stayed += 1
            size += 1
            continue
        if is_schools and sch != uni and sch != '' and uni != '' and univercity_city == uni:
            result_dict[sch] += 1
            income += 1
            size += 1
        elif (not is_schools and uni != city and uni != '' and city != ''
              and city != 'nan' and uni == univercity_city):
            result_dict[city] += 1
            income += 1
            size += 1

    sorted_directions = sorted(result_dict.items(), key=lambda x: x[1], reverse=True)
    result = [{
        'from': elm[0],
        'quantity': elm[1],
        'rate_all': round(elm[1] * 1.0 / size, 5),
        'rate_migrated': round(elm[1] * 1.0 / income, 5)
    } for elm in sorted_directions]

    print('Locals/newcomers: %s/%s. Number of settlements: %s. Total: %s'
          % (stayed, income, len(result_dict), stayed + income))
    print('Filtering. Before: %s/%s. After: %s/%s'
          % (from_before, to_before, from_after, to_after))

    res_type = 'schools' if is_schools else 'universities'

    if len(result) > 0:
        helpers.save_file(university + '_migrations_' + res_type, result)
    else:
        print('No result')
X_tfidf_pos = tfidf_transformer.transform(X_pos)
X_tfidf_neg = tfidf_transformer.transform(X_neg)
X_tfidf_validate = tfidf_transformer.transform(X_validate)
X_tfidf_validate_pos = tfidf_transformer.transform(X_validate_pos)
X_tfidf_validate_neg = tfidf_transformer.transform(X_validate_neg)

X_label_category_publish_name = article_contents.label_category_publish_name[0:train_row]
X_label_category_publish_name_validate = article_contents.label_category_publish_name[
    train_row:validate_row + train_row]
X_label_story = article_contents.label_story[0:train_row]
X_label_story_validate = article_contents.label_story[train_row:validate_row + train_row]

# Save training & validation data
helpers.save_file(article_contents.iloc[0:train_row, ],
                  model.data_dir + 'article.snappy.parquet')
helpers.save_file(article_contents.iloc[train_row:validate_row + train_row, ],
                  model.data_dir + 'article_validate.snappy.parquet')
helpers.save_file(X_label_category_publish_name,
                  model.data_dir + 'article_label_category_publish_name.pkl')
helpers.save_file(X_label_category_publish_name_validate,
                  model.data_dir + 'article_label_category_publish_name_validate.pkl')
helpers.save_file(X_label_story, model.data_dir + 'article_label_story.pkl')
helpers.save_file(X_label_story_validate,
                  model.data_dir + 'article_label_story_validate.pkl')
helpers.save_file(X, model.data_dir + 'article_count_vectorized.npz')
helpers.save_file(
def process_formdata(self, valuelist):
    super(UploadFileField, self).process_formdata(valuelist)
    if self.data:
        f = save_file(self.data, 'img/users')
        self.data = f
            'Receiving message from {0}'.format(client_address))
        string = b''
        while True:
            data = connection.recv(256)
            if data:
                string += data
            else:
                read = None
                if not helpers.is_binary_string(string):
                    read = re.match(r'^/get ([a-zA-Z0-9]{8,})', string.decode())
                if read:
                    slug = read.group(1)
                    with open(helpers.helpers.path_gen(slug), "rb") as out:
                        connection.sendall(out.read())
                else:
                    # received end, save file, write path
                    slug = helpers.slug_gen(config.slug_len)
                    helpers.save_file(slug, string)
                    helpers.log_to_stdout(
                        'File with length {0} saved at {1}'.format(
                            len(string), slug))
                    connection.sendall(
                        ('http://%s/%s\n' % (config.domain, slug)).encode())
                break
    finally:
        connection.close()
def main():
    """Entry point."""
    all_data = get_all_data()
    helpers.save_file("ted1-v06", all_data)
def parse_json_files(university):
    bdays_male = list()
    bdays_female = list()
    bdays_all = list()
    # cities = list()
    directory = Path('friends_' + university)
    for fname in os.listdir(directory):
        if fname.endswith(".json"):
            path = directory / fname
            with io.open(path, encoding='utf-8') as json_file:
                male, female, all_ages = file_parse_bdays(json_file.read())
                bdays_male += male
                bdays_female += female
                bdays_all += all_ages
                print('File passed. Quantity of rows in result: ', len(bdays_male))
                # json_file.seek(0)
                # cities += file_parse_cities(json_file.read())

    male_test = [row for row in bdays_male if row['age'] != -1]
    male_valid = [row for row in bdays_male if row['age'] == -1]
    female_test = [row for row in bdays_female if row['age'] != -1]
    female_valid = [row for row in bdays_female if row['age'] == -1]
    all_test = [row for row in bdays_all if row['age'] != -1]
    all_valid = [row for row in bdays_all if row['age'] == -1]

    helpers.save_file(university + '_age_male_test', male_test)
    helpers.save_file(university + '_age_male_valid', male_valid)
    helpers.save_file(university + '_age_female_test', female_test)
    helpers.save_file(university + '_age_female_valid', female_valid)
    helpers.save_file(university + '_age_all_test', all_test)
    helpers.save_file(university + '_age_all_valid', all_valid)
def save_model(model, model_path=MODEL, weights_path=WEIGHTS):
    model_json = model.to_json()
    with open(model_path, "w") as json_file:
        json_file.write(model_json)
    weights = model.get_weights()
    helpers.save_file(weights, weights_path)
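# A possible counterpart that restores what save_model wrote above, assuming a
# Keras model and a hypothetical helpers.load_file that unpickles whatever
# helpers.save_file stored; a sketch under those assumptions, not the project's code.
from tensorflow.keras.models import model_from_json


def load_model(model_path=MODEL, weights_path=WEIGHTS):
    """Rebuild the architecture from JSON and restore the saved weights."""
    with open(model_path) as json_file:
        model = model_from_json(json_file.read())
    model.set_weights(helpers.load_file(weights_path))  # hypothetical helper
    return model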
def main():
    """Entry point."""
    all_data = get_all_data()
    helpers.save_file("dqsv-v05", all_data)
def download_file(url, filename):
    response = requests.get(url)
    save_file(filename, response.content)
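# download_file assumes a save_file(filename, content) helper that writes raw
# bytes to disk; a minimal sketch of that contract, inferred from the call above
# (hypothetical, not the project's actual helper):
def save_file(filename, content):
    """Write raw bytes to the given path."""
    with open(filename, 'wb') as fh:
        fh.write(content)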