def deploy(self):
    # Create the new folder
    os.chdir(rootpath)
    self.log('Create new folder')
    if os.path.exists(rootpath + '/new'):
        os.system('sudo rm -r new')
    self.run_command('mkdir new')
    os.chdir(rootpath + '/new')
    try:
        # Clone repository
        self.log('Clone repo')
        self.run_command(f'git clone https://{USER}:{PASS}@{VS_URL} .')

        # Copy variables to new folder
        os.chdir(rootpath)
        self.log('Copy variables.py')
        self.run_command('cp variables.py new/')

        # Create new env
        os.chdir(rootpath + '/new')
        self.log('Create new virtual env')
        self.run_command('virtualenv venv --python=python3.6')

        # Install dependencies
        self.log('Install dependencies')
        self.run_command('venv/bin/pip install -r requirements.txt')

        # Run migrations
        self.log('Run migrations')
        self.run_command('venv/bin/python3 manage.py migrate')

        # Temporarily rename venv and move it up to the parent directory
        os.chdir(rootpath + '/new')
        self.run_command('mv venv new_venv')
        self.run_command('mv new_venv ./..')
    except Exception:
        # Delete new folder
        os.chdir(rootpath)
        self.log('Reverting...')
        os.system('sudo rm -r new')
        os.system('sudo rm -r new_venv')
        return None

    # All right, swap versions
    self.log('Updating versions')

    # Get the current folder name
    os.chdir(rootpath)
    version = read_from_file('last_version')

    # Rename folders
    self.run_command(f'mv current {version}')
    self.run_command('mv new current')
    self.run_command(f'mv venv venv{version}')
    self.run_command('mv new_venv venv')

    # Get next version
    version = calc_next_version(version)

    # Update version control files
    write_on_file('actual_version', version)
    write_on_file('last_version', version)

    # Restart server
    self.log('Restarting the server')
    self.run_command('sudo systemctl restart apache2')

    # Everything worked
    self.log(get_finished_fallback())
    self.log(get_update_fallback(version))
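# The deploy() above relies on self.run_command raising when a shell command
# fails, so that the except branch can revert. The real helper is not shown in
# this snippet; a minimal hypothetical sketch of such a method:
import subprocess

def run_command(self, command):
    # check=True raises CalledProcessError on a non-zero exit status, which the
    # try/except in deploy() catches to trigger the revert path.
    subprocess.run(command, shell=True, check=True)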
from nltk.corpus import wordnet as wn
from nltk.wsd import lesk

import utils
import nlp_utils

answers = 0
correct_answers = 0
nltk_correct_answers = 0

corpus = utils.read_from_file('corpus.json')

for text in corpus[:1]:
    for sentence in text:
        for word in filter(lambda w: w['sense'] != '', sentence):
            max_overlap = 0
            best_sense = "Unavailable"
            # Context: lemmas of the other sense-annotated words in the sentence
            context = list(map(
                lambda w: w['lemma'],
                filter(lambda w: w['sense'] != '' and w['lemma'] != word['lemma'],
                       sentence)))
            # print("Target sense:", word['sense'])
            for synset in wn.synsets(word['lemma']):
                # Signature: words from the synset's definition and examples
                definition = set(synset.definition().split(' '))
                examples = set(utils.matrix_to_array(
                    [x.split(' ') for x in synset.examples()]))
                definition = definition.union(examples)
                definition = set(nlp_utils.remove_punctuation(s)
                                 for s in definition)
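                # The snippet cuts off here; a simplified-Lesk loop typically
                # continues by scoring signature/context overlap. A sketch, not
                # the original code:
                overlap = len(definition.intersection(context))
                if overlap > max_overlap:
                    max_overlap = overlap
                    best_sense = synset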
from graph import Graph

import utils

if __name__ == "__main__":
    v, adjacency_matrix = utils.read_from_file("matrix")
    edges = utils.get_edges(adjacency_matrix)

    g = Graph(v)
    utils.add_edges(g, edges)
    g.dfs(2)
    print("-----------------------------")

    g1 = Graph(v)
    utils.add_edges(g1, edges)
    g1.bfs(4)
    print("-----------------------------")
def deploy(self):
    # Create the new folder
    os.chdir(rootpath)
    self.log('Create new folder')
    if os.path.exists(rootpath + '/new'):
        os.system('sudo rm -r new')
    self.run_command('mkdir new')
    os.chdir(rootpath + '/new')
    try:
        # Clone repository
        self.log('Clone repo')
        self.run_command(f'git clone https://{USER}:{PASS}@{VS_URL} .')

        # Install dependencies
        self.log('Install dependencies')
        self.run_command('npm install')

        # Compile js
        self.log('Compile js')
        self.run_command('polymer build')
    except Exception:
        # Delete new folder
        os.chdir(rootpath)
        self.log('Reverting...')
        os.system('sudo rm -r new')
        return None

    # Move folder to destination
    self.log('Moving folder to /var/www/html/Site')
    self.run_command(f'mv build/es5-bundled {serverpath}')

    # Clean folder
    os.chdir(rootpath)
    self.log('Clean the new build folder')
    os.system('sudo rm -r new')

    # Go to destination path
    os.chdir(serverpath)

    # Get the current folder name
    version = read_from_file('last_version')

    # Rename folders
    self.log('Updating versions')
    self.run_command(f'mv current {version}')
    self.run_command('mv es5-bundled current')

    # Get next version
    version = calc_next_version(version)

    # Update version control files
    write_on_file('actual_version', version)
    write_on_file('last_version', version)

    # Restart server
    self.log('Restarting the server')
    self.run_command('sudo systemctl restart apache2')

    # Everything worked
    self.log(get_finished_fallback())
    self.log(get_update_fallback(version))
def transporter(request):
    data = read_from_file(LINER_MANIFEST_FILE_NAME)
    return render_to_response('efr8/transporter.html',
                              {'manifest_list': data})
def port_vessel_eta(request):
    data = read_from_file(LINER_MANIFEST_FILE_NAME)
    return render_to_response('efr8/port_vessel_eta.html',
                              {'manifest_list': data})
import sys

def run_worker():
    config = read_from_file(path_to_file)
    solver = GeneticSolver.from_config(config,
                                       generations=100,
                                       population_size=200)
    for individual in solver.population.individuals:
        matrix = individual.genes.to_matrix()
        print(matrix, file=sys.stderr)
instr = {}
if '-i' in sys.argv:
    attrs = list()
    print("Enter the available letters: ", end="")
    letters = input()
    attrs.append(letters.strip().split())
    print("Enter the length of the target word: ", end="")
    k = int(input())
    attrs.append([k])
    print("Enter the letters already found [letter_position:letter]: ", end="")
    found_letters = input()
    if len(found_letters) > 0:
        attrs.append([found_letters])
    instr = make_instructions(attrs)
else:
    instr = make_instructions(read_from_file("input"))

letters = instr.get("letters")
k = instr.get("k")
found_letters = instr.get("found")

possible_words = [arr_to_str(i)
                  for i in sorted(remove_extra(generate_all(letters, k),
                                               found_letters))]

morph = pymorphy2.MorphAnalyzer()
res = set()
for word in possible_words:
    if morph.word_is_known(word):
        res.add(word)

write_to_file("output", res)
# global_homophilies = utils.read_from_file('AMD', 'remove_with_probability_global_homophilies')
# utils.plot_all(class_partition, global_homophilies, homophily_per_clas, 'E', 'AMD', 'remove_with_probability')

# class_partition = utils.read_from_file('CSphd', 'remove_with_probability_class_partitions')
# homophily_per_clas = utils.read_json('CSphd', 'remove_with_probability_homophily_per_clas.json')
# global_homophilies = utils.read_from_file('CSphd', 'remove_with_probability_global_homophilies')
# utils.plot_all(class_partition, global_homophilies, homophily_per_clas, '80', 'CSphd', 'remove_with_probability')

# class_partition = utils.read_from_file('Yeast', 'remove_with_probability_class_partitions')
# homophily_per_clas = utils.read_json('Yeast', 'remove_with_probability_homophily_per_clas.json')
# global_homophilies = utils.read_from_file('Yeast', 'remove_with_probability_global_homophilies')
# utils.plot_all(class_partition, global_homophilies, homophily_per_clas, 'U', 'Yeast', 'remove_with_probability')

### add_big_random
class_partition = utils.read_from_file('blogs', 'add_big_random_class_partitions')
homophily_per_clas = utils.read_json('blogs', 'add_big_random_homophily_per_clas.json')
global_homophilies = utils.read_from_file('blogs', 'add_big_random_global_homophilies')
utils.plot_all(class_partition, global_homophilies, homophily_per_clas, '1', 'blogs', 'add_big_random')

class_partition = utils.read_from_file('AMD', 'add_big_random_class_partitions')
homophily_per_clas = utils.read_json('AMD', 'add_big_random_homophily_per_clas.json')
global_homophilies = utils.read_from_file('AMD', 'add_big_random_global_homophilies')
utils.plot_all(class_partition, global_homophilies, homophily_per_clas, 'E', 'AMD', 'add_big_random')
wait_and_click(
    driver,
    "ContentContainer1_ctl00_Content_QuickSearch1_ctl02_TabSavedSearches")
print("Saved searches")

# retrieve first saved search
wait_and_click(
    driver,
    "ContentContainer1_ctl00_Content_QuickSearch1_ctl02_MySavedSearches1_DataGridResultViewer_ctl04_Linkbutton1"
)
print("First saved search")

remaining = True
while remaining:
    RANGE_STR = utils.read_from_file(utils.get_param('rangefile'))
    if RANGE_STR is not None:
        try:
            RANGE_FROM = RANGE_STR.split("-")[0]
            RANGE_TO = RANGE_STR.split("-")[1]
            print("Getting range " + RANGE_STR)

            # go to export
            navigation.wait_and_click(
                driver,
                "ContentContainer1_ctl00_Content_ListHeader_ListHeaderRightButtons_ExportButtons_ExportButton"
            )
            time.sleep(5)
            # print(driver.window_handles)
            driver.switch_to_window(driver.window_handles[1])
            print("Switch to export window")
def solve_1(filename):
    program = Program(format_input(read_from_file(filename)))
    while not program.is_in_loop():
        program.execute()
    return program.accumulator
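# read_from_file is the shared helper these solve_* snippets lean on; they
# assume it returns the raw lines of the puzzle input. Its implementation is
# not shown here; a minimal hypothetical sketch:
def read_from_file(filename):
    # Return the file's lines with trailing newlines stripped.
    with open(filename) as f:
        return [line.rstrip('\n') for line in f]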
def solve_2(filename):
    passports = format_input(read_from_file(filename))
    valid_passports = [
        passport for passport in passports if passport.is_valid_2()
    ]
    return len(valid_passports)
def solve_2(filename):
    expressions = format_input(read_from_file(filename))
    return sum([expression.solve_2() for expression in expressions])
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()

# Create a text file with each line containing a sentence you want to tweet.
# To run the bot, do the following from the command line:
#   python tweetlinesfromfile.py

# == OAuth Authentication ==
api = create_api()

f_name_read = os.path.dirname(
    os.path.realpath(__file__)) + os.sep + 'campaign_tweets.txt'
f_name_write = os.path.dirname(
    os.path.realpath(__file__)) + os.sep + 'streamed_tweets.txt'

tweet_no = 0
f = utils.read_from_file(f_name_read)
interval = 15
internal_interval = 2
tweet_bunch = 10
i = 1

for line in f:
    if utils.tweet_exists(f_name_write, line):
        continue
    try:
        logger.info(f"Tweeting: {line}")
        api.update_status(line)
        logger.info(f"Writing tweet {utils.increment(tweet_no)} to file")
        tweet_no = tweet_no + 1
        utils.write_to_file(f_name_write, line)
        if i == tweet_bunch:
def solve_1(filename):
    boarding_passes = format_input(read_from_file(filename))
    return max([get_seat_id(bp) for bp in boarding_passes])
def solve_2(filename):
    active_nodes = format_input(read_from_file(filename))
    for _ in range(6):
        active_nodes = next_4d(active_nodes)
        d_print_node_state(active_nodes)
    return len(active_nodes)
                   action='store_true')
    p.add_argument('--number_of_medoids',
                   help='Number of medoids to find. Default = 10',
                   default=10,
                   type=int)
    p.add_argument('--numlocal',
                   help='Number of local minima to obtain. Default = 20',
                   default=20,
                   type=int)
    p.add_argument('--maxneighbor',
                   help='Maximum number of neighbors in a cluster. Default = 80',
                   default=80,
                   type=int)
    p.add_argument('--output',
                   help='Output file name. Default = output.txt',
                   default='output.txt')
    p.add_argument('--input',
                   help='Input file name. Default = data.txt',
                   default='data.txt')
    args = p.parse_args()

    objects = read_from_file(args.input, polygons=args.polygons)
    clarans_model = Clarans(objects, args.numlocal, args.maxneighbor,
                            args.number_of_medoids, args.polygons)
    medoids, objects = clarans_model.run()

    write_to_file(args.output, objects, polygons=args.polygons)
    plot_info(medoids, objects)
    plot_data(objects, medoids=medoids, clusters=True, polygons=args.polygons)
import argparse
import random

from utils import plot_data, read_from_file

random.seed()

if __name__ == "__main__":
    p = argparse.ArgumentParser()
    p.add_argument('-p',
                   '--polygons',
                   help='Use polygons instead of points.',
                   action='store_true')
    p.add_argument('--filename',
                   help='Input file name. Default = data.txt.',
                   default='data.txt')
    args = p.parse_args()

    objects = read_from_file(args.filename, polygons=args.polygons)
    plot_data(objects, polygons=args.polygons)
#!/usr/bin/env python3
import utils
import datetime
import time

SCRAPED_DATA = "/home/anon/Python/Covid/data"  # the scraped data
ACCESSED_TIME_FILE = "/home/anon/Python/Covid/last_accessed"  # datetime of the last scrape

if utils.checked_in_last_hour(ACCESSED_TIME_FILE):
    print(utils.read_from_file(SCRAPED_DATA))
    print('read from file')
else:
    cases = utils.scrape_confirmed_cases(
        "https://covid19.gov.im/general-information/latest-updates/")
    print(cases)
    utils.write_to_file(cases, SCRAPED_DATA)
    now = datetime.datetime.now()
    utils.write_to_file(str(datetime.datetime.timestamp(now)),
                        ACCESSED_TIME_FILE)
    print('read from web')

"""
now = datetime.datetime.now()
utils.write_to_file(str(datetime.datetime.timestamp(now)), ACCESSED_TIME_FILE)

# Convert the file contents to float first, then int; int() on a decimal string raises ValueError
time = datetime.datetime.fromtimestamp(int(float(utils.read_from_file(ACCESSED_TIME_FILE))))
print(time)
def liner_view_manifests(request):
    data = read_from_file(LINER_MANIFEST_FILE_NAME)
    return render_to_response('efr8/liner_view_manifests.html',
                              {'manifest_list': data})
        time.sleep(0.3)

    def run(self):
        self.thread = threading.Thread(target=self.scan_tx)
        self.thread.start()

    def stop(self):
        self.stop_flag = True
        self.thread.join()


if __name__ == '__main__':
    node = Node()
    node.run()
    print("input (file name or stop):")
    while True:
        input_str = input()
        if input_str == "stop":
            node.stop()
            break
        txs = read_from_file(input_str)
        for tx in txs:
            from_address, to_address, amount = tx
            from_account = node.blockchain.state.get_account(from_address)
            raw_tx = Transaction(from_address, to_address, amount)
            node.blockchain.tx_pool.append(raw_tx)
from datetime import datetime

from utils import read_from_file
from pkg_1.esercizio1 import list_routes

while True:
    print("Which file do you want to run the test on?: ")
    test_file = input("Enter your choice (a number from 1 to 6): ")
    airports, flights = read_from_file("../tests/test" + test_file)

    print("\nTEST " + test_file + "\n")
    print(" AIRPORTS ")
    for i in range(len(airports)):
        print(i, " - ", airports[i])

    start_index = 0  # int(input("Choose the departure airport index "))
    end_index = 3  # int(input("Choose the arrival airport index "))
    departure_time = "16:00"  # input("Enter the departure time hh:mm ")
    max_time = "23:00"  # input("Enter the maximum time for the route hh:mm ")

    start = airports[start_index]
    end = airports[end_index]
    starting_time = datetime.strptime(departure_time, "%H:%M").time()
    total_time = datetime.strptime(max_time, "%H:%M").time()
    start_time_minutes = starting_time.hour * 60 + starting_time.minute
    total_time_minutes = 50 * 60  # total_time.hour * 60 + total_time.minute

    paths = list_routes(flights, start, end, start_time_minutes,
                        total_time_minutes)
    print("\n\nRoutes from " + str(start) + " to " + str(end) + " in " +
          str(total_time_minutes) + " minutes ")
def solve_2(filename):
    passwords = format_input(read_from_file(filename))
    return len([p for p in passwords if p.is_valid_2()])
import random

import reducebykey as reduce
import utils
from math import log

import numpy as np

answers = 0
correct_answers = 0

allwords = utils.read_from_file('allwords.json')
allwords_with_sensekey = utils.read_from_file('allwords_with_sensekey.json')
possible_senses = utils.read_from_file('possibile_senses.json')
corpus = utils.read_from_file('corpus.json')

print("Counting occurrences...")

# Count all occurrences of a word in the corpus
# count(w_j)
count_w = list(map(lambda w: (w, 1), allwords))
count_w = list(reduce.reduceByKey(lambda x, y: x + y, count_w))
count_w.sort(key=lambda x: x[0], reverse=True)
utils.print_to_file('count_w.json', count_w)

# Count all occurrences of a (word, sense) pair in the corpus
# count(s_i, w_j)
count_ws = list(map(lambda w: (w, 1),
                    filter(lambda w: w[1] != '',
                           utils.matrix_to_array(allwords_with_sensekey))))
count_ws = list(reduce.reduceByKey(lambda x, y: x + y, count_ws))
count_ws.sort(key=lambda x: x[1], reverse=True)
utils.print_to_file('count_sw.json', count_ws)

# Count all occurrences of a sense in the corpus
# count(s_i)
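# reduceByKey (from the reducebykey module above) is assumed to behave like
# Spark's reduceByKey: group (key, value) pairs by key and fold the values with
# the given function. A minimal pure-Python sketch (hypothetical; the real
# module is not shown here):
from functools import reduce as fold
from itertools import groupby
from operator import itemgetter

def reduceByKey(func, pairs):
    # Sort so equal keys are adjacent, then fold each group's values.
    for key, group in groupby(sorted(pairs, key=itemgetter(0)),
                              key=itemgetter(0)):
        yield key, fold(func, (value for _, value in group))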
    def _index_neighbours(self, i, j):
        return set(product([i - 1, i, i + 1], [j - 1, j, j + 1])) - {(i, j)}

    def selection(self):
        # Tournament selection: pick two distinct individuals
        individual1, index = self._selection()
        return individual1, self._selection(excluded=index)[0]

    def _selection(self, excluded=None):
        i = 0
        turnir = []
        while i < self.size_turnir:
            index = randint(0, self.size - 1)
            if excluded is not None and index == excluded:
                continue
            turnir.append((self.individuals[index], index))
            i += 1
        return max(turnir)

    def add(self, o):
        self.individuals.append(o)


if __name__ == '__main__':
    config = read_from_file(
        '/Users/dan.ailenei/myprojects/Semester-6/Stratec/data/Step_One.csv')
    solver = GeneticSolver.from_config(config,
                                       generations=100,
                                       population_size=200)
    solver.solve()
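# max(turnir) above compares (individual, index) tuples, so the Individual
# class is assumed to define ordering (e.g. by fitness). A hypothetical sketch
# of what that contract could look like:
from functools import total_ordering

@total_ordering
class Individual:
    def __init__(self, genes, fitness):
        self.genes = genes
        self.fitness = fitness

    def __eq__(self, other):
        return self.fitness == other.fitness

    def __lt__(self, other):
        # Lower fitness sorts first, so max() picks the fittest individual.
        return self.fitness < other.fitness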
    shutil.move(path_ori, path_target)
    print(
        f'Validation dataset build finished! face: {face_val}, background: {background_val}'
    )


if __name__ == "__main__":
    PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
    IOU_pos, IOU_neg = 0.7, 0.3
    path_train = ''.join(
        [PROJECT_ROOT, '/data/FDDB_crop/iou_', str(IOU_pos), '/train/'])
    path_val = ''.join(
        [PROJECT_ROOT, '/data/FDDB_crop/iou_', str(IOU_pos), '/val/'])
    for path in [path_train, path_val]:
        for label in ['0/', '1/']:
            if not os.path.exists(path + label):
                os.makedirs(path + label)  # create the label subfolder itself

    print("Start to prepare dataset")
    annotations = read_from_file(PROJECT_ROOT + "/data/FDDB/FDDB-folds/")
    datasets = Data(annotations)
    prepare_data(datasets,
                 annotations,
                 threthoud_pos=IOU_pos,
                 threthoud_neg=IOU_neg,
                 save_path=path_train)
    dataset_split(path_train + '1/', path_train + '0/', path_val)
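# IOU_pos / IOU_neg above are intersection-over-union thresholds used to label
# crops as face (1) or background (0). The project's own IoU code is not shown;
# a standard helper for [x, y, w, h] boxes would look roughly like this
# (hypothetical sketch):
def iou(box_a, box_b):
    xa, ya, wa, ha = box_a
    xb, yb, wb, hb = box_b
    # Width/height of the intersection rectangle (0 if the boxes don't overlap)
    ix = max(0, min(xa + wa, xb + wb) - max(xa, xb))
    iy = max(0, min(ya + ha, yb + hb) - max(ya, yb))
    inter = ix * iy
    union = wa * ha + wb * hb - inter
    return inter / union if union else 0.0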
import argparse
import sys

from utils import get_reverse_complement, read_from_file


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--fn_input',
        help='input file, can be .txt or .fasta'
    )
    args = parser.parse_args()
    return args


def process_seqs_for_grep(list_seqs):
    # Build the union of the sequences and their reverse complements.
    list_rc = []
    for seq in list_seqs:
        list_rc.append(get_reverse_complement(seq))
    set_all = set(list_seqs).union(set(list_rc))
    return set_all


if __name__ == '__main__':
    args = parse_args()
    fn_input = args.fn_input
    list_seqs = read_from_file(fn_input)
    sys.stderr.write('Length of list {0} = {1}\n'.format(fn_input, len(list_seqs)))
    set_all = process_seqs_for_grep(list_seqs)
    sys.stderr.write('Size of set for forward/reverse seqs = {}\n'.format(len(set_all)))
    # Emit a grep-ready alternation pattern: seq1\|seq2\|...
    print('\\|'.join(set_all))
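# get_reverse_complement is imported from utils but not shown. A standard
# implementation for DNA sequences (hypothetical sketch):
def get_reverse_complement(seq):
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement[base] for base in reversed(seq.upper()))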
def show_actual_version(self):
    # Get actual version
    os.chdir(rootpath)
    version = read_from_file('actual_version')
    self.message(get_version_fallback('backend', version))
def run():
    # ===================== grouped by day =====================
    data = generate_data(172801)
    write_to_file(data)
    data = read_from_file()
    iterate(data, is_days=True)