def _load_tasks(cls, input_path, args, label_config_file):
    """Load tasks from *input_path* according to ``args.input_format``.

    JSON-based formats carry their own data keys and need no label config;
    all other formats derive the task data key from the first input data
    tag found in the label config.

    :param input_path: file or directory to load tasks from
    :param args: parsed CLI arguments (``input_format`` is read)
    :param label_config_file: path to the label config XML
    :raises ValueError: when a non-JSON format is used but the label config
        contains no input data tag
    :raises RuntimeError: for an unknown input format
    """
    with io.open(label_config_file, encoding='utf8') as f:
        label_config = f.read()

    task_loader = Tasks()
    if args.input_format == 'json':
        return task_loader.from_json_file(input_path)
    if args.input_format == 'json-dir':
        return task_loader.from_dir_with_json_files(input_path)

    input_data_tags = cls.get_input_data_tags(label_config)
    if len(input_data_tags) > 1:
        val = ",".join(tag.attrib.get("name") for tag in input_data_tags)
        print('Warning! Multiple input data tags found: ' + val + '. Only first one is used.')
    elif len(input_data_tags) == 0:
        # bug fix: the old message said "--format=json or format=json_dir",
        # which matches neither the flag name nor the accepted values
        raise ValueError(
            'You\'ve specified input format "{fmt}" which requires label config being explicitly defined. '
            'Please specify --label-config=path/to/config.xml or use --input-format=json or --input-format=json-dir'
            .format(fmt=args.input_format))

    input_data_tag = input_data_tags[0]
    # tag value is like "$text" - strip the leading "$" to get the data key
    data_key = input_data_tag.attrib.get('value').lstrip('$')

    if args.input_format == 'text':
        return task_loader.from_text_file(input_path, data_key)
    if args.input_format == 'text-dir':
        return task_loader.from_dir_with_text_files(input_path, data_key)
    if args.input_format == 'image-dir':
        return task_loader.from_dir_with_image_files(input_path, data_key)
    if args.input_format == 'audio-dir':
        return task_loader.from_dir_with_audio_files(input_path, data_key)
    raise RuntimeError('Can\'t load tasks for input format={}'.format(
        args.input_format))
def main():
    """CLI entry point: run every selected project/toolchain combination."""
    import argparse
    arg_parser = argparse.ArgumentParser(
        description='Exhaustively try project-toolchain combinations')
    arg_parser.add_argument('--project', default=None, nargs="+",
                            help='run given project only (default: all)')
    arg_parser.add_argument('--toolchain', default=None, nargs="+",
                            help='run given toolchain only (default: all)')
    arg_parser.add_argument('--out-prefix', default='build',
                            help='output directory prefix (default: build)')
    arg_parser.add_argument(
        '--build_type', default='generic',
        help='Type of build that is performed (e.g. regression test, multiple options, etc.)')
    arg_parser.add_argument('--build', default=None, help='Build number')
    arg_parser.add_argument('--parameters', default=None,
                            help='Tool parameters json file')
    arg_parser.add_argument('--fail', action='store_true', help='fail on error')
    arg_parser.add_argument('--verbose', action='store_true', help='verbose output')
    args = arg_parser.parse_args()

    tasks = Tasks(src_dir)
    task_list = tasks.get_tasks({"project": args.project,
                                 "toolchain": args.toolchain})

    params_file = args.parameters
    params_strings = [None]
    if params_file:
        # a parameter sweep is only meaningful for a single toolchain
        assert len(
            args.toolchain
        ) == 1, "A single toolchain can be selected when running multiple params."
        params_helper = ToolParametersHelper(args.toolchain[0], params_file)
        params_strings = [
            " ".join(params)
            for params in params_helper.get_all_params_combinations()
        ]

    runner = Runner(task_list, args.verbose, args.out_prefix, root_dir,
                    args.build_type, args.build, params_strings)
    runner.run()
    runner.collect_results()

    result = print_summary_table(args.out_prefix, args.build_type, args.build)
    if not result and args.fail:
        print("ERROR: some tests have failed.")
        exit(1)
def get_tasks(self):
    """Build a Tasks collection with one entry per discovered package.

    :returns: a ``Tasks`` instance mapping each package to its dependencies
    """
    packages = self.get_packages_and_dependencies()
    tasks = Tasks()
    # bug fix: dict.iteritems() is Python-2-only (removed in Python 3);
    # dict.items() behaves the same on both
    for package, dependencies in packages.items():
        tasks.add(package, dependencies)
    return tasks
def trainNTM():
    """Train a fresh NTM on 2000 type-1 sequences."""
    x_train, y_train = Tasks().sequence_type_1(2000)
    model = NTM(10, 20)
    model.train(x_train, y_train, 1, maxEpoch=25, learning_rate=0.0006)
def compareFixed():
    """Compare pretrained HNM, NTM and LSTM on 100 type-1 sequences.

    Prints each model's mean absolute error between the argmax of its
    output and the argmax of the target.
    """
    task_gen = Tasks()
    x_test, y_test = task_gen.sequence_type_1(100)

    add_params = torch.load('program_memory/add.pt')
    mul_params = torch.load('program_memory/mul.pt')

    hnm = HNM(10, 20, add_params, mul_params)
    hnm.load_state_dict(torch.load("learned_params/hnm_arch_2.pt"))

    ntm = NTM(10, 20)
    ntm.load_state_dict(torch.load("learned_params/ntm.pt"))

    lstm = LSTM(14, 256, 325, 1)
    lstm.load_state_dict(torch.load("learned_params/lstm.pt"))

    hnm_diff = lstm_diff = ntm_diff = 0
    for idx in range(len(x_test)):
        sample = x_test[idx:idx + 1]
        hnm_out = hnm.recurrent_forward(sample)
        ntm_out = ntm.recurrent_forward(sample)
        lstm_out = lstm.recurrent_forward(sample)

        answer = np.argmax(y_test[idx:idx + 1].detach().numpy())
        hnm_diff += abs(answer - np.argmax(hnm_out.detach().numpy()))
        ntm_diff += abs(answer - np.argmax(ntm_out.detach().numpy()))
        lstm_diff += abs(answer - np.argmax(lstm_out.detach().numpy()))

    print(hnm_diff / len(y_test), ntm_diff / len(y_test),
          lstm_diff / len(y_test))
def __init__(self, runtime_env, ini_file="config.ini", os=None):
    """Boot the SimBatch core: logger, settings, and all submodules.

    :param runtime_env: runtime environment identifier passed to Settings
    :param ini_file: ini file name with settings (default "config.ini")
    :param os: OS abbreviation override; autodetected from platform when None
    """
    self.logger = Logger(log_level=0, console_level=3)
    self.sts = Settings(self.logger, runtime_env, ini_file=ini_file)  # sts
    self.logger.set_console_level(self.sts.debug_level)
    self.logger.set_log_level(0)
    self.comfun = CommonFunctions(self.logger)
    if os is None:
        # map platform.system() to the short code used elsewhere
        self.os = platform.system()
        if self.os == "Windows":
            self.os = "w"
        elif self.os == "Linux":
            self.os = "l"
        else:
            self.os = None
    else:
        # bug fix: an explicitly passed "os" argument was previously
        # ignored, leaving self.os unset for that call path
        self.os = os
    # abbreviation for very often used variables, helping with identification the main modules
    self.usr = Users(self)  # usr
    self.prj = Projects(self)  # prj
    self.sch = Schemas(self)  # sch
    self.tsk = Tasks(self)  # tsk
    self.que = Queue(self)  # que
    self.nod = SimNodes(self)  # nod
    self.dfn = Definitions(self)  # dfn
    self.sio = StorageInOut(self)  # sio
    # abbreviation END
    self.logger.inf("SimBatch started")
def processMain(matrixIdx):
    """Check matrix *matrixIdx*: submit row, column and area checks to a pool.

    Each of the 9 indices gets one row, one column and one area check;
    the two locks serialise updates to the shared error/completed state.
    Removed: an unused ``threads`` list and the commented-out manual
    Thread management it belonged to (superseded by ThreadPoolExecutor).
    """
    global mainPidProcess
    # task id derived from the worker pid offset relative to the main process
    actualTasks = Tasks(os.getpid() - mainPidProcess, matrixIdx)
    lockErrors = threading.Lock()
    lockCompleted = threading.Lock()
    with ThreadPoolExecutor(max_workers=numthreads) as pool:
        for i in range(9):
            pool.submit(checkRow, matrixIdx, actualTasks, lockErrors,
                        lockCompleted)
            pool.submit(checkCollum, matrixIdx, actualTasks, lockErrors,
                        lockCompleted)
            pool.submit(checkArea, matrixIdx, actualTasks, lockErrors,
                        lockCompleted)
    # "lock" is module-level; guards the final log emission
    with lock:
        actualTasks.setFinalLog()
def test_filter_today_tasks(self):
    """today() returns only the task dated today, not tomorrow's."""
    tasks_obj = Tasks()
    tasks_obj.add_task(Task("Today"))
    next_day = datetime.date.today() + datetime.timedelta(days=1)
    tasks_obj.add_task(Task('Tomorrow', date=next_day))

    filtered = tasks_obj.today()

    self.assertEqual(len(filtered), 1)
    self.assertTrue(filtered[0].title == 'Today')
def __init__(self, fileName=None):
    """Set up CLI args, default config and the Tasks store.

    :param fileName: tasks storage file; defaults to 'tasks.json'
    """
    if fileName is None:
        fileName = 'tasks.json'
    self.args = self.createArgsObj()
    self.conf = self.getDefaultConf()
    self.tasks = Tasks(fileName, self.args.task_dir)
def create_tasks(self):
    """Assemble the background task set; TV polling only when a TV exists."""
    task_set = Tasks()
    task_set.add(self.check_gamepad_timeout)
    task_set.add_periodic(300, self.check_gamepad_battery)
    if self._tv:
        task_set.add_periodic(60, self.check_tv_power_status)
    return task_set
def __init__(self, add_tomorrow):
    """Initialise data holders and build the goal-vs-actual table.

    :param add_tomorrow: forwarded to compare_goal_actual — presumably
        whether tomorrow is included in the comparison; confirm at call site
    """
    super().__init__()
    self.path = None       # no output path chosen yet
    self.columns = []      # populated later
    self.v = Values()
    self.t = Tasks()
    self.l = Locks()
    self.ts = TimeSheet()
    # table is computed last, via compare_goal_actual
    self.table = self.compare_goal_actual(add_tomorrow)
def trainHarvard():
    """Train an HNM (with pretrained add/mul program memories) on 2000 type-1 sequences."""
    x_train, y_train = Tasks().sequence_type_1(2000)
    add_params = torch.load('program_memory/add.pt')
    mul_params = torch.load('program_memory/mul.pt')
    model = HNM(10, 20, add_params, mul_params)
    model.train(x_train, y_train, 1)
def test_filter_other_days(self):
    """today() is empty when every task is dated in the future."""
    tasks_obj = Tasks()
    future = datetime.date.today() + datetime.timedelta(days=4)
    tasks_obj.add_task(Task('Today', date=future))
    tasks_obj.add_task(
        Task('Next Date', date=future + datetime.timedelta(days=3)))
    self.assertListEqual(tasks_obj.today(), [])
def test_finds_tomorrows_tasks(self):
    """tomorrow() returns only the task dated exactly one day ahead."""
    task_list = Tasks()
    today = datetime.date.today()
    task_list.add_task(
        Task("important meeting", date=today + datetime.timedelta(days=7)))
    task_list.add_task(
        Task("John's birthday", date=today + datetime.timedelta(days=1)))

    result = task_list.tomorrow()

    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].title, "John's birthday")
def get_tasks():
    """Build the ordered task list: work dirs, downloads, then native builds."""
    tasks = Tasks()
    for setup_task in (CreateWorkDirsTask(), CreatePlatformDirsTask()):
        tasks.add(setup_task)
    add_download_tasks(tasks)
    for build_task in (BuildOpenSSLTask(), IOSBuildOpenSSLTask(),
                       BuildSQLCipher(), BuildSQLite4JavaTask()):
        tasks.add(build_task)
    return tasks
def getDataModel():
    """Return the mbedos5 data model with every supported kernel-object view."""
    views = [
        Tasks(),
        Timers(),
        Stacks(),
        MemoryPools(),
        Mailboxes(),
        MessageQueues(),
        Mutexes(),
        Semaphores(),
        System(),
    ]
    return Model("mbedos5", views)
def login(self, email, password):
    """ Login user to Producteev. """
    # authenticate and cache the session token used by later requests
    response = self.__request(
        'users/login', email=email, password=password)
    self.token = response['login']['token']
    # expose one API facade per Producteev resource type
    self.users = Users(self)
    self.tasks = Tasks(self)
    self.dashboards = Dashboards(self)
    self.labels = Labels(self)
    self.activities = Activities(self)
    self.subtasks = Subtasks(self)
def reload(self):
    """ Reads the json file if existant """
    # local import — NOTE(review): presumably avoids a circular import
    # between this module and tasks; confirm
    from tasks import Tasks
    try:
        with open(self.__file_path, 'r') as f:
            temp = json.load(f)
            # rebuild each stored task and register it with a zeroed
            # secondary order value
            for key, val in temp.items():
                n_t = Tasks(**val)
                self.all()[key] = n_t
                self.order.append([n_t.id, 0])
    except Exception as error:
        # deliberate best-effort: a missing/corrupt file is reported,
        # not fatal
        print(error)
        return
def main():
    """Configure cogs and launch the Discord bot."""
    # Moving the global env to local one (located in the folder)
    load_dotenv()
    token = os.getenv('token')

    bot = commands.Bot(command_prefix=('хт ', 'ht '),
                       help_command=None,
                       intents=discord.Intents.all())
    for cog in (Events(bot), Commands(bot), Tasks(bot)):
        bot.add_cog(cog)
    bot.load_extension("jishaku")

    bot.run(token)
def add_task_to_db(entry_list):
    """Collect the form fields from *entry_list* and persist a new Tasks row.

    Dates are converted to ISO ('i') via convert_date_postgres before saving.
    """
    # read the start date once and reuse it - previously the widget was
    # read and converted twice
    start_date = pick_start_date_entry.get()
    # NOTE(review): looks like leftover debug output - confirm before removing
    print(convert_date_postgres(start_date, 'i'))
    new_task = Tasks(
        entry_list['Project ID'].get(),
        entry_list['Task Description'].get(),
        entry_list['Task MES Lead'].get(),
        convert_date_postgres(start_date, 'i'),
        convert_date_postgres(pick_end_date_entry.get(), 'i'),
        '',
        entry_list['Task Triage'].get(),
        '',
    )
    new_task.save_to_db()
def add_task():
    """Interactively prompt for a task and store it via sqlite.

    Expects the deadline as DD-MM-YYYY with day 1-31, month 1-12 and
    year >= 2020; restarts the whole prompt sequence on bad input.
    """
    name = input("Name of the task: ")
    date = input("Deadline of this task (DD-MM-YYYY): ")
    # bug fix: check the length before indexing so short input cannot
    # raise IndexError on date[5]
    if len(date) > 10 or len(date) < 6 or date[5] != '-' or date[2] != '-':
        print("Wrong input!")
        # bug fix: return after the retry - previously execution fell
        # through and processed the invalid date anyway
        return add_task()
    try:
        values = list(map(int, date.split('-')))
    except ValueError:
        # non-numeric parts (e.g. "ab-cd-efgh") used to crash here
        print("Wrong input!")
        return add_task()
    if values[0] < 1 or values[0] > 31:
        print("Wrong input!")
        return add_task()
    elif values[1] < 1 or values[1] > 12:
        print("Wrong input!")
        return add_task()
    elif values[2] < 2020:
        print("Wrong input!")
        return add_task()
    description = input("Description of the task: ")
    t = Tasks(name, date, description)
    sqlite.add_task(t)
def api_import(): project = project_get_or_create() # make django compatibility for uploader module class DjangoRequest: POST = request.form GET = request.args FILES = request.files data = request.json if request.json else request.form content_type = request.content_type start = time.time() # get tasks from request parsed_data = uploader.load_tasks(DjangoRequest(), project) # validate tasks validator = TaskValidator(project) try: new_tasks = validator.to_internal_value(parsed_data) except ValidationError as e: return make_response(jsonify(e.msg_to_list()), status.HTTP_400_BAD_REQUEST) max_id_in_old_tasks = -1 if not project.no_tasks(): max_id_in_old_tasks = project.source_storage.max_id() new_tasks = Tasks().from_list_of_dicts(new_tasks, max_id_in_old_tasks + 1) project.source_storage.set_many(new_tasks.keys(), new_tasks.values()) # update schemas based on newly uploaded tasks project.update_derived_input_schema() project.update_derived_output_schema() duration = time.time() - start return make_response( jsonify({ 'task_count': len(new_tasks), '_count': validator.completion_count, 'prediction_count': validator.prediction_count, 'duration': duration, 'new_task_ids': [t for t in new_tasks] }), status.HTTP_201_CREATED)
def initControls(self):
    """Init basic controls"""
    self.outputArea.setString_("")
    self.tasks = Tasks()

    # visibility of the "work till" box follows the user preference
    self.workTillBox.setHidden_(not userPrefs.showWorkTill)

    self.pbtnProject.removeAllItems()
    self.pbtnProject.addItemsWithTitles_(Projects.get())
    self.pbtnProject.selectItemWithTitle_(userPrefs.selectedProject)
    self.projectChange_(None)

    self.initDoneButton()
    self.fillTasks()
    self.scrollToEnd()
async def on_ready():
    """Bootstrap once connected: probe cooldowns via 'tu', then start the loops."""
    global info, tasks, non_roll_channel, roll_channel, user
    print(f'Bot connected as {bot.user.name}')
    non_roll_channel = bot.get_channel(conf.NON_ROLL_CHANNEL_ID)
    roll_channel = bot.get_channel(conf.ROLL_CHANNEL_ID)
    pokemon_channel = None
    if conf.POKEMON_CHANNEL_ID:
        pokemon_channel = bot.get_channel(conf.POKEMON_CHANNEL_ID)
    # send 'tu' command to initialise the times
    await non_roll_channel.send(f'{conf.COMMAND_PREFIX}tu')
    try:
        # parse_tu doubles as the wait_for check predicate and the parser
        message = await bot.wait_for('message', check=parse_tu,
                                     timeout=conf.MESSAGE_WAIT_SECS)
        info = parse_tu(message)
    except asyncio.TimeoutError:
        # without the 'tu' reply none of the timers below can be scheduled
        print("could not parse tu, try running bot again")
        sys.exit()
    else:
        tasks = Tasks(bot, non_roll_channel, roll_channel, pokemon_channel,
                      info['claim_reset'], info['claim_available'],
                      info['num_rolls'], info['rolls_reset'],
                      info['daily_available'], info['daily_reset'],
                      info['dk_available'], info['dk_reset'])
        # loop to roll and claim
        bot.loop.create_task(tasks.wait_for_roll())
        # loop to update claim availability
        bot.loop.create_task(tasks.wait_for_claim())
        # loop for free daily kakera
        bot.loop.create_task(tasks.wait_for_dk())
        # loop for free daily roll
        bot.loop.create_task(tasks.wait_for_daily())
        # loop for pokemon rolls
        bot.loop.create_task(tasks.wait_for_p())
def add_tasks(self, force):
    """Queue every incomplete task (all tasks when *force*); return what was queued."""
    params = self.get_params()
    tasks_file = path(params["tasks_path"])
    completed_file = tasks_file.dirname().joinpath("completed.json")

    if tasks_file.exists() and not force:
        # reuse the persisted task/completion state
        tasks = Tasks.load(tasks_file)
        completed = Tasks.load(completed_file)
    else:
        # build fresh state and persist it for the next run
        tasks, completed = Tasks.create(params)
        tasks.save(tasks_file)
        completed.save(completed_file)

    task_queue = self.get_task_queue()
    added_tasks = Tasks()
    for name in sorted(tasks.keys()):
        task = tasks[name]
        done = completed[name]
        if not (force or not done):
            continue
        task_queue.put(task)
        added_tasks[name] = task

    logger.info("%d tasks queued", len(added_tasks))
    return added_tasks
def add_entry(self, value_id, task, how_long, datestr=None):
    """Record *how_long* units spent on *task* for *value_id* and persist.

    :param datestr: ISO date string; defaults to today's date. The old
        default ``str(datetime.today().date())`` was evaluated once at
        function definition time, freezing the date for the process
        lifetime - the None sentinel computes it per call.
    """
    if datestr is None:
        datestr = str(datetime.today().date())
    t = Tasks()
    try:
        # look up the points-per-unit rate for this (value, task) pair
        points = t.table[(t.table.value_id == value_id)
                         & (t.table.task == task)].points.iloc[0]
    except IndexError:
        print('no task {} for value {}'.format(task, value_id))
    else:
        self.table = self.table.append(
            {
                'datestr': datestr,
                'value_id': value_id,
                'task': task,
                'entry': np.round(how_long, 2),
                'points': np.round(how_long * points, 2)
            },
            ignore_index=True).sort_values(['datestr', 'value_id'],
                                           ascending=[False, True])
        self.record()
def plot_consumption(battery, tasks):
    """Plot battery consumption of *tasks* at observation times 38 and 380.

    NOTE(review): only 38 is appended to observation_time while two
    consumption samples are collected — looks like a missing
    observation_time.append(380); confirm against tasks.plot().
    """
    observation_time = []
    consumption = []
    observation_time.append(38)
    consumption.append(battery.tasks_consumption(38, tasks))
    consumption.append(battery.tasks_consumption(380, tasks))
    tasks.plot(observation_time, consumption)


if __name__ == "__main__":
    # build the demo task set; the numeric fields' semantics
    # (period/deadline/consumption?) are not visible from here
    t1 = task('1', 7, 18, 650)
    t2 = task('2', 5, 10, 800)
    t3 = task('3', 8, 26, 400)
    t4 = task('4', 10, 38, 380)
    tasks = Tasks()
    tasks.add_task(t1)
    tasks.add_task(t2)
    tasks.add_task(t3)
    tasks.add_task(t4)
    battery = Battery()

    ### Phase 1
    # Prescheduling using EDF
    scheduable = EDF(tasks)
    if not scheduable:
        raise Exception('Task not scheduable.')
if searchscreen == 'a': s.date_search() continue elif searchscreen == 'b': s.time_search() continue elif searchscreen == 'c': s.string_search() continue elif searchscreen == 'd': s.regex_search() continue if searchscreen == 'e': homescreen else: continue elif homescreen == 'c': os.system('clear') print("Thanks for Using the Work Log Program!") print("Come Again Soon!\n") break else: break m = Main() t = Tasks() s = Searches() m.LogLoop()
def trainLSTM():
    """Train a fresh LSTM on 2000 type-1 sequences."""
    x_train, y_train = Tasks().sequence_type_1(2000)
    model = LSTM(14, 256, 325, 1)
    model.train(x_train, y_train, maxEpoch=25, learning_rate=0.0003)
def main():
    """CLI entry point: exhaustively run project/toolchain/board combinations.

    Selection comes either from --project/--toolchain/--board flags or
    from a --run_config JSON file (mutually exclusive with the flags).
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Exhaustively try project-toolchain-board combinations')
    parser.add_argument('--project', default=None, nargs="+",
                        choices=get_projects(),
                        help='run given project(s) only (default: all)')
    parser.add_argument('--toolchain', default=None, nargs="+",
                        choices=get_toolchains(),
                        help='run given toolchain(s) only (default: all)')
    parser.add_argument('--board', default=None, nargs='+',
                        choices=get_boards(),
                        help='run given board(s) only (default: all)')
    parser.add_argument(
        '--out-prefix',
        default='build/_exhaust-runs',
        help='output directory prefix (default: build/_exhaust-runs)')
    parser.add_argument(
        '--build_type',
        default='generic',
        help=
        'Type of build that is performed (e.g. regression test, multiple options, etc.)'
    )
    parser.add_argument('--build', default=0, help='Build number')
    parser.add_argument('--parameters', default=None,
                        help='Tool parameters json file')
    parser.add_argument('--seed', default=None,
                        help='Seed to assign when running the tools')
    parser.add_argument('--run_config', default=None,
                        help="Run configuration file in JSON format.")
    parser.add_argument('--fail', action='store_true', help='fail on error')
    parser.add_argument('--verbose', action='store_true',
                        help='verbose output')
    parser.add_argument('--overwrite', action='store_true',
                        help='deletes previous exhuast builds before running')
    args = parser.parse_args()

    # verbose mode attaches a stream handler to the module-level logger
    if args.verbose:
        global logger
        logger = logging.getLogger('MyLogger')
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(levelname)s: %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

    logger.debug("Parsing Arguments")

    tasks = Tasks(src_dir)

    # --run_config may not be combined with explicit --project/--toolchain
    assert args.run_config is None or args.run_config and not (
        args.project or args.toolchain)

    args_dict = dict()
    seeds = list()
    if args.run_config:
        # selection, seeds and build numbers all come from the JSON file
        with open(args.run_config, 'r') as f:
            run_config = json.load(f)
        project = safe_get_dict_value(run_config, "project", None)
        toolchain = safe_get_dict_value(run_config, "toolchain", None)
        seeds = [
            int(i) for i in safe_get_dict_value(run_config, "seeds", [0])
        ]
        build_numbers = [
            int(i)
            for i in safe_get_dict_value(run_config, "build_number", [0])
        ]
        args_dict = {
            "project": project,
            "toolchain": toolchain,
            "board": args.board
        }
    else:
        args_dict = {
            "project": args.project,
            "toolchain": args.toolchain,
            "board": args.board
        }
        seeds = [int(args.seed)] if args.seed else [0]
        build_numbers = [int(args.build)] if args.build else [0]

    params_file = args.parameters
    params_strings = [None]
    if params_file:
        params_strings = []
        # a parameter sweep is only meaningful for a single toolchain
        assert len(
            args.toolchain
        ) == 1, "A single toolchain can be selected when running multiple params."
        params_helper = ToolParametersHelper(args.toolchain[0], params_file)
        for params in params_helper.get_all_params_combinations():
            params_strings.append(" ".join(params))

    logger.debug("Getting Tasks")
    task_list = tasks.get_tasks(args_dict, seeds, build_numbers,
                                params_strings)

    runner = Runner(task_list, args.verbose, args.out_prefix, root_dir,
                    args.build_type, build_numbers, args.overwrite)

    logger.debug("Running Projects")
    runner.run()
    logger.debug("Collecting Results")
    runner.collect_results()
    logger.debug("Printing Summary Table")
    result = print_summary_table(args.out_prefix, args.project,
                                 args.toolchain, args.board, args.build_type,
                                 args.build)

    if not result and args.fail:
        print("ERROR: some tests have failed.")
        exit(1)