Example #1
 def execute(self, driver, config):
     try:
         if self.config_key is None:
             self.param = ""
         else:
             if self.config_key in config.params:
                 self.param = config.params[self.config_key]
             else:
                 raise Exception(
                     "Cannot execute command %s as there was no key named %s in the config params"
                     % (self.command, self.config_key))
         self.driver = driver
         result = Result(passed=True,
                         message="Execute %s %s" %
                         (self.__repr__(), self.param))
         logging.debug("Execute : %s %s" % (self.__repr__(), self.param))
         if self.command == self.NAVIGATE:
             self.driver.get(self.param)
         elif self.command == self.CLICK:
             WebElement(self.driver, self.element.locators).click()
         elif self.command == self.SENDKEYS:
             WebElement(self.driver,
                        self.element.locators).send_keys(self.param)
         elif self.command == self.VERIFY:
             WebElement(self.driver, self.element.locators).highlight()
         else:
             raise ValueError("Command not supported: %s" % self.command)
     except Exception as e:
         logging.debug("Exception : %s" % str(e))
         result = Result(passed=False,
                         message="Command raised an exception %s" % str(e),
                         exception=str(e))
     finally:
         self.execution_results.append(result)
     return result
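
Example #1 assumes a Result container with passed/message/exception fields. A minimal sketch of such a class, hypothetical since the original is not shown in these snippets:

class Result:
    """Hypothetical stand-in for the Result container Example #1 constructs."""

    def __init__(self, passed=False, message='', exception=None):
        self.passed = passed        # overall pass/fail flag
        self.message = message      # human-readable summary
        self.exception = exception  # stringified exception, if any
        self.step_results = []      # filled in by suite-level runs (see Example #19)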
Example #2
 def delete_by_uuid(cls, del_uuid):
     execution = Execution.query.filter_by(uuid=del_uuid)
     if execution.first() is not None:  # a Query object itself is always truthy
         from models.result import Result
         linked_results = Result.query.filter_by(execution_uuid=del_uuid)
         for result in linked_results:
             Result.delete_by_uuid(result.uuid)
         execution.delete()
         # rmtree(self.fq_storage_path)
         db_session.commit()
def insert_profiles(base_url, db_path, driver_list, category):
    current_file = os.path.basename(__file__)
    current_file_name = os.path.splitext(current_file)[0]

    for driver_id in driver_list:

        url = f"{base_url}/{current_file_name}/{driver_id}/{category}"

        try:
            print(url)
            response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print(e)
            sys.exit(1)

        if response.status_code == 200:

            doc = pq(response.text)

            connection = sqlite3.connect(db_path)

            try:

                if doc("main > div").eq(0).hasClass("profile"):

                    # Header - Driver Info
                    driver = Driver(doc, driver_id)
                    connection.execute(
                        '''INSERT INTO drivers
                           (id, fullname, name, lastname, birthdate, deathdate, nationality, created_at, updated_at, deleted_at)
                           VALUES (?,?,?,?,?,?,?,?,?,?)''', driver.get_tuple())

                    # Starts-WRC
                    for season in doc.items("h5.profile-season"):

                        starts = season.nextAll('div.profile-starts').eq(0)

                        for start in starts('div.profile-start-line').items():
                            result = Result(driver.id, season, start)
                            connection.execute(
                                '''INSERT INTO results
                                   (event_id, driver_id, codriver_id, season, car_number, car, plate, team, chassis, category, result, created_at, updated_at, deleted_at)
                                   VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', result.get_tuple())

                connection.commit()

            except Exception as e:
                connection.rollback()
                raise e
            finally:
                connection.close()
Example #4
def result_counter(result):
    final = Result()
    for item in result:
        for key, value in item.items():
            if key == 'viewCount':
                final.view_count += int(value)
            elif key == 'subscriberCount':
                final.subscribers = int(value)
            elif key == 'commentCount':
                final.comments += int(value)
            elif key == 'dislikeCount':
                final.dislikes_count += int(value)
            elif key == 'likeCount':
                final.likes_count += int(value)
    return final
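
For context, result_counter folds an iterable of per-video statistics dicts into a single Result; note that subscriberCount is assigned rather than accumulated. A usage sketch, assuming Result() initializes each counter to 0 (the sample data is hypothetical):

stats = [
    {'viewCount': '1200', 'likeCount': '80', 'commentCount': '5'},
    {'viewCount': '300', 'subscriberCount': '42', 'dislikeCount': '3'},
]
totals = result_counter(stats)
print(totals.view_count)   # 1500
print(totals.subscribers)  # 42 (last value wins; it is not summed)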
def result_download(result_id, label):
    if result_id is not None:
        results = [get_result(result_id)]
    else:
        results = get_all_results(label=label)

    if results is None or len(results) == 0:
        return json.dumps({
            'status': 'error',
            'code': 1
        }), 404, {
            'ContentType': 'application/json'
        }

    out_path = f'public/results_{time.time()}.zip'

    with zipfile.ZipFile(out_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for r in results:
            zipf.writestr(f'result_{r.id}.json', r.toJson())
        if len(results) > 1:
            zipf.writestr('results.json', Result.toListJson(results))

    # Read the archive into memory so the temp file can be deleted before sending
    with open(out_path, 'rb') as file:
        memory_file = io.BytesIO(file.read())

    os.remove(out_path)

    return send_file(memory_file,
                     attachment_filename='results.zip',
                     as_attachment=True)
Example #6
    def run(self):
        # Check preprocessor
        if self.preprocessor is None:
            self.__log(Log('No preprocessor', Severity.ERROR))
            return Result(self.minSupport,
                          self.maxGap,
                          errors=['No preprocessor'])

        self.__log(
            Log('Starting algorithm: {}'.format(self.algorithm.__name__),
                Severity.INFO))

        t0 = perf_counter()
        mdb, skippedDays = self.preprocessor.GenerateTemporalMdb()
        preTime = perf_counter() - t0

        t0 = perf_counter()
        results = self.algorithm(mdb, self.logger, self.minSupport,
                                 self.maxGap)
        results.algorithmTime = perf_counter() - t0

        results.skippedDays = skippedDays
        results.dataset = self.dataset
        results.preprocessingTime = preTime
        results.job = self

        self.results = results
        return self.results
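
Examples #6, #9, and #16 share one Result signature: minSupport and maxGap first, then optional patterns, frequentStates, and errors, with timing fields attached afterwards by run(). A minimal sketch of that assumed container (hypothetical name, to avoid clashing with Example #1):

class MiningResult:
    """Hypothetical sketch of the Result used by Examples #6, #9 and #16."""

    def __init__(self, minSupport, maxGap, patterns=None,
                 frequentStates=None, errors=None):
        self.minSupport = minSupport
        self.maxGap = maxGap
        self.patterns = patterns if patterns is not None else []
        self.frequentStates = frequentStates if frequentStates is not None else []
        self.errors = errors if errors is not None else []
        # Attached later by the run() driver in Example #6:
        self.algorithmTime = None
        self.preprocessingTime = None
        self.skippedDays = None
        self.dataset = None
        self.job = None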
Example #7
def delete_local_results(run_id=None,
                         result_id=None,
                         strat_id=None,
                         label=None):
    files_to_remove = []
    if strat_id is not None:
        files_to_remove = list(
            set(glob.glob(f'public/strat_{strat_id}/result_*.json')))
        for f in files_to_remove[:]:
            with open(f, 'r') as file:
                txt = file.read()
                if not is_json(txt):
                    files_to_remove.remove(f)

    elif run_id is not None or result_id is not None or label is not None:
        # recursive=True is needed for '**' to match nested directories
        files_to_remove = list(
            set(glob.glob('public/**/result_*.json', recursive=True)))
        for f in files_to_remove[:]:
            with open(f, 'r') as file:
                txt = file.read()

                if not is_json(txt):
                    files_to_remove.remove(f)
                    continue

                result = Result.fromJson(txt)
                if (run_id is not None and result.config[0]['run_id'] != run_id
                    ) or (result_id is not None and result.id != result_id
                          ) or (label is not None
                                and result.config[0]['label'] != label):
                    files_to_remove.remove(f)

    for f in files_to_remove:
        os.remove(f)

    return files_to_remove
Example #8
 def create_result(self, test_id, execution_uuid):
     if not execution_uuid:
         self.__outputs[test_id] += '\nNo Execution UUID provided.'
         return None
     result = Result.create(execution_uuid)
     if result.uuid:
         self.__outputs[test_id] += f'\nCreated result: {result}'
         return result.uuid
     return None
Example #9
def tpminer(mdb, logger, minSupport, maxGap):
    patternSets = tpminer_main(mdb, minSupport, logger)

    # Convert the pattern set to a list
    patterns = list(patternSets)

    return Result(minSupport, maxGap, patterns, [])
def get_results():  # noqa: E501
    """Get all results

     # noqa: E501


    :rtype: List[Result]
    """
    results = ResultImpl.get_all()
    return result_schema_many.dump(results)
def get_repositories():
    search_term = request.args.get('search_term')

    if not search_term:
        return render_template('repos.html', result=Result(search_term, []))

    github_service = GithubService(
        'https://api.github.com/search/repositories')

    loop = asyncio.get_event_loop()
    task = loop.create_task(github_service.get_repositories(search_term))
    repositories = loop.run_until_complete(task)

    # is_running is a method; without the call the branch never executed
    if not loop.is_running():
        loop.stop()

    end = time.time()

    return render_template('repos.html',
                           result=Result(search_term, repositories))
def get_result_by_uuid(result_uuid):  # noqa: E501
    """Retrieve a result

     # noqa: E501

    :param result_uuid: UUID of the result to return
    :type result_uuid: str

    :rtype: Result
    """
    result = ResultImpl.get_by_uuid(result_uuid)
    return result_schema.dump(result)
def delete_result_by_uuid(result_uuid):  # noqa: E501
    """Delete a result

    Deletes the result with the given UUID on it # noqa: E501

    :param result_uuid: UUID of the result to delete
    :type result_uuid: str

    :rtype: Result
    """
    result = ResultImpl.delete_by_uuid(result_uuid)
    return result_schema.dump(result)
Example #14
def run_package():

    suite = PackageSuite.query.all()
    total = 0
    passes = 0
    fail = 0
    pid = request.args.get("pid")
    result = Result(pid, passes + fail, passes, fail,
                    datetime.date.today().isoformat(), "Scheduled", 0, "")
    db.session.add(result)
    db.session.commit()
    return redirect("/results/")
Example #15
def run_problems(input_filename, h_list, heur, processes):
    results = []

    with mp.Pool(processes=processes) as pool:
        if heur == 'cons':
            data_file_path = BASE_DIR + '/data/{}.txt'.format(input_filename)

            for problem in read_problems(data_file_path):
                n_jobs = problem['n_jobs']
                df = problem['df']
                total_p = df['p'].sum()
                result = []
                for h in h_list:
                    d = int(h * total_p)

                    t = time.process_time()

                    schedule = heur_cons.create_schedule(df, d, h)

                    elapsed_t = time.process_time() - t

                    # verifying that the sequence is valid
                    utils_test.sequence_test(n_jobs, schedule)

                    cost = utils.get_cost(schedule)

                    result_h = Result(n_jobs, h, schedule, cost, elapsed_t)
                    result.append(result_h)

                results.append(result)

        elif heur in ['local', 'tabu']:
            data_file_path = BASE_DIR + \
                '/output/{}-cons-results.txt'.format(input_filename)
            h_list_len = len(h_list)
            result = []

            result_h = pool.starmap(
                multiprocessing_func,
                zip(read_cons_results(data_file_path), repeat(heur)))

            for e in result_h:
                result.append(e)

                if len(result) >= h_list_len:
                    results.append(result)
                    result = []

        else:
            raise ValueError('Selected heuristic does not exist')

        return results
Example #16
def armada(mdb, logger, minSupport, maxGap):
    supportList = Support.GenerateStateSupportList(mdb)
    mdb = Support.RemoveNonSupported(minSupport, supportList, mdb)
    logger.log(Log('Non-Supported states removed', Severity.INFO))

    frequentStates = Support.ExtractFrequentStates(minSupport, supportList,
                                                   mdb)
    logger.log(
        Log('Frequent states found: ' + str(len(frequentStates)),
            Severity.INFO))

    patterns = Armada(mdb, frequentStates, minSupport, maxGap, logger)

    return Result(minSupport, maxGap, patterns, frequentStates)
Example #17
 def get_marks(self, sector, type_of_result, predictions, stock_data):
     if sector != 'technology':
         stock_movement, difference = self.stock_calculator.stock_movement(
             stock_data.tail(1))
     else:
         stock_movement, difference = self.stock_calculator.stock_movement(
             stock_data, 'technology')
     keys = list(predictions.keys())
     results = []
     for key in keys:
         if key != 'HAS_CHANGED':
             check_if_correct = self.check_if_correct(
                 predictions[key], stock_movement)
             now = datetime.datetime.now()
             new_result = Result(
                 sector=sector,
                 type=type_of_result,
                 machine_learning_technique=key,
                 date=now.strftime("%Y-%m-%d %H:%M"),
                 prediction=predictions[key],
                 result=check_if_correct,
                 difference=difference,
                 has_changed=predictions['HAS_CHANGED'])
             results.append(new_result)
     return results
Example #18
def cmp_data(youtube_data):
    cmped_result = Result()
    db_data = get_data_from_file()
    cmped_result.likes_count = youtube_data.likes_count - db_data.likes_count
    cmped_result.dislikes_count = youtube_data.dislikes_count - db_data.dislikes_count
    cmped_result.comments = youtube_data.comments - db_data.comments
    cmped_result.subscribers = youtube_data.subscribers - db_data.subscribers
    cmped_result.view_count = youtube_data.view_count - db_data.view_count
    return cmped_result
Example #19
    def execute(self, driver):
        logging.debug("Executing Suite %s" % self.name)
        self.driver = driver
        suite_result = Result(passed=True, message="Passed", exception="Passed")

        for test in self.tests:
            test_result = test.execute(driver)
            suite_result.step_results.append(test_result)
            if not test_result.passed:
                suite_result.passed = False
                suite_result.message = str(self.__class__)
                suite_result.exception = test_result.exception
                suite_result.failed_state = test_result.failed_state
                suite_result.actual_state = test_result.actual_state
                suite_result.html = self.driver.page_source
                suite_result.screenshot = self.driver.get_screenshot_as_base64()

        self.suite_results.append(suite_result)
        self.cascade_save()
        return suite_result
def download_result_by_uuid(result_uuid):  # noqa: E501
    """Downloads the generated results

     # noqa: E501

    :param result_uuid: UUID of the result to download
    :type result_uuid: str

    :rtype: file
    """
    result = ResultImpl.get_by_uuid(result_uuid)
    if os.path.isfile(result.fq_result_storage_path):
        return send_file(result.fq_result_storage_path, as_attachment=True, attachment_filename="Results.zip")
    else:
        return None
def create_result(post_result=None):  # noqa: E501
    """Creates a new result

     # noqa: E501

    :param post_result:
    :type post_result: dict | bytes

    :rtype: None
    """
    if connexion.request.is_json:
        post_result = POSTResult.from_dict(connexion.request.get_json())  # noqa: E501

    created_result = ResultImpl.create(post_result.execution_uuid)
    return result_schema.dump(created_result)
Example #22
def read_cons_results(data_file_path):
    with open(data_file_path, 'r') as sch_file:
        n_problems = int(sch_file.readline().strip())
        h_list = [float(e) for e in sch_file.readline().strip().split(';')]
        n_jobs = int(sch_file.readline().strip())

        for _ in range(n_problems):
            job = int(sch_file.readline().strip())
            for _ in h_list:
                h, d, start, early_seq, tardy_dict, cost = sch_file.readline(
                ).strip().split('|')
                schedule = Schedule(int(d), int(start),
                                    OrderedDict(json.loads(early_seq)),
                                    OrderedDict(json.loads(tardy_dict)))
                result = Result(n_jobs, float(h), schedule, int(cost))

                yield result
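
read_cons_results assumes a three-line header (problem count, a ';'-separated list of h values, job count) followed, for each problem, by one job line plus one '|'-separated record per h value. A hypothetical fragment in that assumed layout:

# Hypothetical *-cons-results.txt fragment (all values illustrative only):
#
#   1                                   <- n_problems
#   0.2;0.4                             <- h_list
#   10                                  <- n_jobs
#   1                                   <- job line (read but unused)
#   0.2|14|0|{"3": 5}|{"7": 9}|123      <- h|d|start|early_seq|tardy_dict|cost
#   0.4|28|0|{"1": 4}|{"2": 6}|98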
def results(result_id, label):
    if result_id is not None:
        result = get_result(result_id)
        if result is None:
            result = json.dumps(result)
        else:
            result = result.toJson()
    else:
        result_list = get_all_results(label=label)

        if label is None:
            result_list = list(set([x.config[0]['label']
                                    for x in result_list]))

        result = Result.toListJson(result_list)

    return result, 200, {'ContentType': 'application/json'}
 def get_results(self):
     cycle = self.cycle
     settings = self.settings
     results = Result(
         time_avg_cost=round(cycle.time_avg_rebalance_cost, 3),
         time_avg_cond_drift=round(cycle.time_avg_cond_drift, 3),
         obj_function=round(
             cycle.time_avg_rebalance_cost * settings.cost_coef +
             cycle.time_avg_cond_drift, 3),
         moved_bike_total_count=cycle.cumulative_moved_bike_count,
         rebalanced_bike_total_count=cycle.cumulative_rebalanced_bike_count,
         total_cycles=cycle.count + 1,
         simulation_hour=(cycle.count + 1) * settings.interval_hour,
         distance_moved=cycle.distance_moved,
         demand_supply_gap_total_decrement=cycle.cumulative_demand_supply_gap_decrement)
     self.simulation.set_result(results)
     return results
 def get_all_results(self):
     self._db_cur.execute('SELECT * FROM result', ())
     results = []
     row = self._db_cur.fetchone()
     while row:
         result = Result()
         result.request_id = row[1]
         result.adress = row[3]
         result.square = row[6]
         result.region_id = row[8]
         results.append(result)
         row = self._db_cur.fetchone()
     return results
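
get_all_results reads columns by position; only indices 1, 3, 6, and 8 are used, which implies a result table shaped roughly like the hypothetical sketch below (unnamed columns are unknown):

# Hypothetical layout of the `result` table implied by the row indices above;
# the placeholder columns are assumptions, only the accessed names are grounded:
#
#   CREATE TABLE result (
#       col0,        -- row[0] (unused here)
#       request_id,  -- row[1]
#       col2,        -- row[2] (unused here)
#       adress,      -- row[3]
#       col4,        -- row[4] (unused here)
#       col5,        -- row[5] (unused here)
#       square,      -- row[6]
#       col7,        -- row[7] (unused here)
#       region_id    -- row[8]
#   )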
Example #26
def multiprocessing_func(cons_result, heur):
    t = time.process_time()

    if heur == 'local':
        search_func = heur_local.create_schedule
    elif heur == 'tabu':
        search_func = heur_tabu.create_schedule
    else:
        raise ValueError('Unknown heuristic: %s' % heur)

    cost, schedule = search_func(cons_result.n_jobs, cons_result.schedule,
                                 cons_result.cost)

    utils_test.sequence_test(cons_result.n_jobs, schedule)

    elapsed_t = time.process_time() - t

    cost = utils.get_cost(schedule)

    result_h = Result(cons_result.n_jobs, cons_result.h, schedule, cost,
                      elapsed_t)

    return result_h
def __generate(randomizer: Randomizer, settings: Settings, race: bool = False, retries: int = 0) -> Result:
    if retries >= 3:
        return None

    patch = None
    permalink = None
    spoiler = None

    try:
        patch_start_time = time.perf_counter()
        logging.info(f"({settings.seed}) Generating patch ({retries + 1} / 3)...")
        logging.info(f"({settings.seed}) Settings: {str(request.get_json())}")

        patch = __generatePatch(randomizer, settings)
        if patch is None:
            logging.info(f"({settings.seed}) Failed to generate patch in {time.perf_counter() - patch_start_time} seconds!")
            return __generate(randomizer, settings, race, retries + 1)
        else:
            logging.info(f"({settings.seed}) Generated patch in {time.perf_counter() - patch_start_time} seconds!")

        if not race:
            logging.info(f"({settings.seed}) Race Mode off, generating spoiler...")
            spoiler_start_time = time.perf_counter()
            spoiler = __generateSpoiler(randomizer, settings)
            logging.info(f"({settings.seed}) Generated spoiler in {time.perf_counter() - spoiler_start_time} seconds!")
        else:
            logging.info(f"({settings.seed}) Race Mode on, not generating spoiler...")

        if database.enabled:
            logging.info(f"({settings.seed}) Generating permalink...")
            permalink_start_time = time.perf_counter()
            permalink = database.create(patch, spoiler, settings)
            logging.info(
                f"({settings.seed}) Permalink generated in {time.perf_counter() - permalink_start_time} seconds!")

        return Result(patch, spoiler, permalink)

    except Exception as e:
        logging.exception(e)
        return __generate(randomizer, settings, race, retries + 1)
    def test_push_results(self):
        # Make Result model
        now = datetime.datetime.now()
        new_result = Result(sector='test',
                            type='test',
                            machine_learning_technique='test',
                            date=now.strftime("%Y-%m-%d %H:%M"),
                            prediction=1,
                            result='0',
                            difference=12.34,
                            has_changed=1)

        # Push result to database
        results = [new_result]
        results_to_db = ResultsToDB()
        results_to_db.push_results(results)

        # Create engine
        engine = create_new_engine('stockbot')
        Session = create_new_session(engine)
        session = Session()
        query = session.query(Result).filter(Result.sector.in_(['test'])).all()
        self.assertEqual(type(query[0]), Result)
Example #29
def read_local_results(run_id=None, result_id=None, strat_id=None, label=None):
    if strat_id is not None:
        files = list(set(glob.glob(f'public/strat_{strat_id}/result_*.json')))
    else:
        # recursive=True is needed for '**' to match nested directories
        files = list(
            set(glob.glob('public/**/result_*.json', recursive=True)))

    results = []
    for f in files:
        with open(f, 'r') as file:
            txt = file.read()
            # Validate before parsing; json.loads would raise on invalid JSON
            if is_json(txt):
                results.append(json.loads(txt))

    results_flatten = []
    for r in results:
        if isinstance(r, list):
            results_flatten = results_flatten + r
        else:
            results_flatten.append(r)

    if run_id is not None:
        results_flatten = [
            r for r in results_flatten if r['config'][0]['run_id'] == run_id
        ]

    if result_id is not None:
        results_flatten = [r for r in results_flatten if r['id'] == result_id]

    if label is not None:
        results_flatten = [
            r for r in results_flatten if r['config'][0]['label'] == label
        ]

    results_list = Result.fromListDict(results_flatten)

    return results_list
Example #30
def search_flat(search_form, request_id):
    payload = utils.create_form_data(search_form.__dict__)
    res = requests.post('https://extra.egrp365.ru/api/extra/index.php',
                        data=payload.encode('utf-8'),
                        headers=utils.HEADERS)
    try:
        json_data = json.loads(res.text)
        if json_data['success']:
            db = DB()
            for x in json_data['data']:
                result = Result()
                result.cadastral = x['cn']
                result.adress = x['address']
                result.floor = x['floor']
                # result.response_json = str(json_data)
                result.region_id = search_form.macroRegionId
                result.square = square_search(result.cadastral)
                db.insert(result, 'result')
        else:
            SEARCH_SHEDULE.append_request(request_id)
    except Exception as e:
        logging.error('Search error. Response text: ' + res.text)
        raise e
def main():
    # Parse the command-line arguments passed when the script is run from the console
    parser = argparse.ArgumentParser()
    parser.add_argument('--driver', default='{SQL Server}', type=str)
    parser.add_argument('--server',
                        default=r'DESKTOP-7APAP23\SQLEXPRESS',
                        type=str)
    parser.add_argument('--db', default='Poems', type=str)
    parser.add_argument('--uid', default='Tester', type=str)
    parser.add_argument('--pwd', default='Tester123', type=str)
    args = parser.parse_args()

    # Create a connector to the server and create the database if it does not exist yet
    connector = Connector(args)
    connector.create_database()

    # Create the tables in the database
    Author().create(connector)
    Poem().create(connector)
    FirstQuatrain().create(connector)
    SecondQuatrain().create(connector)
    Result().create(connector)

    # Determine the path to the data source (the poems)
    current_directory = os.getcwd()
    data_directory = os.path.join(current_directory, 'data')
    data_list = os.listdir(data_directory)

    # Declare a list for storing authors
    authors_list = []

    # Create a session for each table
    session = sessionmaker()
    session.configure(bind=connector.get_engine())
    authors_session = session()
    poem_session = session()
    firstQuantrain_session = session()
    secondQuantrain_session = session()
    second_parts_list_object = []

    # Populate the tables
    for data in data_list:
        file_path = os.path.join(data_directory, data)
        with open(file_path, encoding="utf-8") as file:
            file_data = file.readlines()
            poem_title = file_data[0].strip()
            author_fullname = file_data[1].strip().split(' ')
            author_firstname = author_fullname[0]
            auth_secondname = author_fullname[1]
            authors_list.append(auth_secondname)
            first_quatrain = ' '.join(x.strip() for x in file_data[2:6])
            second_quatrain = file_data[6:]

            author = Author(first_name=author_firstname,
                            second_name=auth_secondname,
                            objectid=str(uuid4()))
            authors_session.add(author)
            authors_session.commit()

            poem = Poem(poem_title=poem_title,
                        objectid=str(uuid4()),
                        author_id=authors_session.query(Author).filter_by(
                            objectid=author.objectid).first().id)
            poem_session.add(poem)
            poem_session.commit()

            firstQuatrain = FirstQuatrain(
                objectid=str(uuid4()),
                quatrain=first_quatrain,
                poem_title_id=poem_session.query(Poem).filter_by(
                    objectid=poem.objectid).first().id)
            firstQuantrain_session.add(firstQuatrain)
            firstQuantrain_session.commit()

            # enumerate avoids list.index(), which returns the wrong position
            # for duplicate lines
            for order_number, part in enumerate(second_quatrain, start=1):
                secondQuatrain = SecondQuatrain(
                    objectid=str(uuid4()),
                    parts=part.strip(),
                    order_number=order_number,
                    first_quatrain_id=firstQuantrain_session.query(
                        FirstQuatrain).filter_by(
                            objectid=firstQuatrain.objectid).first().id)
                second_parts_list_object.append(secondQuatrain)
    random.shuffle(second_parts_list_object)
    secondQuantrain_session.add_all(second_parts_list_object)
    secondQuantrain_session.commit()

    # Create extra records in the authors and poem-titles tables
    fake_author = Author(first_name='Ватрушкин', second_name='Иосиф')
    authors_session.add(fake_author)
    authors_session.commit()

    fake_title = Poem(poem_title='Лишнее название', objectid=str(uuid4()))
    poem_session.add(fake_title)
    poem_session.commit()

    # Close the sessions
    authors_session.close()
    poem_session.close()
    firstQuantrain_session.close()
    secondQuantrain_session.close()

    # Create a file for storing the list of users + PrettyTable
    file_users_path = os.path.join(current_directory, 'users.txt')
    file = open(file_users_path, 'w', encoding="utf-8")

    # Create and add server users; write the users to the file
    for i in range(30):
        user = User()
        # engine = connector.get_engine()
        # engine.execute(get_sql_create_login(user))
        random.shuffle(authors_list)
        p_table = PrettyTable()
        p_table.field_names = ['Login', 'Password', 'Author name']
        print()
        p_table.add_row([user.username, user.password, authors_list.pop()])
        file.write(p_table.get_string())
        file.write('\n')
Example #32
def get_all_results(label: str = None):
    results = Result.fromListDict(fs_get_all(_collection))
    if label is not None:
        results = [r for r in results if r.config[0]['label'] == label]
    return results