Example #1
    def measure(self):
        _logger.info("mocked measure")
        if random.randint(0, 10) > 7:
            return Result(ResultState.ERROR)
        else:
            value = random.randint(1, 300) / 10
            return Result(ResultState.OK, pm10=value, pm25=value)
Example #2
    def __init__(self, segments, settings, segment_generator, effects):
        self.segments = segments
        self.settings = settings
        self.breath_pause = segment_generator.generate_breath_pause()
        self.effects = effects
        self.segment_selector = SegmentSelector(segments)
        self.logger = Logger()
        self.result = Result()
        self.init()
Example #3
    def validate_pages(self):
        if not self.args.pages:
            return Result.failure('Heri, provide please pages'
                                  ' parameter or use --help ;)')
        for page in self.args.pages.split(','):
            if not validators.between(int(page), min=1, max=settings.MAX_PAGE):
                return Result.failure(f"{page} page out of range,"
                                      f" max is {settings.MAX_PAGE}")
        return Result.success()
Example #4
    def validate_engine(self):
        if not self.args.engine:
            return Result.success()

        if self.args.engine not in ['g', 'l']:
            return Result.failure('Heri, provide please engine '
                                  'like "g" (Google) or "l" (Linkedin)')

        return Result.success()
Example #5
    def validate_proxy(self):
        if not self.args.proxy:
            return Result.success()
        try:
            host, port = self.args.proxy.split(':')
        except ValueError:
            return Result.failure('Port is not provided, may be "80"?'
                                  ' Proxy example: "142.250.75.14:80"')
        if not validators.ipv4(host):
            return Result.failure('Heri, provide please proxy like'
                                  ' "142.250.75.14:80" or use --help ;)')

        return Result.success()
Example #6
    def search(self):
        self.driver.get(settings.LINKEDIN_URL)
        self.enter_query()

        linkedin_urls = []
        for page in self.pages:
            navigated = LinkedinPaginator(driver=self.driver,
                                          page=page).go_to_page()
            if not navigated:
                return Result.failure(f'Pagination error: page {page}')

            linkedin_urls = linkedin_urls + self.find_linkedin_urls()
        return Result.success(linkedin_urls)
Example #7
    def test_loop_actor_on_hold(self):
        loop_count = 3

        process = MockProcess()

        process._mqtt_out_actor = "_mqtt_channel_sensor_switch"

        process.test_open(loop_count=loop_count)

        loop_params = process.create_dummy_loop_params()
        loop_params.on_hold = True
        loop_params.use_switch_actor = True
        process._determine_loop_params = MagicMock()
        process._determine_loop_params.return_value = loop_params

        process.run()

        self.assertEqual(0, process.test_sensor.open.call_count)
        self.assertEqual(0, process.test_sensor.measure.call_count)
        self.assertEqual(1, process.test_sensor.close.call_count)  # finally

        self.assertEqual(len(process.mqtt_messages), loop_count * 2 + 1)
        message = Result(ResultState.DEACTIVATED, timestamp=process._now()).create_message()
        for m in process.mqtt_messages:
            self.assertIn(m, [message, SwitchSensor.OFF.value])
Example #8
    def validate(self):
        if not self.args.query:
            return Result.failure('Heri, provide please query'
                                  ' parameter or use --help ;)')
        result = self.validate_pages()
        if result.is_failure():
            return result

        result = self.validate_proxy()
        if result.is_failure():
            return result

        result = self.validate_engine()
        if result.is_failure():
            return result

        return Result.success()
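
The validation and search snippets above only touch a small surface of this Result class: Result.success() (optionally carrying a payload), Result.failure(message), and is_failure(). A minimal sketch of a class with just that interface, offered as an illustration rather than the project's actual implementation:

class Result:
    def __init__(self, ok, payload=None):
        self._ok = ok
        self.payload = payload  # failure message or success data

    @classmethod
    def success(cls, payload=None):
        return cls(True, payload)

    @classmethod
    def failure(cls, message=None):
        return cls(False, message)

    def is_failure(self):
        return not self._ok
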
Example #9
    def test_max_time(self):
        time_interval_max = random.random() * 1000

        process = MockProcess()

        process._time_interval_max = time_interval_max
        process._last_result = Result(ResultState.OK, pm10=1, pm25=1, timestamp=process.now)

        compare = process._calc_interval_time()

        self.assertEqual(compare, time_interval_max)
Example #10
    def go_to_page(self, page):
        if page == 1:
            return Result.success()
        previous_last_visible_page = None
        while True:
            try:
                pagination_button = self.driver.find_element_by_link_text(
                    str(page))
                pagination_button.click()
                return Result.success()
            except selenium.common.exceptions.NoSuchElementException:
                pass

            last_visible_page = self.click_last_visible_page()
            if previous_last_visible_page and last_visible_page <= previous_last_visible_page:
                return Result.failure(
                    f"Searchable page is out of range, last page:"
                    f" {previous_last_visible_page}, searchable page: {page}")
            previous_last_visible_page = last_visible_page
            if settings.MAX_PAGE <= last_visible_page:
                return Result.failure(f"MAX_PAGE reached")
Example #11
    def add(self):
        time.sleep(1)
        self.driver.get(self.candidate_url)
        time.sleep(3)

        signedin_to_ats = self.check_signed_in_to_ats()
        if not signedin_to_ats:
            return Result.failure('Sign in to ATS first')

        profile_in_ats = self.check_profile_in_ats()
        if profile_in_ats:
            return Result.success(
                f'Profile in ATS {unquote(self.candidate_url)}')

        save_to_ats_pressed = self.press_save_to_ats_button()
        if not save_to_ats_pressed:
            return Result.failure(
                f'Save to ATS button was'
                f' not present for {unquote(self.candidate_url)}')

        self.try_save_to_new_vacancy()

        vacancy_selected = self.select_vacancy()
        if not vacancy_selected:
            return Result.failure(f"Cannot find '{self.vacancy}' in ATS options")

        self.press_add_to_vacancy_in_ats()

        return Result.success(
            f'{unquote(self.candidate_url)} added to ATS database')
Example #12
    def search(self):
        self.driver.get(settings.GOOGLE_URL)
        self.enter_query()

        linkedin_urls = []

        for page in self.pages:
            navigation_result = self.go_to_page(page)
            if navigation_result.is_failure():
                return navigation_result

            linkedin_urls = linkedin_urls + self.find_linkedin_urls()
        return Result.success(linkedin_urls)
Example #13
    def test_min_time(self):
        time_interval_max = 200 + random.random() * 1000
        time_interval_min = time_interval_max / 5

        process = MockProcess()

        process._time_interval_max = time_interval_max
        process._time_interval_min = time_interval_min
        process._last_result = Result(ResultState.OK, pm25=1, timestamp=process.now,
                                      pm10=process.DEFAULT_ADAPTIVE_DUST_UPPER + 1)

        compare = process._calc_interval_time()

        self.assertEqual(compare, time_interval_min)
Example #14
    def test_middle(self):
        time_max = 300
        time_min = 100

        process = MockProcess()
        process._time_interval_max = time_max
        process._time_interval_min = time_min

        dust = (process.DEFAULT_ADAPTIVE_DUST_UPPER + process.DEFAULT_ADAPTIVE_DUST_LOWER) / 2
        process._last_result = Result(ResultState.OK, pm10=dust, pm25=1, timestamp=process.now)

        compare = process._calc_interval_time()

        self.assertAlmostEqual(compare, (time_max + time_min) / 2)
Example #15
    def measure(self):
        if self._sensor is None:
            raise SensorError("sensor was not opened!")
        if not self._warmup:
            raise SensorError("sensor was not warmed up before measurement!")

        try:
            measurement = self._sensor.query()
        except SerialException as ex:
            self._error_ignored += 1
            if self._error_ignored > self._abort_after_n_errors:
                raise SensorError(ex)

            _logger.error("self._sensor.query() failed, but ignore %s of %s!",
                          self._error_ignored, self._abort_after_n_errors)
            _logger.exception(ex)
            return Result(ResultState.ERROR)
        else:
            if measurement is None:
                pm25, pm10 = None, None
            else:
                pm25, pm10 = measurement

            if not self.check_measurement(pm10=pm10, pm25=pm25):
                self._error_ignored += 1
                if self._error_ignored >= self._abort_after_n_errors:
                    raise SensorError(
                        f"{self._error_ignored} wrong measurments!")

                _logger.warning(
                    "wrong measurment (ignore %s of %s): pm25=%s; pm10=%s!",
                    self._error_ignored, self._abort_after_n_errors, pm25,
                    pm10)
                return Result(ResultState.ERROR)
            else:
                self._error_ignored = 0
                return Result(ResultState.OK, pm10=pm10, pm25=pm25)
Example #16
def retrieve_results(n_percentile):
    search_queries = parse_trec('documents/irg_queries.trec')
    search_collections = parse_trec('documents/irg_collection_clean.trec')
    # search_collections = parse_trec('documents/irg_collection_short.trec')
    # search_collections = eliminate_stopwords(search_collections)
    # write_collection_doc(search_collections, 'documents/irg_collection_clean.trec')

    print('======= Statistics =======')
    print(f'Queries: {len(search_queries)}')
    print(f'Collections: {len(search_collections)}')
    print(f'Removal of {int((1-n_percentile)*100)}%-ile')
    print('==========================')

    # TF-IDF
    document_results = []
    for search_query_id, search_query_text in search_queries.items():
        print(
            f'Current query id: {search_query_id}, text: "{search_query_text}"'
        )
        terms = search_query_text.split(' ')
        documents = keep_n_percentile_most_relevant_words(search_collections,
                                                          search_query_text,
                                                          n=n_percentile)
        document_scores = {}
        search_texts_collection = TextCollection(documents.values())
        for document_id, document_text in documents.items():
            for term in terms:
                term_score = search_texts_collection.tf_idf(
                    term, document_text)
                document_scores[document_id] = (
                    document_scores.get(document_id, 0.0) + term_score)

        rank = 1
        for document_id, document_score in sorted(document_scores.items(),
                                                  key=lambda kv: kv[1],
                                                  reverse=True):
            if rank <= 1000:
                document_results.append(
                    Result(search_query_id, document_id, rank,
                           document_score))
                rank += 1

    result_writer(document_results,
                  f'IE_result_keep_{int(n_percentile*100)}_percentile.trec')
    print('Done')
Example #17
    def test_create_message(self):
        now = datetime.datetime(
            2020, 1, 1, 2, 2, 3, tzinfo=datetime.timezone.utc)

        r = Result(ResultState.ERROR, timestamp=now)
        m = r.create_message()
        self.assertEqual(
            m,
            '{"PM10": null, "PM25": null, "STATE": "ERROR", "TIMESTAMP": "2020-01-01T02:02:03+00:00"}'
        )

        r = Result(ResultState.OK, pm10=0.1, pm25=0.2, timestamp=now)
        m = r.create_message()
        self.assertEqual(
            m,
            '{"PM10": 0.1, "PM25": 0.2, "STATE": "OK", "TIMESTAMP": "2020-01-01T02:02:03+00:00"}'
        )
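
The test above pins down the exact JSON that create_message() emits. A minimal sketch of a Result/ResultState pair that would satisfy it, assuming nothing beyond what this test and the other sensor snippets show (the real project class has more behaviour):

import datetime
import enum
import json


class ResultState(enum.Enum):
    OK = "OK"
    ERROR = "ERROR"
    DEACTIVATED = "DEACTIVATED"


class Result:
    def __init__(self, state, pm10=None, pm25=None, timestamp=None):
        self.state = state
        self.pm10 = pm10
        self.pm25 = pm25
        # assumption: default to "now" when no timestamp is supplied
        self.timestamp = timestamp or datetime.datetime.now(datetime.timezone.utc)

    def create_message(self):
        # key order and null handling match the strings asserted in the test
        return json.dumps({
            "PM10": self.pm10,
            "PM25": self.pm25,
            "STATE": self.state.value,
            "TIMESTAMP": self.timestamp.isoformat(),
        })
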
Example #18
    def dummy_measure(self):
        return Result(ResultState.OK, pm10=1, pm25=1)
Example #19

####################### SET MODEL TRAINING #############################
sampleSize = '0'  # choose between 0 and 2: 0 corresponds to 50D, 1 to 100D, 2 to 200D. 3 to 5 are also available, but for generation-based scaling
loss = 'WCategoricalCrossentropy'  # loss function: choose between "WCategoricalCrossentropy" and "categorical_crossentropy". WCategoricalCrossentropy is the expected loss described in the thesis.



######################THESE PARAMETERS ARE FIXED #########################
esconfig = [0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1] 
#performance = Performance()

timeSteps = 2  # the number of time steps for the LSTM network
precision_value = 'ert-2'

# set the precision value to negative 2
result = Result(ert_column=precision_value)

# load the performance and ELA files generated from data gathering
performance = pd.read_csv("./perf/DataGathering_performance.csv")
ela = pd.read_csv("./perf/DataGathering_elaFeatures.csv")

result.addPerformance(performance)
result.addELA(ela)

# This can sometimes fail if the training sample does not contain at least two time steps, which can happen if CMA-ES finds the optimal value before the 2nd checkpoint.
Xtrain, Ytrain = result.createTrainSet(dataset=sampleSize, algorithm=None, reset=False, interface=None, RNN=timeSteps)


model = Models(Xtrain, Ytrain, _shuffle=False)
model.trainLSTM(stepSize=timeSteps, size=sampleSize, loss=loss, precision_value=precision_value)
Example #20
                   baseBudget=10000,
                   dimensions=[2, 3],
                   esconfig=esconfig,
                   function=i,
                   performance=performance,
                   pflacco=True,
                   localSearch=None)
    suite.runDataGathering()
    performance.saveToCSVPerformance('DataGathering')
    performance.saveToCSVELA('DataGathering')

####################### Data Preprocessing #############################
sampleSize = '2'  # choose between 0 and 2: 0 corresponds to 50D, 1 to 100D, 2 to 200D. 3 to 5 are also available, but for generation-based scaling
timeSteps = 2  # the number of time steps for the LSTM network

result = Result()

# load the performance and ELA files generated from data gathering
performance = pd.read_csv("./perf/DataGathering_performance.csv")
ela = pd.read_csv("./perf/DataGathering_elaFeatures.csv")

result.addPerformance(performance)
result.addELA(ela)

# This can sometimes fail if the training sample does not contain at least two time steps, which can happen if CMA-ES finds the optimal value before the 2nd checkpoint.
Xtrain, Ytrain = result.createTrainSet(dataset=sampleSize,
                                       algorithm=None,
                                       reset=False,
                                       interface=None,
                                       RNN=timeSteps)
Example #21
class Task:
    def __init__(self, segments, settings, segment_generator, effects):
        self.segments = segments
        self.settings = settings
        self.breath_pause = segment_generator.generate_breath_pause()
        self.effects = effects
        self.segment_selector = SegmentSelector(segments)
        self.logger = Logger()
        self.result = Result()
        self.init()

    def init(self):
        random.seed(self.settings.seed)

    def preview(self):
        for segment in self.segments:
            self.result.add_segment(segment)
            self.result.add_segment(self.breath_pause, is_silence=True, record_stats=False)
        self.finalise()

    def finalise(self):
        start_time = time()
        total = len(self.result.parts) + len(self.effects)
        progress = 1
        for part in self.result.parts:
            self.result.audio += part
            elapsed_time = round(time() - start_time, 2)
            self.logger.print_progress(progress, total, suffix=f'Finalising ({elapsed_time}s)', bar_length=32)
            progress += 1
        for effect in self.effects:
            effect.post_finalise(self.result)
            elapsed_time = round(time() - start_time, 2)
            self.logger.print_progress(progress, total, suffix=f'Finalising ({elapsed_time}s)', bar_length=32)
            progress += 1

    def execute(self):
        start_time = time()
        while self.result.get_duration_in_seconds() < self.settings.duration:
            segment = self.segment_selector.get_segment(self.result)
            self.result.add_segment(segment)
            self.result.add_segment(self.breath_pause, is_silence=True, record_stats=False)

            # Calculate current progress
            length = self.result.get_duration_in_seconds()
            total = self.settings.duration
            elapsed_time = round(time() - start_time, 2)

            self.logger.print_progress(length, total, suffix=f'Creating sample ({elapsed_time}s)', bar_length=32)

        # Add the remaining segments that still have a timestamp
        for segment in self.segments:
            if len(segment.timestamps) > 0:
                self.result.add_segment(segment)
                self.result.add_segment(self.breath_pause, is_silence=True, record_stats=False)

        elapsed_time = round(time() - start_time, 2)
        self.logger.print_progress(1, 1, suffix=f'Done ({elapsed_time}s)', bar_length=32)

        self.finalise()
Example #22
from src.result import Result


def format_result(entry):
    formatted = f'{entry.query_id}'
    formatted += f' {entry.iteration}'
    formatted += f' {entry.doc_number}'
    formatted += f' {entry.rank}'
    formatted += f' {entry.score}'
    formatted += f' {entry.system}'
    return formatted


def result_writer(results, file):
    lines = '\n'.join(format_result(entry) for entry in results)
    with open(file, 'a+') as output_file:
        output_file.write(lines + '\n')


if __name__ == '__main__':
    result1 = Result(10, 'docNumber1', 1, 2.5)
    result2 = Result(10, 'docNumber2', 2, 2.2)
    for result in [result1, result2]:
        print(format_result(result))
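
Judging from the constructor calls here and in Example #16, and from the attributes read by format_result, src.result.Result is presumably a plain record with four positional fields plus the remaining TREC run columns. A hypothetical sketch; the defaults for iteration and system are assumptions, not taken from the project:

class Result:
    def __init__(self, query_id, doc_number, rank, score,
                 iteration=0, system='IR_system'):
        self.query_id = query_id
        self.doc_number = doc_number
        self.rank = rank
        self.score = score
        self.iteration = iteration  # TREC run files usually carry 'Q0' here
        self.system = system        # run identifier, written as the last column
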
Example #23
    def run(self):
        first_measurement = True
        loop_params = None
        state = SensorState.START

        try:
            self._wait_for_mqtt_connection()

            self._reset_timer()  # better testing
            while not self._shutdown:

                if state == SensorState.START:
                    self._process_mqtt_messages()
                    loop_params = self._determine_loop_params()

                if loop_params.on_hold:
                    if state == SensorState.START:
                        if loop_params.use_switch_actor:
                            self._switch_sensor(SwitchSensor.OFF)
                            state = SensorState.SWITCHED_OFF
                        else:
                            self._sensor.open(warm_up=False)  # prepare for sending to sleep!
                            state = SensorState.COOLING_DOWN

                        # skip the first deactivation message; hopefully all subscriptions are complete the next time
                        if not first_measurement or not loop_params.missing_subscriptions:
                            self._handle_result(loop_params, Result(ResultState.DEACTIVATED))
                else:
                    if state == SensorState.START:
                        if loop_params.use_switch_actor:
                            self._switch_sensor(SwitchSensor.ON)
                            state = SensorState.SWITCHING_ON
                        else:
                            state = SensorState.CONNECTING

                    if state == SensorState.SWITCHING_ON and self._time_counter >= loop_params.tlim_switching_on:
                        state = SensorState.CONNECTING

                    if state == SensorState.CONNECTING:
                        self._sensor.open(warm_up=True)
                        state = SensorState.WARMING_UP

                    if state == SensorState.WARMING_UP and self._time_counter >= loop_params.tlim_warming_up:
                        result = self._sensor.measure()
                        self._handle_result(loop_params, result)
                        state = SensorState.COOLING_DOWN

                if state == SensorState.COOLING_DOWN and \
                        (self._time_counter >= loop_params.tlim_cool_down or loop_params.on_hold):
                    self._sensor.close(sleep=loop_params.sensor_sleep)
                    state = SensorState.WAITING_FOR_RESET

                if self._time_counter >= loop_params.tlim_interval:  # any state
                    first_measurement = False
                    self._reset_timer()
                    state = SensorState.START

                self._wait(self._time_step)

        finally:
            self.close()