Example #1
 def __init__(self):
     try:
         self.db = torndb.Connection(config.sql_ip, config.sql_database,
                                     config.sql_user, config.sql_pwd)
     except Exception as e:
         logging.error(e)
    [truncated state diagram: a "Registration Scanning" state with a
     transition labeled "Location Interrupt Fired" returning to it]

"""

import time
import calendar
import threading
import logging

try:
    import RPi.GPIO as GPIO
except ImportError:
    logging.error(
        'RaspberryPi GPIO is unavailable; please check OS installation')
    exit(-1)

#TODO: import all HackPSU abstraction modules
#import hackpsuLCD as lcd
import HackPSUrfid as rfid
import HackPSUredis as redis
import HackPSUconfig as config
import HackPSUfauxlcd as lcd

global state
state = 1


def getWifi():
    return "XXX%"
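
Note that logging.ERROR is the integer level constant (40) in the standard library, not a function; the module-level helper is logging.error(). A minimal, self-contained illustration of the difference:

import logging

logging.basicConfig(level=logging.INFO)        # ERROR/INFO are level constants here

try:
    raise RuntimeError("database unreachable")  # placeholder failure
except Exception as e:
    logging.error(e)                       # module-level convenience function
    logging.log(logging.ERROR, "%s", e)    # equivalent: pass the level constant explicitly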
Example #3
 def load(path):
     try:
         with open(path + '/config.json', 'r') as f:
             return json.load(f)
     except FileNotFoundError:
         logging.error("File not found")
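
Example #3's load returns None when the file is missing, pushing the failure onto every caller. A hedged variant (the function name and empty-dict fallback are illustrative, not part of the original) that logs the missing path and degrades gracefully:

import json
import logging
import os

def load_config(path):
    """Load <path>/config.json, or return an empty dict if it is missing."""
    config_file = os.path.join(path, 'config.json')
    try:
        with open(config_file, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        logging.error("File not found: %s", config_file)
        return {}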
Example #4
    def start_work_and_wait_done(self, fetcher_num=10, is_over=True):
        """
        start this pool, and wait for finishing
        """
        logging.warning("%s start: urls_count=%s, fetcher_num=%s, is_over=%s",
                        self.__class__.__name__,
                        self.get_number_dict(TPEnum.URL_NOT_FETCH),
                        fetcher_num, is_over)

        if isinstance(self._inst_fetcher, (list, tuple)):
            fetcher_list = [
                FetchThread("fetcher-%d" % (i + 1), fetcher, self)
                for (i, fetcher) in enumerate(self._inst_fetcher)
            ]
        else:
            fetcher_list = [
                FetchThread("fetcher-%d" % (i + 1),
                            copy.deepcopy(self._inst_fetcher), self)
                for i in range(fetcher_num)
            ]
        parser_saver_list = [
            ParseThread("parser", self._inst_parser, self),
            SaveThread("saver", self._inst_saver, self)
        ]

        # ----1----
        for thread in fetcher_list:
            thread.setDaemon(True)
            thread.start()

        # ----1----
        for thread in parser_saver_list:
            thread.setDaemon(True)
            thread.start()

        # ----2----
        for thread in fetcher_list:
            if thread.is_alive():
                thread.join()

        # clear the queue once all fetchers have stopped
        while self.get_number_dict(TPEnum.URL_NOT_FETCH) > 0:
            priority, url, keys, deep, repeat = self.get_a_task(
                TPEnum.URL_FETCH)
            logging.error(
                "%s error: not fetch, %s", self.__class__.__name__,
                CONFIG_FETCH_MESSAGE % (priority, keys, deep, repeat, url))
            self.finish_a_task(TPEnum.URL_FETCH)

        # ----2----
        for thread in parser_saver_list:
            if thread.is_alive():
                thread.join()

        # ----3----
        if is_over and self._monitor.is_alive():
            self._monitor_stop = True
            self._monitor.join()

        logging.warning(
            "%s end: fetcher_num=%s, is_over=%s, fetch:[SUCC=%d, FAIL=%d]; parse[SUCC=%d, FAIL=%d]; save:[SUCC=%d, FAIL=%d]",
            self.__class__.__name__, fetcher_num, is_over,
            self.get_number_dict(TPEnum.URL_FETCH_SUCC),
            self.get_number_dict(TPEnum.URL_FETCH_FAIL),
            self.get_number_dict(TPEnum.HTM_PARSE_SUCC),
            self.get_number_dict(TPEnum.HTM_PARSE_FAIL),
            self.get_number_dict(TPEnum.ITEM_SAVE_SUCC),
            self.get_number_dict(TPEnum.ITEM_SAVE_FAIL))
        return self._number_dict
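
Example #4 starts its workers with setDaemon(True), which has been deprecated in favour of the daemon attribute (or constructor argument) since Python 3.10. A minimal, self-contained sketch of the same start/join pattern with the current idiom (worker is a stand-in for the fetch/parse/save threads):

import threading
import time

def worker(name):
    time.sleep(0.1)           # stand-in for real fetch/parse/save work
    print("%s done" % name)

threads = [threading.Thread(target=worker, args=("fetcher-%d" % (i + 1),), daemon=True)
           for i in range(3)]
for thread in threads:
    thread.start()
for thread in threads:
    if thread.is_alive():
        thread.join()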
Example #5
# NOTE: this snippet begins mid-call; the original configured the root logger
# with (at least) a StreamHandler, e.g.:
logging.basicConfig(
    handlers=[logging.StreamHandler()],
    level=logging.INFO)

config = configparser.ConfigParser()
config.read('bot/config.ini')


def getConfig(name: str):
    return config['DEFAULT'][name]


LOGGER = logging.getLogger(__name__)

try:
    if bool(getConfig('_____REMOVE_THIS_LINE_____')):
        logging.error('The README.md file there to be read! Exiting now!')
        exit()
except KeyError:
    pass

aria2 = aria2p.API(
    aria2p.Client(
        host="https://ariamirrorkerala.herokuapp.com/",
        port=8210,
        secret="bhadoo",
    ))

DOWNLOAD_DIR = None
BOT_TOKEN = None

download_dict_lock = threading.Lock()
Example #6
def main():
    logging.basicConfig(filename='../ansible.log', level=logging.DEBUG)

    # Define the available arguments/parameters that a user can pass to this module.
    params = {
        'osPlatform':             {'type':'str', 'required':False, 'default':'windows',
                                   'choices':['windows', 'windowsConnectionMgr', 'linux']},
        'forceTakePortOwnership': {'type':'bool', 'required':False, 'default':True},
        'releasePortsWhenDone':   {'type':'bool', 'required':False, 'default':False},
        'enableDebugTracing':     {'type':'bool', 'required':False, 'default':True},
        'deleteSessionAfterTest': {'type':'bool', 'required':False, 'default':True},
        'ixChassisIp':            {'type':'str', 'required':True, 'default':None},
        'portList':               {'type':'list', 'required':True, 'default':None},
        'apiServerIp':            {'type':'str', 'required':True, 'default':None},
        'apiServerIpPort':        {'type':'int', 'required':False, 'default':11009},
        'configLicense':          {'type':'str', 'required':False, 'default':'True', 'no_log':False},
        'linuxUsername':          {'type':'str', 'required':False, 'default':'admin', 'no_log':False},
        'linuxPassword':          {'type':'str', 'required':False, 'default':'password', 'no_log':False},
        'licenseServerIp':        {'type':'str', 'required':True, 'default':None},
        'licenseMode':            {'type':'str', 'required':False, 'default':'subscription'},
        'licenseTier':            {'type':'str', 'required':False, 'default':'tier3'}
    }

    # seed the result dict in the object
    # we primarily care about changed and state
    # change is if this module effectively modified the target
    # state will include any data that you want your module to pass back
    # for consumption, for example, in a subsequent task
    result = dict(
        changed=False,
        original_message='',
        message=''
    )

    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(
        argument_spec = params,
        supports_check_mode = False
    )

    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    #if module.check_mode:
    #    return result

    module.params['name'] = 'Custom BGP Module'
    # manipulate or modify the state as needed (this is going to be the
    # part where your module will do what it needs to do)
    result['message'] = 'A custom message'
    result['Result'] = 'Passed'

    # use whatever logic you need to determine whether or not this module
    # made any modifications to your target
    #if module.params['new']:
    #    result['changed'] = True
    #result['changed'] = True

    # during the execution of the module, if there is an exception or a
    # conditional state that effectively causes a failure, run
    # AnsibleModule.fail_json() to pass in the message and the result
    #if module.params['name'] == 'fail me':
    #    module.fail_json(msg='You requested this to fail', **result)

    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    #module.exit_json(**result)
    #module.exit_json(changed=False, meta=module.params)

    # Default the API server to either windows or linux.
    osPlatform = module.params['osPlatform']

    if module.params['osPlatform'] not in ['windows', 'windowsConnectionMgr', 'linux']:
        raise IxNetRestApiException("\nError: %s is not a known option. Choices are 'windows' or 'linux'." % module.params['osPlatform'])
    
    try:
        #---------- Preference Settings --------------

        forceTakePortOwnership = module.params['forceTakePortOwnership']
        releasePortsWhenDone = module.params['releasePortsWhenDone']
        enableDebugTracing = module.params['enableDebugTracing']
        deleteSessionAfterTest = module.params['deleteSessionAfterTest'] ;# For Windows Connection Mgr and Linux API server only

        ixChassisIp = module.params['ixChassisIp']
        # [chassisIp, cardNumber, slotNumber]
        portList = module.params['portList']

        if module.params['osPlatform'] == 'linux':
              mainObj = Connect(apiServerIp = module.params['apiServerIp'],
                                serverIpPort = module.params['apiServerIpPort'],
                                username = module.params['linuxUsername'],
                                password = module.params['linuxPassword'],
                                deleteSessionAfterTest = module.params['deleteSessionAfterTest'],
                                verifySslCert = False,
                                serverOs = module.params['osPlatform']
                            )

        if module.params['osPlatform'] in ['windows', 'windowsConnectionMgr']:
              mainObj = Connect(apiServerIp = module.params['apiServerIp'],
                                serverIpPort = module.params['apiServerIpPort'],
                                serverOs = module.params['osPlatform'],
                                deleteSessionAfterTest = module.params['deleteSessionAfterTest']
                            )

        #---------- Preference Settings End --------------

        portObj = PortMgmt(mainObj)
        portObj.connectIxChassis(module.params['ixChassisIp'], timeout=2)

        # Exit Ansible playbook test as passed.
        module.exit_json(changed=False)
        #module.exit_json(**result)

    except (IxNetRestApiException, Exception, KeyboardInterrupt) as errMsg:
        module.fail_json(msg=errMsg, **result)
        #module.fail_json(changed=False, meta=traceback.format_exc())
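        # NOTE: fail_json() raises SystemExit, so the debug tracing and session
        # cleanup below never execute; call fail_json() last to keep them.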

        if module.params['enableDebugTracing']:
            if not bool(re.search('ConnectionError', traceback.format_exc())):
                logging.error('\nMY Failure: %s' % traceback.format_exc())
                #module.fail_json(changed=True, meta=traceback.format_exc())
                #exitArgs = {'module_stderr': traceback.format_exc()}
                #module.exit_json(**exitArgs)

        #logging.ERROR('\nException Error! %s\n' % errMsg)
        if 'mainObj' in locals() and module.params['osPlatform'] == 'linux':
            mainObj.linuxServerStopAndDeleteSession()

        if 'mainObj' in locals() and module.params['osPlatform'] in ['windows', 'windowsConnectionMgr']:
            if releasePortsWhenDone and forceTakePortOwnership:
                portObj.releasePorts(module.params['portList'])

            if module.params['osPlatform'] == 'windowsConnectionMgr':
                mainObj.deleteSession()
Example #7
def benchmark_querying(n_docs_options, retriever_doc_stores, data_dir,
                       data_s3_url, filename_gold, filename_negative,
                       n_queries, embeddings_filenames, embeddings_dir,
                       update_json, **kwargs):
    """ Benchmark the time it takes to perform querying. Doc embeddings are loaded from file."""
    retriever_results = []

    for n_docs in n_docs_options:
        for retriever_name, doc_store_name in retriever_doc_stores:
            try:
                logger.info(
                    f"##### Start querying run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### "
                )
                doc_store = get_document_store(doc_store_name)
                retriever = get_retriever(retriever_name, doc_store)
                add_precomputed = retriever_name in ["dpr"]
                # For DPR, precomputed embeddings are loaded from file
                docs, labels = prepare_data(
                    data_dir=data_dir,
                    filename_gold=filename_gold,
                    filename_negative=filename_negative,
                    data_s3_url=data_s3_url,
                    embeddings_filenames=embeddings_filenames,
                    embeddings_dir=embeddings_dir,
                    n_docs=n_docs,
                    n_queries=n_queries,
                    add_precomputed=add_precomputed)
                logger.info("Start indexing...")
                index_to_doc_store(doc_store, docs, retriever, labels)
                logger.info("Start queries...")

                raw_results = retriever.eval()
                results = {
                    "retriever": retriever_name,
                    "doc_store": doc_store_name,
                    "n_docs": n_docs,
                    "n_queries": raw_results["n_questions"],
                    "retrieve_time": raw_results["retrieve_time"],
                    "queries_per_second":
                        raw_results["n_questions"] / raw_results["retrieve_time"],
                    "seconds_per_query":
                        raw_results["retrieve_time"] / raw_results["n_questions"],
                    "recall": raw_results["recall"],
                    "map": raw_results["map"],
                    "top_k": raw_results["top_k"],
                    "date_time": datetime.datetime.now(),
                    "error": None
                }

                doc_store.delete_all_documents(index=doc_index)
                doc_store.delete_all_documents(index=label_index)
                time.sleep(5)
                del doc_store
                del retriever
            except Exception as e:
                tb = traceback.format_exc()
                logging.error(
                    f"##### The following Error was raised while running querying run: {retriever_name}, {doc_store_name}, {n_docs} docs #####"
                )
                logging.error(tb)
                results = {
                    "retriever": retriever_name,
                    "doc_store": doc_store_name,
                    "n_docs": n_docs,
                    "n_queries": 0,
                    "retrieve_time": 0.,
                    "queries_per_second": 0.,
                    "seconds_per_query": 0.,
                    "recall": 0.,
                    "map": 0.,
                    "top_k": 0,
                    "date_time": datetime.datetime.now(),
                    "error": str(tb)
                }
                doc_store.delete_all_documents(index=doc_index)
                doc_store.delete_all_documents(index=label_index)
                time.sleep(5)
                del doc_store
                del retriever
            logger.info(results)
            retriever_results.append(results)

            retriever_df = pd.DataFrame.from_records(retriever_results)
            retriever_df = retriever_df.sort_values(
                by="retriever").sort_values(by="doc_store")
            retriever_df.to_csv(query_results_file)
    if update_json:
        populate_retriever_json()
Example #8
# Pick the path based on chosen Sort_by
if sort_by == 'hot':
    for idx, submission in enumerate(
            reddit.subreddit(subreddit).hot(limit=number_of_posts)):
        try:
            download_all(submission, IMAGE_DIRECTORY)
        except:
            logging.info('Failed to get image. | ' + submission.url + ' | ' +
                         submission.title)
elif sort_by == 'top':
    for idx, submission in enumerate(
            reddit.subreddit(subreddit).top(sort_time, limit=number_of_posts)):
        try:
            download_all(submission, IMAGE_DIRECTORY)
        except:
            logging.info('Failed to get image. | ' + submission.url + ' | ' +
                         submission.title)
elif sort_by == 'new':
    for idx, submission in enumerate(
            reddit.subreddit(subreddit).new(limit=number_of_posts)):
        try:
            download_all(submission, IMAGE_DIRECTORY)
        except:
            logging.info('Failed to get image. | ' + submission.url + ' | ' +
                         submission.title)
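
The three sort_by branches above repeat the same download loop; a hedged consolidation (reddit, subreddit, sort_time, number_of_posts, download_all and IMAGE_DIRECTORY are the names already used in this example) picks the listing once:

listing_for = {
    'hot': lambda sub: sub.hot(limit=number_of_posts),
    'top': lambda sub: sub.top(sort_time, limit=number_of_posts),
    'new': lambda sub: sub.new(limit=number_of_posts),
}
if sort_by in listing_for:
    for submission in listing_for[sort_by](reddit.subreddit(subreddit)):
        try:
            download_all(submission, IMAGE_DIRECTORY)
        except Exception:
            logging.info('Failed to get image. | %s | %s',
                         submission.url, submission.title)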

try:
    sort_downloads(IMAGE_DIRECTORY)
except:
    logging.error('Failed to sort images')
Example #9
feature_file_path_test = './data/features-test-withId.csv'

plot_feature_imp_file_path = './plot/featImp-'
plot_model_corr_test_file_path = './plot/modelCorr-test.png'
plot_model_corr_train_file_path = './plot/modelCorr-train.png'

plot_feature_importance = False
plot_model_correlation = True
read_feature_from_file = True
remove_bad_feature = False

# Read feature data or original data

if (read_feature_from_file):
    if (not isfile(feature_file_path_train)):
        logging.error('Cannot find the training feature file.')
        sys.exit(1)
    if (not isfile(feature_file_path_test)):
        logging.error('Cannot find the testing feature file.')
        sys.exit(1)

    logging.info('Reading training feature data: %s.' %
                 feature_file_path_train)
    feature_df = pd.read_csv(feature_file_path_train, sep='\t')
    feature_df.drop(['Unnamed: 0'], axis=1,
                    inplace=True)  # drop 'Unnamed index'
    # TODO - feature filtering
    print(feature_df.shape)

    logging.info('Reading testing feature data: %s.' % feature_file_path_test)
    test_x = pd.read_csv(feature_file_path_test, sep='\t')
Example #10
                              'Medium_image',
                              'Small_image',
                              'Popularity',
                              'Type',
                          ])
        LocalFileName = OutputFileName + '.csv'
        FinalOutputFileName = OutputFileName + '_' + now.strftime(
            '%Y-%m-%d') + '.csv'
        df.to_csv(LocalFileName, sep=',', index=False)

        print('Operation completed')
    # with pysftp.Connection(host=myHostname, username=myUsername, password=myPassword) as sftp_upload:
    #     print("Upload connection successfully established ... \n")
    if 1 == 1:

        remotepath = r'/Import/Spotify/' + FinalOutputFileName
        localpath = LocalFileName

        # o_var = sftp_upload.put(localpath, remotepath, confirm=False)
        # print('o_var:', o_var)
        # sftp_upload.close()
        print("File uploaded... \n")
    # In[ ]:
    exit(200)

except Exception as e1:
    print('[Error-1]:', e1)
    logging.error("Error Exception Occurred: ", exc_info=True)
    logging.critical("Critical Exception Occurred Error: ", exc_info=True)
    logging.warning("Warning Exception Occurred Error: ", exc_info=True)
Example #11
 def parse_music(self, response):
     item = MusicItem()
     try:
         item['music_name'] = response.xpath(
             '//*[@id="wrapper"]/h1/span/text()').extract()[0]
         content = "".join(response.xpath('//*[@id="info"]').extract())
         info = response.xpath('//*[@id="info"]/span').extract()
         item['music_alias'] = ""
         item['music_singer'] = ""
         item['music_time'] = ""
         for i in range(0, len(info)):
             if "又名" in info[i]:
                 if i == 0:
                     item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[1] \
                         .replace("\xa0", "").replace("\n", "").rstrip()
                 elif i == 1:
                     item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[2] \
                         .replace("\xa0", "").replace("\n", "").rstrip()
                 elif i == 2:
                     item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[3] \
                         .replace("\xa0", "").replace("\n", "").rstrip()
                 else:
                     item['music_alias'] = ""
                     # break
             if "表演者" in info[i]:
                 if i == 0:
                     item['music_singer'] = "|".join(
                         response.xpath(
                             '//*[@id="info"]/span[1]/span/a/text()').
                         extract())
                 elif i == 1:
                     item['music_singer'] = "|".join(
                         response.xpath(
                             '//*[@id="info"]/span[2]/span/a/text()').
                         extract())
                 elif i == 2:
                     item['music_singer'] = "|".join(
                         response.xpath(
                             '//*[@id="info"]/span[3]/span/a/text()').
                         extract())
                 else:
                     item['music_singer'] = ""
                     # break
             if "发行时间" in info[i]:
                 nbsp = re.findall(
                     r"<span class=\"pl\">发行时间:</span>(.*?)<br>", content,
                     re.S)
                 item['music_time'] = "".join(nbsp).replace(
                     "\xa0", "").replace("\n", "").replace(" ", "")
                 # break
         try:
             item['music_rating'] = "".join(
                 response.xpath(
                     '//*[@class="rating_self clearfix"]/strong/text()').
                 extract())
             item['music_votes'] = "".join(
                 response.xpath(
                     '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()'
                 ).extract())
         except Exception as error:
             item['music_rating'] = '0'
             item['music_votes'] = '0'
             logging.error(error)
         item['music_tags'] = "|".join(
             response.xpath(
                 '//*[@id="db-tags-section"]/div/a/text()').extract())
         item['music_url'] = response.url
         yield item
     except Exception as error:
         logging.error(error)
Example #12
def analyze_decode_results(dataset, decode_results, verbose=True):
    from lang.py.parse import tokenize_code, de_canonicalize_code
    # tokenize_code = tokenize_for_bleu_eval
    import ast
    assert dataset.count == len(decode_results)

    f = f_decode = None
    if verbose:
        f = open(dataset.name + '.exact_match', 'w')
        exact_match_ids = []
        f_decode = open(dataset.name + '.decode_results.txt', 'w')
        eid_to_annot = dict()

        if data_type == 'django':
            for raw_id, line in enumerate(open(DJANGO_ANNOT_FILE)):
                eid_to_annot[raw_id] = line.strip()

        f_bleu_eval_ref = open(dataset.name + '.ref', 'w')
        f_bleu_eval_hyp = open(dataset.name + '.hyp', 'w')

        logging.info('evaluating [%s] set, [%d] examples', dataset.name, dataset.count)

    cum_oracle_bleu = 0.0
    cum_oracle_acc = 0.0
    cum_bleu = 0.0
    cum_acc = 0.0
    sm = SmoothingFunction()

    all_references = []
    all_predictions = []

    if all(len(cand) == 0 for cand in decode_results):
        logging.error('Empty decoding results for the current dataset!')
        return -1, -1

    binned_results_dict = defaultdict(list)
    def get_binned_key(ast_size):
        cutoff = 50 if data_type == 'django' else 250
        k = 10 if data_type == 'django' else 25 # for hs

        if ast_size >= cutoff:
            return '%d - inf' % cutoff

        lower = int(ast_size / k) * k
        upper = lower + k

        key = '%d - %d' % (lower, upper)

        return key


    for eid in range(dataset.count):
        example = dataset.examples[eid]
        ref_code = example.code
        ref_ast_tree = ast.parse(ref_code).body[0]
        refer_source = astor.to_source(ref_ast_tree).strip()
        # refer_source = ref_code
        refer_tokens = tokenize_code(refer_source)
        cur_example_acc = 0.0

        decode_cands = decode_results[eid]
        if len(decode_cands) == 0:
            continue

        decode_cand = decode_cands[0]

        cid, cand, ast_tree, code = decode_cand
        code = astor.to_source(ast_tree).strip()

        # simple_url_2_re = re.compile('_STR:0_', re.))
        try:
            predict_tokens = tokenize_code(code)
        except:
            logging.error('error in tokenizing [%s]', code)
            continue

        if refer_tokens == predict_tokens:
            cum_acc += 1
            cur_example_acc = 1.0

            if verbose:
                exact_match_ids.append(example.raw_id)
                f.write('-' * 60 + '\n')
                f.write('example_id: %d\n' % example.raw_id)
                f.write(code + '\n')
                f.write('-' * 60 + '\n')

        if data_type == 'django':
            ref_code_for_bleu = example.meta_data['raw_code']
            pred_code_for_bleu = de_canonicalize_code(code, example.meta_data['raw_code'])
            # ref_code_for_bleu = de_canonicalize_code(ref_code_for_bleu, example.meta_data['raw_code'])
            # convert canonicalized code to raw code
            for literal, place_holder in example.meta_data['str_map'].iteritems():
                pred_code_for_bleu = pred_code_for_bleu.replace('\'' + place_holder + '\'', literal)
                # ref_code_for_bleu = ref_code_for_bleu.replace('\'' + place_holder + '\'', literal)
        elif data_type == 'hs':
            ref_code_for_bleu = ref_code
            pred_code_for_bleu = code

        # we apply Ling Wang's trick when evaluating BLEU scores
        refer_tokens_for_bleu = tokenize_for_bleu_eval(ref_code_for_bleu)
        pred_tokens_for_bleu = tokenize_for_bleu_eval(pred_code_for_bleu)

        shorter = len(pred_tokens_for_bleu) < len(refer_tokens_for_bleu)

        all_references.append([refer_tokens_for_bleu])
        all_predictions.append(pred_tokens_for_bleu)

        # try:
        ngram_weights = [0.25] * min(4, len(refer_tokens_for_bleu))
        bleu_score = sentence_bleu([refer_tokens_for_bleu], pred_tokens_for_bleu, weights=ngram_weights, smoothing_function=sm.method3)
        cum_bleu += bleu_score
        # except:
        #    pass

        if verbose:
            print 'raw_id: %d, bleu_score: %f' % (example.raw_id, bleu_score)

            f_decode.write('-' * 60 + '\n')
            f_decode.write('example_id: %d\n' % example.raw_id)
            f_decode.write('intent: \n')

            if data_type == 'django':
                f_decode.write(eid_to_annot[example.raw_id] + '\n')
            elif data_type == 'hs':
                f_decode.write(' '.join(example.query) + '\n')

            f_bleu_eval_ref.write(' '.join(refer_tokens_for_bleu) + '\n')
            f_bleu_eval_hyp.write(' '.join(pred_tokens_for_bleu) + '\n')

            f_decode.write('canonicalized reference: \n')
            f_decode.write(refer_source + '\n')
            f_decode.write('canonicalized prediction: \n')
            f_decode.write(code + '\n')
            f_decode.write('reference code for bleu calculation: \n')
            f_decode.write(ref_code_for_bleu + '\n')
            f_decode.write('predicted code for bleu calculation: \n')
            f_decode.write(pred_code_for_bleu + '\n')
            f_decode.write('pred_shorter_than_ref: %s\n' % shorter)
            # f_decode.write('weired: %s\n' % weired)
            f_decode.write('-' * 60 + '\n')

        # compute oracle
        best_bleu_score = 0.
        cur_oracle_acc = 0.
        for ast_tree in decode_results:
            try:
                code = astor.to_source(ast_tree).strip()
                predict_tokens = tokenize_code(code)

                if predict_tokens == refer_tokens:
                    cur_oracle_acc = 1.

                if data_type == 'django':
                    pred_code_for_bleu = de_canonicalize_code(code, example.meta_data['raw_code'])
                    # convert canonicalized code to raw code
                    for literal, place_holder in example.meta_data['str_map'].iteritems():
                        pred_code_for_bleu = pred_code_for_bleu.replace('\'' + place_holder + '\'', literal)
                elif data_type == 'hs':
                    pred_code_for_bleu = code

                # we apply Ling Wang's trick when evaluating BLEU scores
                pred_tokens_for_bleu = tokenize_for_bleu_eval(pred_code_for_bleu)

                ngram_weights = [0.25] * min(4, len(refer_tokens_for_bleu))
                cand_bleu_score = sentence_bleu([refer_tokens_for_bleu], pred_tokens_for_bleu,
                                                weights=ngram_weights,
                                                smoothing_function=sm.method3)

                if cand_bleu_score > best_bleu_score:
                    best_bleu_score = cand_bleu_score

            except:
                continue

        cum_oracle_bleu += best_bleu_score
        cum_oracle_acc += cur_oracle_acc

        ref_ast_size = example.parse_tree.size
        binned_key = get_binned_key(ref_ast_size)
        binned_results_dict[binned_key].append((bleu_score, cur_example_acc, best_bleu_score, cur_oracle_acc))

    cum_bleu /= dataset.count
    cum_acc /= dataset.count
    cum_oracle_bleu /= dataset.count
    cum_oracle_acc /= dataset.count

    logging.info('corpus level bleu: %f', corpus_bleu(all_references, all_predictions, smoothing_function=sm.method3))
    logging.info('sentence level bleu: %f', cum_bleu)
    logging.info('accuracy: %f', cum_acc)
    logging.info('oracle bleu: %f', cum_oracle_bleu)
    logging.info('oracle accuracy: %f', cum_oracle_acc)

    keys = sorted(binned_results_dict, key=lambda x: int(x.split(' - ')[0]))

    Y = [[], [], [], []]
    X = []

    for binned_key in keys:
        entry = binned_results_dict[binned_key]
        avg_bleu = np.average([t[0] for t in entry])
        avg_acc = np.average([t[1] for t in entry])
        avg_oracle_bleu = np.average([t[2] for t in entry])
        avg_oracle_acc = np.average([t[3] for t in entry])
        print binned_key, avg_bleu, avg_acc, avg_oracle_bleu, avg_oracle_acc, len(entry)

        Y[0].append(avg_bleu)
        Y[1].append(avg_acc)
        Y[2].append(avg_oracle_bleu)
        Y[3].append(avg_oracle_acc)

        X.append(int(binned_key.split(' - ')[0]))

    import matplotlib.pyplot as plt
    from pylab import rcParams
    rcParams['figure.figsize'] = 6, 2.5

    if data_type == 'django':
        fig, ax = plt.subplots()
        ax.plot(X, Y[0], 'bs--', label='BLEU', lw=1.2)
        # ax.plot(X, Y[2], 'r^--', label='oracle BLEU', lw=1.2)
        ax.plot(X, Y[1], 'r^--', label='acc', lw=1.2)
        # ax.plot(X, Y[3], 'r^--', label='oracle acc', lw=1.2)
        ax.set_ylabel('Performance')
        ax.set_xlabel('Reference AST Size (# nodes)')
        plt.legend(loc='upper right', ncol=6)
        plt.tight_layout()
        # plt.savefig('django_acc_ast_size.pdf', dpi=300)
        # os.system('pcrop.sh django_acc_ast_size.pdf')
        plt.savefig('django_perf_ast_size.pdf', dpi=300)
        os.system('pcrop.sh django_perf_ast_size.pdf')
    else:
        fig, ax = plt.subplots()
        ax.plot(X, Y[0], 'bs--', label='BLEU', lw=1.2)
        # ax.plot(X, Y[2], 'r^--', label='oracle BLEU', lw=1.2)
        ax.plot(X, Y[1], 'r^--', label='acc', lw=1.2)
        # ax.plot(X, Y[3], 'r^--', label='oracle acc', lw=1.2)
        ax.set_ylabel('Performance')
        ax.set_xlabel('Reference AST Size (# nodes)')
        plt.legend(loc='upper right', ncol=6)
        plt.tight_layout()
        # plt.savefig('hs_bleu_ast_size.pdf', dpi=300)
        # os.system('pcrop.sh hs_bleu_ast_size.pdf')
        plt.savefig('hs_perf_ast_size.pdf', dpi=300)
        os.system('pcrop.sh hs_perf_ast_size.pdf')
    if verbose:
        f.write(', '.join(str(i) for i in exact_match_ids))
        f.close()
        f_decode.close()

        f_bleu_eval_ref.close()
        f_bleu_eval_hyp.close()

    return cum_bleu, cum_acc
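
The BLEU computation above leans on NLTK's sentence_bleu with method3 smoothing; a self-contained check of that call on toy token lists (the tokens are made up, only the API usage mirrors the example):

from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

sm = SmoothingFunction()
refer_tokens = ['return', 'x', '+', 'y']
pred_tokens = ['return', 'x', '+', 'y']
ngram_weights = [0.25] * min(4, len(refer_tokens))
score = sentence_bleu([refer_tokens], pred_tokens,
                      weights=ngram_weights, smoothing_function=sm.method3)
print(round(score, 3))   # 1.0 for an exact match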
Example #13
 def test_something(self):
     """
     This method executes the algorithm defined in the class RFDDiscovery for each dataset in the directory 
     and for
     each combination of rhs and lhs of them. For each execution of the algorithm, the method saves some information:
         - the dataset's name;
         - the dataset rows' number;
         - number of column;
         - the dataset file's size;
         - the algorithm's elapsed time;
         - the number of RFDs found;
         - the combination of rhs and lhs used for the iteration;
         - the number of the iteration executed on that combination.
     When the test ends, it saves all the information described above in a CSV file with the name
     <date of test>-result-c.csv. During the test, some log information will be printed.
     """
     test_count = 1
     logging.info("Starting test")
     result_df = pd.DataFrame(columns=cols)  # Data frame in which save results
     path = "./resources"  # path in which datasets are stored
     datasets = self.__load_all_files__(path)
     logging.info("All files loaded")
     for ds in datasets:
         logging.info("Starting test for dataset {}".format(ds))
         current_ds = path + "/" + ds                                # abs path for current dataset
         file_size = os.stat(current_ds).st_size                     # get file size
         logging.info("Checking separator and header for dataset {}".format(ds))
         try:
             c_sep, has_header = ut.check_sep_n_header(current_ds)
         except Exception as ex:
             logging.error("Failed to load separator and header. Skipping test for {}".format(ds))
             continue
         logging.info("{} has separator '{}' and has {} header".format(ds, c_sep, "no" if has_header is None else ""))
         ds_shape = self.__get_ds_shape(current_ds, sep=c_sep, first_row_head=has_header)  # get df shape
         lhs_vs_rhs = ut.get_hs_combination(ds_shape['col'])     # combination for HS
         diff_matrix, elapsed_time_dist = self.__get_diff_mtx(c_sep, current_ds, has_header)
         for combination in lhs_vs_rhs:
             logging.info("Testing on combination: {}".format(str(combination)))
             dist_mtx = diff_matrix.split_sides(combination)
             for i in range(ITERATION_TIME):                         # repeat test X times
                 logging.info("Test no.{}".format(i))
                 start_time = time.time()                            # get t0
                 rfdd = RFDDiscovery(dist_mtx)
                 compiled = rfdd.is_compiled()
                 rfd_df = rfdd.get_rfds(rfdd.standard_algorithm, combination)
                 elapsed_time = time.time() - start_time             # get deltaT = now - t0
                 logging.info("RFDs discovery process finished")
                 rfd_count = rfd_df.shape[0]
                 logging.info("Discovered {} RFDs".format(rfd_count))
                 logging.info("Result added")
                 logging.info("Appending result to result's dataframe")
                 # append to result df
                 self.__append_result(ds, ds_shape['row'], ds_shape['col'], file_size, round(elapsed_time*1000,3),
                                      round(elapsed_time_dist*1000,3), rfd_count, str(combination), result_df)
                 test_count += 1
                 elapsed_time_dist = 0
         diff_matrix = None  # free unused memory
     logging.info("Saving file")
     abs_path = os.path.abspath("./resources/test/{}-results-{}.csv"
                                .format(time.strftime("%Y-%m-%d_%H-%M-%S"), "c" if compiled else "p"))
     result_df.to_csv(abs_path, sep=";", header=cols, decimal=',')
     logging.info("File saved")
Example #14
trackl_configdir = os.path.expanduser("~/.config/trackl")
logging.basicConfig(filename=trackl_configdir + "/log.log",
                    level=logging.DEBUG)
logging.debug("gtk loaded")

try:
    # Import the GUI dependencies
    import gi
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gtk, GObject, Gdk, GdkPixbuf
    from gi.repository.GdkPixbuf import Pixbuf
except Exception as e:
    lprint(
        "Por favor, instala PyGObject en tu ordenador. \n  En ubuntu suele ser 'apt-get install python3-gi'\n  En Archlinux es 'pacman -S python-gobject'"
    )
    logging.error(e)
    sys.exit()

maindir, this_filename = os.path.split(__file__)
gladefile = maindir + "/Gtk.glade"

try:
    builder = Gtk.Builder()
    builder.add_from_file(gladefile)
    xmlroot = xmltree.parse(gladefile).getroot()
    print("Necesario Gtk+ " + xmlroot[0].attrib["version"] + ".0", end="")
    print(" | Usando Gtk+ " + str(Gtk.get_major_version()) + "." +
          str(Gtk.get_minor_version()) + "." + str(Gtk.get_micro_version()))
except Exception as e:
    print("Error: No se ha podido cargar la interfaz.")
    if "required" in str(e):
def errprint(st):
    logging.error(st + "\n")
# NOTE: this snippet begins mid-call; the level below is an assumption, the format string is original
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logging.debug('Program Started')
print('Program Started')

# In[22]:

logging.debug('Loading Data into Dataframe')
print('Loading Data into Dataframe')
try:
    df_loaded = pd.read_csv("../home/energydata_complete.csv")
    df = df_loaded
    logging.debug('Data Size' + str(df.shape))
    print('Data Size' + str(df.shape))

except:
    logging.error('Data loading failed')
    print('Data loading failed')

# In[23]:

logging.debug("Transforming date time")
print("Transforming date time")
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S', utc=True)

# In[24]:

logging.debug('Creating Column NSM, week_status, day_of_week')
print('Creating Column NSM, week_status, day_of_week')
df['NSM'] = df.date.apply(lambda x: x.hour * 3600 + x.minute * 60 + x.second)
df['day_of_week'] = df.date.apply(lambda x: x.dayofweek)
df['week_status'] = df.day_of_week.apply(lambda x: 0
Example #17
def main():
    """Main function."""

    printer, textemplate, string, printers, preview, printit, debug, amount, cleanup, templates = get_args(
    )

    logging.basicConfig(format='%(message)s')

    if templates:
        list_templates()
        exit(0)

    if printers:
        list_printers()
        exit(0)

    if not debug:
        logging.getLogger().setLevel(logging.INFO)
    elif debug == 'warning':
        logging.getLogger().setLevel(logging.WARNING)

    if not textemplate and not cleanup:
        logging.error('No template selected. Parameter --template used?')

    if not string:
        logging.error('No data selected --string parameter used.')
        exit(0)

    else:
        #file = open(home + '/.labelprinter/templates/data.tex', 'w')
        #logging.info('Content writen…')
        #file.write(string)
        #file.close()
        logging.info('…done')

    if preview and printit:
        logging.error("Don't use --preview and --printit together")
        exit(0)

    # open() does not expand '~' on its own, hence os.path.expanduser()
    templates_dir = os.path.expanduser('~/.labelprinter/templates/')
    with open(templates_dir + 'template_' + textemplate, 'r') as template:
        data = template.read()
        with open(templates_dir + textemplate, 'w') as letter:
            letter.write(LaTeXTemplate(data).substitute(string=string))

    #if not debug:
    #    latex = subprocess.Popen(['pdflatex', home + '/.labelprinter/templates/' + template], stdout=open(os.devnull, 'wb'))
    #else:
    #    latex = subprocess.Popen(['pdflatex', home + '/.labelprinter/templates/' + template])

    #logging.info('Compiling Latex…')

    #for x in range(3):
    #    latex.communicate()

    #logging.info('Compiling Latex done')

    #pdffile = template.replace('.tex', '.pdf')

    #if preview:
    #    if not debug:
    #        pdfview = subprocess.Popen(['xreader', pdffile], stderr=open(os.devnull, 'wb'))
    #    else:
    #        pdfview = subprocess.Popen(['xreader', pdffile])

    #   logging.info('Opening preview PDF file')
    #    pdfview.communicate()

    if not amount:
        amount = 1

    #if printit:
    #    for x in range(amount):
    #        con = cups.Connection()
    ##       logging.info('Print PDF file')
    #       con.printFile(printer, pdffile, template, {})

    #os.rename(pdffile, pdffile.replace('.pdf', '') + '--' + string + '.pdf')

    if cleanup:
        for file in os.listdir(os.getcwd()):
            suffix = file.split('.')[-1]
            if suffix == "aux" or suffix == 'log':
                os.remove(file)
        logging.info('Temp files deleted')
        exit(0)
Example #18
def _validate_and_upload(directory_status):
    """
    This function attempts to upload a single run directory

    Handles parsing and validating the directory for samples
    Sets up the api layer based on config file
    Verifies samples is able to be uploaded (verifies projects exist)
    Initializes objects/routes on IRIDA to accept Samples (creates samples if they don't exist)
    Starts the upload

    :param directory_status: DirectoryStatus object that has directory to try upload
    :return: ExitReturn
    """
    logging_start_block(directory_status.directory)
    logging.debug("upload_run_single_entry:Starting {}".format(
        directory_status.directory))

    # Add progress file to directory
    try:
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.PARTIAL)
    except progress.exceptions.DirectoryError as e:
        logging.error(
            "ERROR! Error while trying to write status file to directory {} with error message: {}"
            "".format(e.directory, e.message))
        logging.info("Samples not uploaded!")
        return exit_error()

    # Do parsing (Also offline validation)
    try:
        sequencing_run = parsing_handler.parse_and_validate(
            directory_status.directory)
    except parsers.exceptions.DirectoryError as e:
        # Directory was not valid for some reason
        full_error = "ERROR! An error occurred with directory '{}', with message: {}".format(
            e.directory, e.message)
        logging.error(full_error)
        logging.info("Samples not uploaded!")
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)
    except parsers.exceptions.ValidationError as e:
        # Sequencing Run / SampleSheet was not valid for some reason
        error_msg = "ERROR! Errors occurred during validation with message: {}".format(
            e.message)
        logging.error(error_msg)
        error_list_msg = "Error list: " + pformat(
            e.validation_result.error_list)
        logging.error(error_list_msg)
        logging.info("Samples not uploaded!")
        full_error = error_msg + ", " + error_list_msg
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)

    # Initialize the api for first use
    logging.info("*** Connecting to IRIDA ***")
    try:
        api_handler.initialize_api_from_config()
    except api.exceptions.IridaConnectionError as e:
        logging.error("ERROR! Could not initialize irida api.")
        logging.error("Errors: " + pformat(e.args))
        logging.info("Samples not uploaded!")
        full_error = "ERROR! Could not initialize irida api. Errors: " + pformat(
            e.args)
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)
    logging.info("*** Connected ***")

    logging.info("*** Verifying run (online validation) ***")
    try:
        validation_result = api_handler.prepare_and_validate_for_upload(
            sequencing_run)
    except api.exceptions.IridaConnectionError as e:
        logging.error("Lost connection to Irida")
        logging.error("Errors: " + pformat(e.args))
        full_error = "Lost connection to Irida. Errors: " + pformat(e.args)
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)

    if not validation_result.is_valid():
        logging.error("Sequencing run can not be uploaded")
        logging.error(
            "Sequencing run can not be uploaded. Encountered {} errors"
            "".format(validation_result.error_count()))
        logging.error("Errors: " + pformat(validation_result.error_list))
        full_error = "Sequencing run can not be uploaded, Errors: " + pformat(
            validation_result.error_list)
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(full_error)
    logging.info("*** Run Verified ***")

    # Start upload
    logging.info("*** Starting Upload ***")
    try:
        run_id = api_handler.upload_sequencing_run(sequencing_run)
    except api.exceptions.IridaConnectionError as e:
        logging.error("Lost connection to Irida")
        logging.error("Errors: " + pformat(e.args))
        full_error = "Lost connection to Irida. Errors: " + pformat(e.args)
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)
    except api.exceptions.IridaResourceError as e:
        logging.error("Could not access IRIDA resource")
        logging.error("Errors: " + pformat(e.args))
        full_error = "Could not access IRIDA resource Errors: " + pformat(
            e.args)
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)
    except api.exceptions.FileError as e:
        logging.error("Could not upload file to IRIDA")
        logging.error("Errors: " + pformat(e.args))
        full_error = "Could not upload file to IRIDA. Errors: " + pformat(
            e.args)
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.ERROR, full_error)
        return exit_error(e)
    logging.info("*** Upload Complete ***")

    # Set progress file to complete
    try:
        _set_and_write_directory_status(directory_status,
                                        DirectoryStatus.COMPLETE,
                                        run_id=run_id)
    except progress.exceptions.DirectoryError as e:
        # this is an exceptionally rare case (successful upload, but fails to write progress)
        logging.error(
            "ERROR! Error while trying to write status file to directory {} with error message: {}"
            "".format(e.directory, e.message))
        logging.info(
            "Samples were uploaded, but progress file may be incorrect!")

    logging.info("Samples in directory '{}' have finished uploading!".format(
        directory_status.directory))

    logging_end_block()

    return exit_success()
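
The IRIDA upload example above repeats the same log-record-bail sequence in every except clause; a hedged helper (pformat comes from pprint; _set_and_write_directory_status, DirectoryStatus and exit_error are the example's own names) keeps each handler to one call:

from pprint import pformat
import logging

def _fail(directory_status, summary, e):
    """Log an upload failure, record it in the status file, and return the error exit."""
    full_error = "{} Errors: {}".format(summary, pformat(e.args))
    logging.error(full_error)
    logging.info("Samples not uploaded!")
    _set_and_write_directory_status(directory_status,
                                    DirectoryStatus.ERROR, full_error)
    return exit_error(e)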
Example #19
def reduce_process(opts,
                   add_opts,
                   output_queue,
                   spool_length,
                   out_file=None,
                   file_size=0):
    """Pull finished article text, write series of files (or stdout)
    :param add_opts: additional options
    :param opts: global parameters.
    :param output_queue: text to be output.
    :param spool_length: spool length.
    :param out_file: filename where to print.
    :param file_size: max file size.
    """
    global options
    options = opts
    global tsdw_options
    tsdw_options = add_opts

    createLogger(options.quiet, options.debug, options.log_file)

    # set up files for output
    if out_file:
        nextFile_1 = NextFile(out_file + "/tmp")
        nextFile_2 = NextFile(out_file + "/data")
        output_1 = OutputSplitter(nextFile_1, file_size, False)
        output_2 = CsvSplitter(nextFile_2, file_size, False)
    else:
        logging.error("Couldn't determine an output path")

    interval_start = default_timer()

    spool = {}  # collected pages
    next_page = 0  # sequence numbering of

    while True:
        if next_page in spool:
            id, page_num, page_res, title, text = spool.pop(next_page)

            # write output
            output_1.write(text.encode('utf-8'))
            for line in format_pairs(id, page_res):
                output_2.write(line)
            logging.info('%s %s', id, title)
            next_page += 1

            # tell mapper our load:
            spool_length.value = len(spool)
            # progress report
            if next_page % report_period == 0:
                interval_rate = report_period / (default_timer() -
                                                 interval_start)
                logging.info("Extracted %d articles (%.1f art/s)", next_page,
                             interval_rate)
                interval_start = default_timer()
        else:
            # mapper puts None to signal finish
            next = output_queue.get()
            if not next:
                break
            page_num = next[1]
            spool[page_num] = next
            # tell mapper our load:
            spool_length.value = len(spool)
            # FIXME: if an extractor dies, process stalls; the other processes
            # continue to produce pairs, filling up memory.
            if len(spool) > 200:
                logging.debug('Collected %d, waiting: %d, %d', len(spool),
                              next_page, next_page == page_num)

    output_1.close()
    output_2.close()
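
The reduce_process above reorders pages that arrive out of order by spooling them in a dict keyed on sequence number and draining strictly in order; a self-contained toy version of that pattern:

incoming = [(2, "page-2"), (0, "page-0"), (3, "page-3"), (1, "page-1")]

spool = {}
next_page = 0
for page_num, payload in incoming:
    spool[page_num] = payload          # park out-of-order arrivals
    while next_page in spool:          # drain in sequence as soon as possible
        print(next_page, spool.pop(next_page))
        next_page += 1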
Example #20
    for oid, loc, dir, id in zip(new_file[0], new_file[1], new_file[2],
                                 new_file[3])
}  # creating a dictionary for the new metric id's

try:
    # connects to db
    connection = pymysql.connect(host='172.30.0.141',
                                 user='******',
                                 password='******',
                                 port=3307,
                                 db='sa',
                                 cursorclass=pymysql.cursors.DictCursor)
    connection.autocommit = True
    logging.info("Connected to database")
except:
    logging.error("Connection to database failed")

cursor = connection.cursor()  #obtaining the cursor

# deleting the old files if they are present
if os.path.isfile('db_ids.csv'):
    os.remove('db_ids.csv')

if os.path.isfile('db_ids_notfound.csv'):
    os.remove('db_ids_notfound.csv')

# copying the metric from old_metric_data.csv file and searching it in new_metric_data.csv
for metric in zip(old_file[0], old_file[1], old_file[2], old_file[3]):
    search_metric = (metric[0], metric[1], metric[2])

    if search_metric in new_file_dict.keys():
Example #21
def benchmark_indexing(n_docs_options, retriever_doc_stores, data_dir,
                       filename_gold, filename_negative, data_s3_url,
                       embeddings_filenames, embeddings_dir, update_json,
                       **kwargs):

    retriever_results = []
    for n_docs in n_docs_options:
        for retriever_name, doc_store_name in retriever_doc_stores:
            logger.info(
                f"##### Start indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### "
            )
            try:
                doc_store = get_document_store(doc_store_name)
                retriever = get_retriever(retriever_name, doc_store)
                docs, _ = prepare_data(
                    data_dir=data_dir,
                    filename_gold=filename_gold,
                    filename_negative=filename_negative,
                    data_s3_url=data_s3_url,
                    embeddings_filenames=embeddings_filenames,
                    embeddings_dir=embeddings_dir,
                    n_docs=n_docs)

                tic = perf_counter()
                index_to_doc_store(doc_store, docs, retriever)
                toc = perf_counter()
                indexing_time = toc - tic

                print(indexing_time)

                retriever_results.append({
                    "retriever": retriever_name,
                    "doc_store": doc_store_name,
                    "n_docs": n_docs,
                    "indexing_time": indexing_time,
                    "docs_per_second": n_docs / indexing_time,
                    "date_time": datetime.datetime.now(),
                    "error": None
                })
                retriever_df = pd.DataFrame.from_records(retriever_results)
                retriever_df = retriever_df.sort_values(
                    by="retriever").sort_values(by="doc_store")
                retriever_df.to_csv(index_results_file)
                doc_store.delete_all_documents(index=doc_index)
                doc_store.delete_all_documents(index=label_index)
                time.sleep(10)
                del doc_store
                del retriever

            except Exception as e:
                tb = traceback.format_exc()
                logging.error(
                    f"##### The following Error was raised while running indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs #####"
                )
                logging.error(tb)
                retriever_results.append({
                    "retriever": retriever_name,
                    "doc_store": doc_store_name,
                    "n_docs": n_docs,
                    "indexing_time": 0,
                    "docs_per_second": 0,
                    "date_time": datetime.datetime.now(),
                    "error": str(tb)
                })
                doc_store.delete_all_documents(index=doc_index)
                doc_store.delete_all_documents(index=label_index)
                time.sleep(10)
                del doc_store
                del retriever
    if update_json:
        populate_retriever_json()
    def import_control_file(self):
        """Returns the control file"""
        
        if not exists(join(self.model_dir, self.control_file)):
            logging.error("HeatSource_Control.csv not \
            found {0}".format(join(self.model_dir,self.control_file)))
            
            raise Exception("HeatSource_Control.csv not found. \
            Move the executable or place the control file in \
            this directory: {0}.".format(self.model_dir))
        
        msg = "Reading control file"
        logging.info(msg)
        print_console(msg)
        
        cf_dict = self.control_file_dict()
        cf_list = self.read_to_list(self.model_dir,
                                 self.control_file,
                                 skiprows=1, skipcols=0)
        
        # set up a list to check if a missing value in
        # the control file is ok
        if IniParams["run_type"] == 0:
            # For temperature runs None is ok for these inputs
            none_ok = ["usertxt", "name"]
            
        elif IniParams["run_type"] == 1:
            # For solar runs None is ok for these inputs
            none_ok = ["usertxt", "name","flushdays","bcfile",
                       "inflowsites", "inflowinfiles", "inflowkm",
                       "accretionfile", "calcevap", "evapmethod",
                       "wind_a", "wind_b", "calcalluvium", "alluviumtemp"]
                    
        elif IniParams["run_type"] == 2:
            # For hydraulic runs None is ok for these inputs
            none_ok = ["usertxt", "name","lcdatafile", "lccodefile",
                       "trans_count", "transsample_count",
                       "transsample_distance", "emergent",
                       "lcdatainput", "canopy_data", "lcsampmethod",
                       "point"]
        else:
            # This is a setup call, None is ok for all of them
            none_ok = IniParams.keys()
        
        # This is so the iteration happens in descending order
        # so some of the keys are parameterized earlier for the
        # none list. 
        keys = cf_dict.keys()
        keys.sort(reverse=True)
            
        for k in keys:
            for line in cf_list:
                if (line[2] == k):
                    # check for missing values
                    if str.strip(line[3]) in ["", None]:
                        if k in none_ok:
                            IniParams[k] = None
                        elif (k == "lccodefile" and
                            IniParams["lcdatainput"] == "Values"):
                            # None ok because lccodes file is not needed
                            IniParams[k] = None
                            
                        elif (k in ["inflowinfiles","inflowkm"] and
                              IniParams["inflowsites"] == 0):
                            # None ok because there are no inflow sites
                            IniParams[k] = None
                            
                        elif (k == "alluviumtemp" and
                              IniParams["calcalluvium"] is False):
                            # None ok because not including 
                            # deep alluvium temps
                            IniParams[k] = None
                        else:
                            raise TypeError("Value in control file line {0} is missing".format(line[0]))
                    # now make sure it's the correct data type
                    elif dtype[k] is basestring:
                        IniParams[k] = str.strip(line[3])
                    
                    elif dtype[k] is int:
                        IniParams[k] = int(float(str.strip(line[3])))
                        
                    elif dtype[k] is bool:
                        if str.strip(line[3]).upper() in ["TRUE", "FALSE"]:
                            IniParams[k] = str.strip(line[3]).upper() == "TRUE"
                        else:
                            raise TypeError("Control file line {0} must be True or False".format(line[0]))
                            
                    elif dtype[k] is float:
                        IniParams[k] = float(str.strip(line[3]))
                    
        # Make dates into seconds since UTC epoch
        IniParams["datastart"] = timegm(strptime(IniParams["datastart"] + " 00:00:00" ,"%m/%d/%Y %H:%M:%S"))
        IniParams["dataend"] = timegm(strptime(IniParams["dataend"],"%m/%d/%Y")) + 86400
    
        if IniParams["modelstart"] is None:
            IniParams["modelstart"] = IniParams["datastart"]
        else:
            IniParams["modelstart"] = timegm(strptime(IniParams["modelstart"] + " 00:00:00","%m/%d/%Y %H:%M:%S"))
    
        if IniParams["modelend"] is None:
            IniParams["modelend"] = IniParams["dataend"]
        else:
            IniParams["modelend"] = timegm(strptime(IniParams["modelend"],"%m/%d/%Y")) + 86400
    
        IniParams["flushtimestart"] = IniParams["modelstart"] - IniParams["flushdays"]*86400
                        
        # If the number of transverse samples per direction 
        # is NOT reported, assume 4 (old default)
        if not IniParams["transsample_count"]:
            IniParams["transsample_count"] = 4.0 
    
        # Format for heat source 8 methods same 
        # as 8 directions but no north
        if IniParams["heatsource8"]:IniParams["trans_count"] = 7

        # Set the total number landcover sample count (0 = emergent)
        IniParams["sample_count"] = int(IniParams["transsample_count"]
                                        * IniParams["trans_count"])
        
        # Set up our evaporation method
        IniParams["penman"] = False
        if IniParams["calcevap"]:
            IniParams["penman"] = True if IniParams["evapmethod"] == "Penman" else False
    
        # make sure that the timestep divides into 60 minutes, 
        # or we may not land squarely on each hour's starting point.
        if float(60)/IniParams["dt"] - int(float(60)/IniParams["dt"]) > 1e-7:
            raise ValueError("I'm sorry, your timestep ({0}) must evenly divide into 60 minutes.".format(IniParams["dt"]))
        else:
            # make dt measured in seconds
            IniParams["dt"] = IniParams["dt"]*60
    
        # Make sure the output directory ends in a 
        # slash based on system platform
        if (platform.system() == "Windows" and IniParams["outputdir"][-1] != "\\"):
            raise ValueError("Output directory needs to have a backslash at end of the path. ..\\outputfolder\\")
    
        if (platform.system() == "Darwin" and IniParams["outputdir"][-1] != "/"):
            raise ValueError("Output directory needs to have a forward slash at the end of the path. ../outputfolder/")    
    
        # The distance step must be an exact multiple of the longitudinal
        # sample rate and no smaller than it.
        if (IniParams["dx"]%IniParams["longsample"]
            or IniParams["dx"]<IniParams["longsample"]):
            raise ValueError("Distance step (dx) must be a multiple of the longitudinal stream sample distance")
Exemple #23
0
    print "========================="
    
    os.chdir(os.path.realpath(dir))
    
    # load config
    dbhost, dbport, dbname,serverindex = loadConfig()

    #connect to mysql
    conn = MySQLdb.Connect(host=dbhost, user='******', passwd="hoolai12", port=int(dbport), db=dbname)
    print("connect to mysql succ!")

    cur = conn.cursor()
    while True:
        # load cmd from db per 5 sec
        try:
            cur.execute("select id,serverid,cmdid, cmd, param1, param2 from servercmd where serverid=%s limit 1" % serverindex)
            if cur.rowcount >= 1:
                row = cur.fetchone()
                id = row[0]
                sid = row[1]
                process(sid, row[2], row[3], row[4], row[5],dir)
                            
                #after process cmd, should delete from db
                n = cur.execute("delete from servercmd where id=%d and serverid=%s" % (id,serverindex))
                logging.info("delete from servercmd where id=%d and serverid=%s result:%s" % (id,serverindex,n))
                if n <= 0:
                    exit(-1)    #delete error,exit system
            time.sleep(5)
        except MySQLdb.Error as e:
            logging.error(str(e))
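
# A hedged sketch of the same poll-process-delete loop, rewritten against the
# stdlib sqlite3 module (so it runs without a MySQL server) and with
# parameterized queries instead of string formatting. Table and column names
# mirror the snippet above; the process() callable and the database file are
# assumed to exist.
import logging
import sqlite3
import time

def poll_server_commands(db_path, serverindex, process, interval=5):
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    while True:
        try:
            cur.execute(
                "select id, serverid, cmdid, cmd, param1, param2 "
                "from servercmd where serverid=? limit 1", (serverindex,))
            row = cur.fetchone()
            if row is not None:
                cmd_id, sid = row[0], row[1]
                process(sid, row[2], row[3], row[4], row[5])
                # after processing the command, delete it from the table
                cur.execute("delete from servercmd where id=? and serverid=?",
                            (cmd_id, serverindex))
                deleted = cur.rowcount
                conn.commit()
                logging.info("deleted servercmd id=%s serverid=%s rows=%s",
                             cmd_id, serverindex, deleted)
                if deleted <= 0:
                    raise SystemExit(-1)  # delete failed, stop the loop
            time.sleep(interval)
        except sqlite3.Error as e:
            logging.error(str(e))
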
Exemple #24
0
def main(argv=None):
    parser = argparse.ArgumentParser(description='Visualization example')
    parser.add_argument('--s3_bucket',
                        type=str,
                        required=True,
                        help='S3 Bucket to use.')
    parser.add_argument('--s3_prefix',
                        type=str,
                        help='S3 Bucket path prefix.',
                        default="iris-example")

    args = parser.parse_args()

    logging.basicConfig(format='%(levelname)s:%(message)s',
                        level=logging.DEBUG)

    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
    names = [
        'sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species'
    ]
    iris = pd.read_csv(url, names=names)

    array = iris.values
    X, y = array[:, 0:4], np.where(array[:, 4] == 'Iris-setosa', 1, 0)

    test_size = 0.2
    seed = 7
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=test_size, random_state=seed)

    model = LogisticRegression()
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    logging.info("Trained Model's evaluation score: {}".format(
        model.score(X_test, y_test)))

    df = pd.concat([
        pd.DataFrame(y_test, columns=['target']),
        pd.DataFrame(y_pred, columns=['predicted'])
    ],
                   axis=1)

    vocab = list(df['target'].unique())
    cm = confusion_matrix(df['target'], df['predicted'], labels=vocab)

    data = []
    for target_index, target_row in enumerate(cm):
        for predicted_index, count in enumerate(target_row):
            data.append((vocab[target_index], vocab[predicted_index], count))

    df_cm = pd.DataFrame(data, columns=['target', 'predicted', 'count'])
    cm_file = os.path.join('/tmp', 'confusion_matrix.csv')
    with file_io.FileIO(cm_file, 'w') as f:
        df_cm.to_csv(f,
                     columns=['target', 'predicted', 'count'],
                     header=False,
                     index=False)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation='relu',
                              input_shape=(4, )),  # input shape required
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(2)
    ])

    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    time_hash = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = "/tmp/logs/fit/" + time_hash
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)

    model.fit(x=X_train,
              y=y_train,
              epochs=10,
              validation_data=(X_test, y_test),
              callbacks=[tensorboard_callback])

    # upload to S3
    AWS_REGION = 'us-west-2'
    s3_client = boto3.client('s3', region_name=AWS_REGION)
    try:
        # upload cm file to S3
        cm_file_name = cm_file

        cm_object_name = 'confusion_matrix.csv'
        s3_cm_file = 's3://' + args.s3_bucket + '/' + args.s3_prefix + '/' + cm_object_name
        cm_response = s3_client.upload_file(
            cm_file_name, args.s3_bucket,
            args.s3_prefix + '/' + cm_object_name)

        # upload tb log dir to S3
        s3_tb_file = 's3://' + args.s3_bucket + '/' + args.s3_prefix + '/tb-logs'
        for path, subdirs, files in os.walk(log_dir):
            path = path.replace("\\", "/")
            directory_name = path.replace(log_dir, "")
            for file in files:
                s3_client.upload_file(
                    os.path.join(path, file), args.s3_bucket,
                    args.s3_prefix + '/tb-logs/' + directory_name + '/' + file)

    except ClientError as e:
        logging.info("ERROR IN S3 UPLOADING!!!!!!")
        logging.ERROR(e)

    logging.info("S3 object_name is: {}".format(s3_cm_file))

    metadata = {
        'outputs': [
            # Markdown that is hardcoded inline
            {
                'storage': 'inline',
                'source':
                '# Inline Markdown\n[A link](https://www.kubeflow.org/)',
                'type': 'markdown',
            },
            {
                'source':
                'https://raw.githubusercontent.com/kubeflow/pipelines/master/README.md',
                'type': 'markdown',
            },
            {
                'type':
                'confusion_matrix',
                'format':
                'csv',
                'schema': [
                    {
                        'name': 'target',
                        'type': 'CATEGORY'
                    },
                    {
                        'name': 'predicted',
                        'type': 'CATEGORY'
                    },
                    {
                        'name': 'count',
                        'type': 'NUMBER'
                    },
                ],
                'source':
                s3_cm_file,
                # Convert vocab to strings because for boolean values we
                # want "True|False" to match the csv data.
                'labels':
                list(map(str, vocab)),
            },
            {
                'type': 'tensorboard',
                'source': s3_tb_file,
            }
        ]
    }

    with file_io.FileIO('/tmp/mlpipeline-ui-metadata.json', 'w') as f:
        json.dump(metadata, f)

    logging.info("Succeed in Markdown")
Exemple #25
0
def get_tasks_by_date():
    try:
        return Task.select().order_by(Task.task_2_date)
    except OperationalError:
        logging.ERROR("Operational Error on get_tasks_by_date")
        return None
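
# A minimal sketch of the model the query above assumes: a peewee Task backed
# by SQLite with a task_2_date field. The task_2_name field and the tasks.db
# filename are illustrative guesses, not taken from the original code.
import datetime

from peewee import SqliteDatabase, Model, CharField, DateField

db = SqliteDatabase('tasks.db')

class Task(Model):
    task_2_name = CharField(default='')    # hypothetical extra field
    task_2_date = DateField(default=datetime.date.today)

    class Meta:
        database = db

db.connect()
db.create_tables([Task])
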
Exemple #26
0
def generate_avatar_image():
    logging.info('Generating avatar image')
    custom_circle_colors = [
        'BLUE_01',
        'BLUE_02',
        'PASTEL_BLUE',
        'PASTEL_GREEN',
        'PASTEL_ORANGE',
        'PASTEL_RED',
        'PASTEL_YELLOW',
    ]
    custom_mouth_types = [
        'DEFAULT',
        'EATING',
        'GRIMACE',
        'SMILE',
        'TONGUE',
        'TWINKLE',
    ]
    custom_eyebrow_types = [
        'DEFAULT',
        'DEFAULT_NATURAL',
        'FLAT_NATURAL',
        'RAISED_EXCITED',
        'RAISED_EXCITED_NATURAL',
        'SAD_CONCERNED',
        'SAD_CONCERNED_NATURAL',
        'UNI_BROW_NATURAL',
        'UP_DOWN',
        'UP_DOWN_NATURAL',
        'FROWN_NATURAL',
    ]
    custom_eye_types = [
        'DEFAULT',
        'CLOSE',
        'EYE_ROLL',
        'HAPPY',
        'HEARTS',
        'SIDE',
        'SQUINT',
        'SURPRISED',
        'WINK',
        'WINK_WACKY',
    ]

    # pick a random value from default types
    def r(enum_):
        return random.choice(list(enum_))

    # Make a random customization selection from custom arrays
    def rc(customization, array):
        return getattr(getattr(py_avataaars, customization),
                       random.choice(array))

    avatar = py_avataaars.PyAvataaar(
        style=py_avataaars.AvatarStyle.CIRCLE,
        background_color=rc("Color", custom_circle_colors),
        skin_color=r(py_avataaars.SkinColor),
        hair_color=r(py_avataaars.HairColor),
        facial_hair_type=r(py_avataaars.FacialHairType),
        facial_hair_color=r(py_avataaars.HairColor),
        top_type=r(py_avataaars.TopType),
        hat_color=r(py_avataaars.Color),
        mouth_type=rc("MouthType", custom_mouth_types),
        eye_type=rc("EyesType", custom_eye_types),
        eyebrow_type=rc("EyebrowType", custom_eyebrow_types),
        nose_type=r(py_avataaars.NoseType),
        accessories_type=r(py_avataaars.AccessoriesType),
        clothe_type=r(py_avataaars.ClotheType),
        clothe_color=r(py_avataaars.Color),
        clothe_graphic_type=r(py_avataaars.ClotheGraphicType),
    )

    try:
        avatar.render_png_file('avatar.png')
    except Exception as e:
        logging.error('Could not write avatar file with error: %s', e)
Exemple #27
0
def main():
    """
    main function - determines if edc needs to be queried to download xdocs
    (if file is not there, or if it is and force=True)
    downloads the xdocs then calls the function to read the xdocs and print a summary
    """

    start_time = time.time()
    resourceName = ""
    resourceType = ""
    outFolder = "./out"

    # get the command-line args passed
    cmd_parser = setup_cmd_parser()
    args, unknown = cmd_parser.parse_known_args()
    if args.setup:
        # if setup is requested (running standalone)
        # call setupConnection to create a .env file to use next time we run
        print("setup requested..., calling setupConnection & exiting")
        setupConnection.main()
        return

    # setup edc session and catalog url - with auth in the session header,
    # by using system vars or command-line args
    edcHelper.initUrlAndSessionFromEDCSettings()
    edcHelper.validateConnection()
    print(f"EDC version: {edcHelper.edcversion_str} ## {edcHelper.edcversion}")

    print(f"command-line args parsed = {args} ")
    print()

    # print(type(args))
    if args.resourceName is not None:
        resourceName = args.resourceName
    else:
        print(
            "no resourceName specified - we can't download xdocs without knowing "
            "what resource name to use. exiting")
        return

    if args.resourceType is None:
        print("we have a resource name - but no type - need to look it up")
        resourceType = getResourceType(resourceName, edcHelper.session)
    else:
        resourceType = args.resourceType

    if args.outDir is not None:
        outFolder = args.outDir
        print(f"output folder={outFolder}")

    if not os.path.exists(outFolder):
        print(f"creating new output folder: {outFolder}")
        os.makedirs(outFolder)

    print(f"force={args.force}")
    print(f"import to EDC: {args.edcimport}")

    if args.subst_file:
        print(
            f"substitution file referenced in command-line {args.subst_file}")
        mem.subst_dict = read_subst_regex_file(args.subst_file)

    # ready to extract/process xdocs - >=10.5 zip format (older 10.4.x not supported)
    if edcHelper.edcversion < 105000:
        print("EDC <= 10.5.0 is not supported for this utility script,"
              f" {edcHelper.edc_build_vers} found.  exiting")
        logging.ERROR("EDC <= 10.5.0 is not supported for this utility script,"
                      f" {edcHelper.edc_build_vers} found.  exiting")
        return

    print("EDC 10.5+ found... calling 10.5 exdoc analyzer")
    # get and process the xdocs
    get_exdocs_zip(resourceName, resourceType, outFolder, args.force)

    logging.info("xdoc_lineage_gen process completed")
    print(f"lineage file written: {mem.lineage_csv_filename}")
    logging.info(f"lineage file written: {mem.lineage_csv_filename}")
    print(f"lineage links written: {mem.links_written}")
    logging.info(f"lineage links written: {mem.links_written}")
    print(f"lineage links skipped (unchanged): {mem.not_changed_count}")
    logging.info(f"lineage links skipped (unchanged): {mem.not_changed_count}")

    print(f"run time = {time.time() - start_time} seconds ---")

    if not args.edcimport:
        print("\ncustom lineage resource will not be created/updated/executed."
              " use -i|-edcimport flag to enable")
        logging.info(
            "custom lineage resource will not be created/updated/executed."
            " use -i|-edcimport flag to enable")
        return ()

    lineage_resource = resourceName + "_lineage"
    lineage_fileonly = mem.lineage_csv_filename[
        mem.lineage_csv_filename.rfind("/") + 1:]
    print(
        "ready to create/update lineage resource..."
        f" {lineage_resource} from {mem.lineage_csv_filename} {lineage_fileonly}"
    )
    logging.info(
        "ready to create/update lineage resource..."
        f" {lineage_resource} from {mem.lineage_csv_filename} {lineage_fileonly}"
    )

    # create/update & start the custom lineage import
    edcutils.createOrUpdateAndExecuteResourceUsingSession(
        edcHelper.baseUrl,
        edcHelper.session,
        lineage_resource,
        "template/custom_lineage_template.json",
        lineage_fileonly,
        mem.lineage_csv_filename,
        False,
        "LineageScanner",
    )
Exemple #28
0
def main():

    ##################################################
    # Setup logging
    log_setup()
    logger = logging.getLogger('AppBlocker')
    logger.debug('All calling args:  {}'.format(sys.argv))

    ##################################################
    # Define Script Parameters

    parser = argparse.ArgumentParser(
        description=
        "This script allows you to block applications based on bundle identifier, optionally delete the app, and notify the user."
    )
    parser.add_argument(
        '--action',
        '-a',
        metavar='[ run | install | uninstall ]',
        type=str,
        help='Install or Uninstall the application blocking LaunchDaemon.',
        required=True)
    parser.add_argument(
        '--domain',
        '-d',
        metavar='com.github.mlbz521.BlockedApps',
        type=str,
        help=
        'The preference domain of the block list.  This will also be used for the launchdaemon.',
        required=True)

    args = parser.parse_known_args()
    args = args[0]
    logger.debug('Argparse args:  {}'.format(args))

    if len(sys.argv) > 1:
        if args.domain:
            preference_domain = (args.domain).strip()
        if args.action:
            action = (args.action).strip()
    else:
        parser.print_help()
        sys.exit(0)

    ##################################################
    # Define Variables

    os_minor_version = platform.mac_ver()[0].split('.')[1]
    launch_daemon_label = preference_domain
    launch_daemon_location = '/Library/LaunchDaemons/{}.plist'.format(
        launch_daemon_label)
    script_location = '/usr/local/bin/AppBlocker'
    console_user = (runUtility(
        '/usr/sbin/scutil <<< "show State:/Users/ConsoleUser" | /usr/bin/awk \'/Name :/ && ! /loginwindow/ { print $3 }\''
    )).strip()

    ##################################################
    # Define callback for notification
    class AppLaunch(NSObject):
        def appLaunched_(self, notification):

            # Store the userInfo dict from the notification
            userInfo = notification.userInfo

            blockedApplications = []
            blockedBundleIdentifiers = []

            for dictItem in CFPreferencesCopyAppValue('BlockedApps',
                                                      preference_domain):
                blockedApplications.append(dictItem)

            # List of all blocked bundle identifiers. Can use regexes.
            for blockedBundleID in blockedApplications:
                blockedBundleIdentifiers.append(blockedBundleID['Application'])

            # Combine all bundle identifiers and regexes to one
            blockedBundleIdentifiersCombined = "(" + ")|(".join(
                blockedBundleIdentifiers) + ")"

            # Get the launched applications bundle identifier
            bundleIdentifier = userInfo()['NSApplicationBundleIdentifier']

            # Check if launched app's bundle identifier matches any 'blockedBundleIdentifiers'
            if re.match(blockedBundleIdentifiersCombined, bundleIdentifier):

                app_name = userInfo()['NSApplicationName']

                for blockedList in blockedApplications:
                    if blockedList['Application'] == bundleIdentifier:
                        blockedApp = blockedList

                logger.info(
                    'Restricted application \'{appname}\' matching bundleID \'{bundleID}\' was opened by {user}.'
                    .format(appname=app_name,
                            bundleID=bundleIdentifier,
                            user=console_user))

                # Get path of launched app
                path = userInfo()['NSApplicationPath']

                # Get PID of launched app
                pid = userInfo()['NSApplicationProcessIdentifier']

                # Quit launched app
                os.kill(pid, signal.SIGKILL)

                # Alert user
                if blockedApp['AlertUser']:
                    app_icon = CFPreferencesCopyAppValue(
                        'CFBundleIconFile',
                        '{}/Contents/Info.plist'.format(path))
                    alertIconPath = "{path}/Contents/Resources/{app_icon}".format(
                        path=path, app_icon=app_icon)
                    root, ext = os.path.splitext(alertIconPath)
                    if not ext:
                        alertIconPath = alertIconPath + '.icns'
                    alert(blockedApp['AlertTitle'].format(appname=app_name),
                          blockedApp['AlertMessage'], ["OK"], alertIconPath)

                # Delete app if blocked
                if blockedApp['DeleteApp']:
                    try:
                        shutil.rmtree(path)
                    except OSError as error:
                        print("Error: {} - {}.".format(error.filename,
                                                       error.strerror))

    ##################################################
    # Define alert class
    class Alert(object):
        def __init__(self, messageText):
            super(Alert, self).__init__()
            self.messageText = messageText
            self.informativeText = ""
            self.buttons = []
            self.icon = ""

        def displayAlert(self):
            alert = NSAlert.alloc().init()
            alert.setMessageText_(self.messageText)
            alert.setInformativeText_(self.informativeText)
            alert.setAlertStyle_(NSInformationalAlertStyle)
            for button in self.buttons:
                alert.addButtonWithTitle_(button)

            if os.path.exists(self.icon):
                icon = NSImage.alloc().initWithContentsOfFile_(self.icon)
                alert.setIcon_(icon)
            else:
                icon = NSImage.alloc().initWithContentsOfFile_(
                    "/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources/AlertStopIcon.icns"
                )
                alert.setIcon_(icon)

            # Don't show the Python rocketship in the dock
            NSApp.setActivationPolicy_(1)

            NSApp.activateIgnoringOtherApps_(True)
            alert.runModal()

    ##################################################
    # Define an alert
    def alert(message="Default Message",
              info_text="",
              buttons=["OK"],
              app_icon=""):
        ap = Alert(message)
        ap.informativeText = info_text
        ap.buttons = buttons
        ap.icon = app_icon
        ap.displayAlert()

    ##################################################

    if action == 'install':
        logger.info('Installing the AppBlocker service...')

        if os.path.exists(script_location):
            logger.info('Service already exists; checking version...')
            dirname, basename = os.path.split(script_location)
            sys.path.insert(1, dirname)
            system_name = os.path.splitext(basename)[0]
            system_instance = importlib.import_module(system_name)
            system_version = system_instance.__version__

            if system_version == __version__:
                logger.info('Version:  current')
                startDaemon(launch_daemon_label=launch_daemon_label,
                            launch_daemon_location=launch_daemon_location,
                            os_minor_version=os_minor_version)

            else:
                logger.info('Updating the systems\' AppBlocker service...')
                # "Install" script
                shutil.copy(__file__, script_location)

                # Create the LaunchDaemon
                createDaemon(script_location=script_location,
                             launch_daemon_label=launch_daemon_label,
                             launch_daemon_location=launch_daemon_location)
                stopDaemon(launch_daemon_label=launch_daemon_label,
                           os_minor_version=os_minor_version)
                startDaemon(launch_daemon_label=launch_daemon_label,
                            launch_daemon_location=launch_daemon_location,
                            os_minor_version=os_minor_version)

        else:
            # "Install" script
            shutil.copy(__file__, script_location)

            # Create the LaunchDaemon
            createDaemon(script_location=script_location,
                         launch_daemon_label=launch_daemon_label,
                         launch_daemon_location=launch_daemon_location)
            stopDaemon(launch_daemon_label=launch_daemon_label,
                       os_minor_version=os_minor_version)
            startDaemon(launch_daemon_label=launch_daemon_label,
                        launch_daemon_location=launch_daemon_location,
                        os_minor_version=os_minor_version)

    elif action == 'uninstall':
        logger.info('Removing the AppBlocker service...')

        if os.path.exists(script_location):
            try:
                os.remove(script_location)
            except OSError as error:
                logging.ERROR("Error: {} - {}.".format(error.filename,
                                                       error.strerror))

        # Stop the LaunchDaemon
        stopDaemon(launch_daemon_label=launch_daemon_label,
                   os_minor_version=os_minor_version)

        if os.path.exists(launch_daemon_location):
            os.remove(launch_daemon_location)

    elif action == 'run':
        # Register for 'NSWorkspaceDidLaunchApplicationNotification' notifications
        nc = Foundation.NSWorkspace.sharedWorkspace().notificationCenter()
        AppLaunch = AppLaunch.new()
        nc.addObserver_selector_name_object_(
            AppLaunch, 'appLaunched:',
            'NSWorkspaceWillLaunchApplicationNotification', None)

        logger.info('Starting AppBlocker...')
        # Launch "app"
        AppHelper.runConsoleEventLoop()
        logger.info('Stopping AppBlocker...')
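
# A small standalone sketch of the bundle-identifier matching used in
# AppLaunch.appLaunched_ above: the blocked IDs (which may be regexes) are
# joined into one alternation and matched against the launched app's bundle
# identifier. The two blocked IDs below are made-up placeholders.
import re

blocked_bundle_identifiers = [r'com\.example\.blockedapp', r'org\.example\..*']
combined = "(" + ")|(".join(blocked_bundle_identifiers) + ")"

print(bool(re.match(combined, 'org.example.games.solitaire')))  # True
print(bool(re.match(combined, 'com.apple.Safari')))             # False
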
Exemple #29
0
    def generic_2d(self,
                   bmrb_ids,
                   file_names=None,
                   atom_x='H',
                   atom_y='N',
                   auth_tag=False,
                   legend=None,
                   draw_trace=False,
                   peak_list=None,
                   output_format='html',
                   output_file=None,
                   output_image_width=800,
                   output_image_height=600):
        peak_list_2d = self.create_2d_peaklist(bmrb_ids,
                                               atom_x=atom_x,
                                               atom_y=atom_y,
                                               file_names=file_names,
                                               auth_tag=auth_tag,
                                               draw_trace=draw_trace)
        if peak_list is not None:
            x1 = []
            y1 = []
            with open(peak_list) as csvfile:
                spamreader = csv.reader(csvfile, delimiter=',')
                for row in spamreader:
                    x1.append(float(row[0]))
                    y1.append(float(row[1]))
        x = peak_list_2d[0]
        y = peak_list_2d[1]
        data_set = peak_list_2d[2]
        info = peak_list_2d[3]
        res = peak_list_2d[4]
        cs_track = peak_list_2d[5]
        if legend is None:
            fig = px.scatter(x=x,
                             y=y,
                             symbol=data_set,
                             hover_name=info,
                             color=res,
                             labels={
                                 "color": "Residue",
                                 "symbol": "Data set",
                                 "x": '{} (ppm)'.format(atom_x),
                                 "y": '{} (ppm)'.format(atom_y)
                             })
            if draw_trace:
                for k in cs_track.keys():
                    fig.add_scatter(x=cs_track[k][0], y=cs_track[k][1], name=k)
            if peak_list is not None:
                fig.add_scatter(x=x1, y=y1, mode='markers', name='Peak list')
            fig.update_layout(showlegend=False)
            fig.update_xaxes(autorange="reversed")
            fig.update_yaxes(autorange="reversed")

        elif legend == 'residue':
            fig = px.scatter(
                x=x,
                y=y,
                hover_name=info,
                color=res,
                labels={
                    "color": "Residue",
                    # "symbol": "Data set",
                    "x": '{} (ppm)'.format(atom_x),
                    "y": '{} (ppm)'.format(atom_y)
                })
            if draw_trace:
                for k in cs_track.keys():
                    fig.add_scatter(x=cs_track[k][0],
                                    y=cs_track[k][1],
                                    name=k,
                                    mode='lines')
            if peak_list is not None:
                fig.add_scatter(x=x1, y=y1, mode='markers', name='Peak list')
            fig.update_xaxes(autorange="reversed")
            fig.update_yaxes(autorange="reversed")

        elif legend == 'dataset':
            fig = px.scatter(
                x=x,
                y=y,
                hover_name=info,
                color=data_set,
                labels={
                    "color": "Data set",
                    # "symbol": "Data set",
                    "x": '{} (ppm)'.format(atom_x),
                    "y": '{} (ppm)'.format(atom_y)
                })
            if draw_trace:
                for k in cs_track.keys():
                    fig.add_scatter(x=cs_track[k][0], y=cs_track[k][1], name=k)
            if peak_list is not None:
                fig.add_scatter(x=x1, y=y1, mode='markers', name='Peak list')
            fig.update_xaxes(autorange="reversed")
            fig.update_yaxes(autorange="reversed")
        fig.show()
        if output_file is not None:
            if output_format == 'html':
                fig.write_html('{}.html'.format(output_file))
                logging.info('Successfully written {}.html'.format(output_file))
            elif output_format == 'jpg':
                fig.write_image('{}.jpg'.format(output_file),
                                width=output_image_width,
                                height=output_image_height)
                logging.info('Successfully written {}.jpg'.format(output_file))
            elif output_format == 'png':
                fig.write_image('{}.png'.format(output_file),
                                width=output_image_width,
                                height=output_image_height)
                logging.info('Successfully written {}.png'.format(output_file))
            elif output_format == 'pdf':
                fig.write_image('{}.pdf'.format(output_file),
                                width=output_image_width,
                                height=output_image_height)
                logging.info('Successfully written {}.pdf'.format(output_file))
            elif output_format == 'webp':
                fig.write_image('{}.webp'.format(output_file),
                                width=output_image_width,
                                height=output_image_height)
                logging.info('Successfully written {}.webp'.format(output_file))
            else:
                logging.error(
                    'Output file format not supported: {}'.format(output_format))
        return True
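
# A hedged refactor sketch of the output_format branches above: every raster or
# vector format goes through plotly's fig.write_image(), and only 'html' needs
# fig.write_html(). The write_figure helper name is made up; it assumes a
# plotly figure like the one built by generic_2d.
import logging

def write_figure(fig, output_file, output_format, width=800, height=600):
    if output_format == 'html':
        fig.write_html('{}.html'.format(output_file))
    elif output_format in ('jpg', 'png', 'pdf', 'webp'):
        fig.write_image('{}.{}'.format(output_file, output_format),
                        width=width, height=height)
    else:
        logging.error('Output file format not supported: {}'.format(output_format))
        return False
    logging.info('Successfully written {}.{}'.format(output_file, output_format))
    return True
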
Exemple #30
0
    for agent in env.agents:
        # print(agent.position)
        # print(agent.direction)  # direction encoding: {0: North, 1: East, 2: South, 3: West}
        current_pos = agent.position
        prev_pos = None

        if agent.direction == 0:
            prev_pos = (current_pos[0] + 1, current_pos[1])
        elif agent.direction == 1:
            prev_pos = (current_pos[0], current_pos[1] - 1)
        elif agent.direction == 2:
            prev_pos = (current_pos[0] - 1, current_pos[1])
        elif agent.direction == 3:
            prev_pos = (current_pos[0], current_pos[1] + 1)
        else:
            logging.error('Invalid agent direction: %s', agent.direction)
            exit(1)

        start_node = (current_pos, prev_pos)
        index = node2idx[start_node]

        start_idx.append(index)
        # print(start_node, index)

    # print(start_idx)

    pos2nodes = dict()
    for _, (cur, prev) in idx2node.items():
        if cur not in pos2nodes:
            pos2nodes[cur] = []
        pos2nodes[cur].append((cur, prev))