Example 1
def main():
    main_data = get_json_data('res/db4')
    random.shuffle(main_data)
    tagged = [d for d in main_data if d["tags"]]
    untagged = [d for d in main_data if not d["tags"]]
    N = len(tagged)
    untagged = untagged[:N]  #TODO: remove
    print(N)
    train_data = tagged[:-N // 10]  # integer division: a float slice index raises TypeError
    test_data = tagged[-N // 10:]
    path = os.getcwd()
    at = AutoTag(os.path.join(path, 'res'))
    tags = at.get_tags()
    print(tags)

    print('classifying')
    at.classify(train_data, tags=tags)

    print("testing")
    print(at.test(test_data))

    tagged = [d for d in main_data if '231' in d["tags"]]
    untagged = [d for d in main_data if '231' not in d["tags"]][:100]
    print(len(tagged))
    print(at.get_most_informative_features_from_data(tagged, untagged, 10))
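Every example on this page calls a project-local get_json_data helper whose
definition is not shown. For file-based callers like the one above, a minimal
sketch might look like this (the behaviour is an assumption; other examples
below pass a URL or no argument at all, so each project defines its own
variant):

import json

def get_json_data(path):
    # Assumed file-based variant: load and return the parsed JSON contents.
    with open(path, encoding='utf-8') as f:
        return json.load(f)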
Example 2
def svm_predict():
    '''
    Run prediction with the trained SVM.
    Returns:
        the prediction results
    '''
    args = {
        'predict_x': None
    }
    acq = ['predict_x']
    types = {
        'predict_x': [list, list, float]
    }

    args = get_json_data(raw=request.get_data(as_text=True), args=args)
    missed_arg = check_aquired_args(args=args, acqs=acq)
    if missed_arg is not None:
        return json.dumps({'state': 'fail', 'msg': 'missing required argument(s)'})
    args = type_format(args=args, types=types)

    clf = get_trained_svm()
    if clf is None:
        return json.dumps({'state': 'fail', 'msg': 'model has not been trained yet'})
    rs = predict(clf, args['predict_x'])
    return json.dumps({'state': 'done', 'result': rs.tolist()})
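A client might invoke this Flask handler as sketched below; the route path
/svm/predict and the host are assumptions, and only the required predict_x
field (a list of float feature rows) is sent:

import requests

payload = {'predict_x': [[0.1, 0.2], [0.3, 0.4]]}  # hypothetical feature rows
resp = requests.post('http://localhost:5000/svm/predict', json=payload)
print(resp.json())  # {'state': 'done', 'result': [...]} on success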
Example 3
class Global:
    USER_DIR = None

    CONV = get_json_data("conv.json")

    LANG = "DE"

    BOT = None
Example 4
def update_corpus_ids(doc_id):
    corpus_ids_file = "./visualization/corpus/corpus_ids.json"
    corpus_ids = (get_json_data(corpus_ids_file)
                  if os.path.isfile(corpus_ids_file) else [])

    if doc_id not in corpus_ids:
        corpus_ids = [doc_id] + corpus_ids
        save_to_json(corpus_ids_file, corpus_ids)
        print('corpus ids updated: %s' % doc_id)
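update_corpus_ids pairs get_json_data with a save_to_json helper that is also
not shown; a plausible counterpart (name and signature assumed from the call
above) is:

import json

def save_to_json(path, data):
    # Assumed counterpart of get_json_data: serialize data to path as JSON.
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)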
Example 5
def main():
    #root = r"D:\python\competition\dataset\tile_round1_train_20201231"
    root = "/home/user/dataset/tile_round1_train_20201231"
    json_file = "train_annos.json"
    images_dir = "train_imgs"
    save_path = "/home/user/dataset/tile_round1_split"
    # divide_size = [128, 128]  # [h, w]
    # stride = [120, 120]       # [h, w]
    # max_num = 1000

    #divide_size = [640, 640]  # [h, w]
    divide_size = [416, 416]
    overlap_ratio = 0.2
    stride = [
        int(divide_size[0] * (1 - overlap_ratio)),
        int(divide_size[1] * (1 - overlap_ratio))
    ]  # [h, w]
    max_num = 1000000

    if not os.path.exists(save_path):
        os.mkdir(save_path)

    save_path = check_or_make_dir(save_path,
                                  "size{}X{}".format(divide_size[0],
                                                     divide_size[1]),
                                  mkdir=True)

    images_dir_path = os.path.join(root, images_dir)
    json_file_path = os.path.join(root, json_file)

    images_name = os.listdir(images_dir_path)
    json_data = get_json_data(json_file_path)
    images_data = json_data_to_images_data(json_data)
    random.shuffle(images_name)
    p = Pool(12)  # clw modify: originally 4
    for i, img_name in enumerate(tqdm(images_name)):
        if max_num != -1 and i >= max_num:
            break  # stop queueing once max_num images have been submitted
        try:
            # deal_one_image(img_name, images_data, images_dir_path, divide_size, stride, save_path)
            p.apply_async(deal_one_image,
                          args=(
                              img_name,
                              images_data,
                              images_dir_path,
                              divide_size,
                              stride,
                              save_path,
                          ))
        except Exception as e:
            print(e, img_name)
            continue
    p.close()
    p.join()
    generate_val_dataset(save_path, p=0.2)
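The stride follows from the tile size and the desired overlap,
stride = int(size * (1 - overlap)); for the values above each axis works out
to int(416 * 0.8) = 332, so adjacent tiles share 84 pixels:

divide_size = [416, 416]
overlap_ratio = 0.2
stride = [int(s * (1 - overlap_ratio)) for s in divide_size]
assert stride == [332, 332]  # 416 - 332 = 84 px of overlap per axis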
Example 6
    def fetch_details(self):
        jf = get_json_data(URL_MATCHES_DETAIL % self.detailID)

        details = jf[0]

        for key in IYMS_ORDER:
            self.ratios.append(parse_float(details[key]))

        for key in SK_ORDER:
            self.ratios.append(parse_float(details[key]))
Example 7
def hk_freq(data_dir, hk_dir):
    # Python 2-era code: words are encoded to UTF-8 byte strings before being
    # written to the CSV files below.
    print('hk freq')
    data = get_json_data(data_dir)
    at = AutoTag()
    for entry in data:
        entry['text'] = clean_text(entry['text'])
    if not os.path.isdir(hk_dir):
        os.mkdir(hk_dir)
    with open(hk_dir + 'total', 'w') as f:  # create/truncate the aggregate counts file
        pass
    word_count = at.count_data(
        [w for entry in data for w in entry['text'].split()], hk_dir + 'total')
    words = [w.encode('utf-8') for w, c in word_count if c > 40]
    with open(hk_dir + 'freqs.csv', 'wb') as csvfile:
        w = csv.writer(csvfile,
                       delimiter=',',
                       quotechar='"',
                       quoting=csv.QUOTE_MINIMAL)
        w.writerow([u'HK'] + words)

    hkwords = {}
    data_json = get_json(data_dir)
    for json_entry in data_json:
        if json_entry['model'] != "facebook_feeds.facebook_feed":
            continue
        name = json_entry['fields']['name']
        print(name)
        if not name:
            continue
        name = name.encode('utf-8')
        word_count = at.count_data([
            w for entry in data
            for w in entry['text'].split() if entry["feed"] == json_entry['pk']
        ], hk_dir + name)
        word_dict = {w.encode('utf-8'): c for w, c in word_count}
        hkwords[name] = []
        for word in words:
            if word not in word_dict:
                hkwords[name].append(str(0))
            else:
                hkwords[name].append(str(word_dict[word]))
        with open(hk_dir + 'freqs.csv', 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow([name] + hkwords[name])

    with open(hk_dir + 'freqs_t.csv', 'a') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        for name in hkwords:
            writer.writerow([name] + hkwords[name])
Example 8
    def __getBlocks(self, site):
        if os.path.isfile('%s/blocks_%s' % (self.__tmpdirectory, site)):
            unpopularBlocks = get_json_data_from_file(
                '%s/blocks_%s' % (self.__tmpdirectory, site))
            return unpopularBlocks
        else:
            url = 'http://cms-popularity-prod.cern.ch/popdb/victorinterface/popdbcombine/?sitename=%s' % (site)
            unpopularBlocks = get_json_data(url)

            dumpTemporaryInfo(unpopularBlocks, self.__tmpdirectory, 'blocks_%s' % (site))
            return unpopularBlocks
Example 9
    def test_correct_dict(self):
        test_dict = {
            'username': '******',
            'password': '******'
        }
        with self.app as c:
            c.get(
                '/api/igpost',
                json=test_dict
            )
            response = utils.get_json_data()
            self.assertEqual(response, test_dict)
Example 10
    def __hasCustodialCopy(self, blockname):
        url = 'https://cmsweb.cern.ch/phedex/datasvc/json/prod/blockreplicas?custodial=y&complete=y&block=%s' % (
            blockname)
        url = url.replace('#', '%23')
        self.__logger.debug(url)
        replicas = get_json_data(url)
        '''
        replicas = {
            "phedex":{
                "request_version":"2.1.8",
                "request_timestamp":1311755259.31146,
                "instance":"prod",
                "request_call":"blockreplicas",
                "request_url":"http://cmsweb.cern.ch:7001/phedex/datasvc/json/prod/blockreplicas",
                "request_date":"2011-07-27 08:27:39 UTC",
                "block":[
                    {
                        "bytes":"67095800951",
                        "files":"32",
                        "is_open":"n",
                        "name":"/ZJetsToNuNu_Pt-100_7TeV-herwigpp/Summer11-START311_V2-v1/GEN-SIM#7f6b861b-2263-4854-8abf-d096d35d9f1a",
                        "id":"2576551",
                        "replica":[
                            {
                                "bytes":"67095800951",
                                "files":"32",
                                "node":"T1_IT_CNAF_MSS",
                                "time_create":"1311331011",
                                "time_update":"1311610457.398",
                                "group":"DataOps",
                                "node_id":"8",
                                "custodial":"y",
                                "se":"storm-fe-cms.cr.cnaf.infn.it",
                                "subscribed":"y",
                                "complete":"y"}]}],
                "call_time":"0.05906"}}
        '''
        try:
            if replicas['phedex']['block'][0]['replica']:
                return True
        except KeyError:
            self.__logger.warning(
                'Block %s excepted with KeyError. replicas = %s' %
                (blockname, replicas))
            return False
        except IndexError:
            self.__logger.warning(
                'Block %s excepted with IndexError. replicas = %s' %
                (blockname, replicas))
            return False

        return False
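The PhEDEx examples pass a URL rather than a file path, so in those projects
get_json_data is evidently an HTTP helper. A minimal sketch under that
assumption:

import json
from urllib.request import urlopen

def get_json_data(url):
    # Assumed HTTP variant: fetch the URL and parse the JSON response body.
    with urlopen(url) as resp:
        return json.loads(resp.read().decode('utf-8'))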
Example 11
def get_authors(rg_paper_id):
    """Get authors for paper with rg_paper_id"""
    logger.debug("Get authors for paper.")
    ref_url = _AUTHORSLISTDATA.format(rg_paper_id, 0, 100)
    url = _FULLURL.format(_HOST, ref_url)
    try:
        req_result = utils.get_json_data(url, _PROXY_OBJ.get_cur_proxy())
        logger.debug("Parse host answer from json.")
        dict_req_result = json.loads(req_result)
    except BaseException:
        logger.warning(traceback.format_exc())
        return None
    success = dict_req_result['success']
    logger.debug("Status=%s." % success)
    if success:
        logger.debug("Data is correct and parsing succeeded.")
        return dict_req_result['result']['loadedItems']
    logger.debug("Data is not correct.")
    return None
Example 12
def get_auth_info(rg_account_id):
    """Get info about author with account_id"""
    logger.debug("Downloading the auth info.")
    ref_url = _AUTHORDATA.format(rg_account_id)
    url = _FULLURL.format(_HOST, ref_url)
    try:
        req_result = utils.get_json_data(url, _PROXY_OBJ.get_cur_proxy())
        logger.debug("Parse host answer from json.")
        dict_req_result = json.loads(req_result)
    except BaseException:
        logger.warning(traceback.format_exc())
        return None
    success = dict_req_result['success']
    logger.debug("Status=%s." % success)
    if success:
        logger.debug("Data is correct and parsing succeeded.")
        return dict_req_result['result']['data']
    logger.debug("Data is not correct.")
    return None
Example 13
def svm_train():
    '''
    Train an SVM.

    Returns:
        acc: accuracy on the held-out test set
    '''
    # Parameters and their default values
    args = {
        'train_x': None, 'train_y': None,
        'C': 1.0, 'kernel': 'rbf', 'degree': 3, 'gamma': 'auto', 'coef0': 0.0,
        'shrinking': True, 'probability': False, 'tol': 0.001, 'cache_size': 200,
        'class_weight': None, 'verbose': False, 'max_iter': -1,
        'decision_function_shape': 'ovr', 'random_state': None,
        'test_size': 0.1
    }
    # Required parameters; do not give them default values in args
    acq = ['train_x', 'train_y']
    # Parameter constraints (not implemented yet)
    lim = []

    # Parameter types. Parameters that can be either str or float (e.g. gamma)
    # are ignored here. For list parameters, the first element describes the
    # list itself, the next the element type along the first axis, and so on.
    types = {
        'train_x': [list, list, float], 'train_y': [list, float],
        'C': float, 'kernel': str, 'degree': int, 'coef0': float,
        'shrinking': bool, 'probability': bool, 'tol': float, 'cache_size': int,
        'class_weight': str, 'verbose': bool, 'max_iter': int,
        'decision_function_shape': str, 'random_state': int,
        'test_size': float
    }

    args = get_json_data(raw=request.get_data(as_text=True), args=args)
    if args is None:
        return json.dumps({'state': 'fail', 'msg': 'no JSON data received'}, ensure_ascii=False)
    missed_arg = check_aquired_args(args, acq)
    if missed_arg is not None:
        return json.dumps({'state': 'fail', 'msg': 'missing required argument(s)'}, ensure_ascii=False)
    args = type_format(args=args, types=types)

    clf = get_svm(**args)

    acc = train(clf, x=args['train_x'], y=args['train_y'],
                test_size=args['test_size'], max_iter=args['max_iter'])
    return json.dumps({'state': 'done', 'acc': acc})
Example 14
def get_referring_papers(rg_paper_id):
    """Get references dict for paper with rg_paper_id"""
    logger.debug("Downloading the list of referring articles.")
    ref_url = _PUBREFERENCESDATA.format(rg_paper_id)
    url = _FULLURL.format(_HOST, ref_url)
    try:
        req_result = utils.get_json_data(url, _PROXY_OBJ.get_cur_proxy())
        logger.debug("Parse host answer from json.")
        dict_req_result = json.loads(req_result)
    except BaseException:
        logger.warning(traceback.format_exc())
        return None
    success = dict_req_result['success']
    logger.debug("Status=%s." % success)
    if success:
        logger.debug("Data is correct and parsing succeeded.")
        return dict_req_result['result']['state'][
            'publicliteratureReferences']['itemEntities']
    logger.debug("Data is not correct.")
    return None
Example 15
class TestHRMLogin(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        cls.login_api = LoginApi()

    @classmethod
    def tearDownClass(cls) -> None:
        ...

    def setUp(self) -> None:
        ...

    def tearDown(self) -> None:
        ...

    @parameterized.expand(utils.get_json_data())
    def test01_login(self, mobile, pwd, http_code, success, code, message):
        response = self.login_api.login(mobile, pwd)
        json_data = response.json()
        logging.info("Login test returned: {}".format(json_data))
        utils.assert_common(self, response, http_code, success, code, message)
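For parameterized.expand to unpack six arguments per test case,
utils.get_json_data() presumably returns an iterable of six-element rows. A
hypothetical fixture shape (all values invented for illustration):

login_cases = [
    # (mobile, pwd, http_code, success, code, message)
    ('13800000000', 'right-password', 200, True, 10000, 'success'),
    ('13800000000', 'wrong-password', 200, False, 20001, 'invalid password'),
]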
Example 16
def download_set(json_path: str, save_folder: str, log_path: str):
    logging.basicConfig(filename=log_path, level=logging.ERROR)

    images, _ = get_json_data(json_path)
    reg = re.compile(r'image/(\w+)')  # raw string avoids the invalid-escape DeprecationWarning

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cpu_count() * 8) as executor:
        futures = []
        for data in images:
            url_str = data['url'][0]
            img_id = data['image_id']
            futures.append(
                executor.submit(
                    partial(download_image, url_str, save_folder, img_id,
                            reg)))
        for future in tqdm(futures):
            future.result()  # re-raise any exception from a failed download
    return
Example 17
def get_info_from_RIS(rg_paper_id):
    """Get RIS file for paper with rg_paper_id"""
    logger.debug(
        "Downloading RIS data about paper. RGID={0}.".format(rg_paper_id))
    data = utils.get_json_data(
        _FULLURL.format(_HOST, _PUBRISDATA.format(rg_paper_id)),
        _PROXY_OBJ.get_cur_proxy()).replace("\r", "")
    logger.debug("RIS file:\n%s" % data)
    logger.debug("Parse RIS data.")
    res = None
    try:
        if data is None:
            logger.debug("RIS data is empty.")
            return res
        datalines = data.split("\n")
        RISgenerator = RISparser.read(datalines)
        res = next(RISgenerator)
        if "doi" in res and not utils.is_doi(res["doi"]):
            res.pop("doi")
        res["RIS"] = data
    except BaseException:
        logger.warning(traceback.format_exc())
    return res
Example 18
    def __get_groupusage_values(self):

        url = "https://cmsweb.cern.ch/phedex/datasvc/json/prod/groupusage"
        groupusage_aux = get_json_data(url)
        groupusage = {}

        # Massage information to make it more accessible
        for site_dic in groupusage_aux['phedex']['node']:

            site = site_dic["name"]
            groupusage[site] = {}
            for group_dic in site_dic['group']:
                group = group_dic["name"]
                try:
                    groupusage[site][group] = {
                        "dest_bytes": int(group_dic["dest_bytes"]),
                        "node_bytes": int(group_dic["node_bytes"])
                    }
                except Exception:  # missing or non-numeric byte counts
                    self.__logger.error(
                        'Incomplete information for group %s in site %s in %s'
                        % (group, site, url))

        return groupusage
Example 19
    def __get_nodeusage_values(self):

        url = "https://cmsweb.cern.ch/phedex/datasvc/json/prod/nodeusage"
        nodeusage_aux = get_json_data(url)
        nodeusage = {}

        # Massage information to make it more accessible
        for site_dic in nodeusage_aux['phedex']['node']:

            site = site_dic["name"]
            try:
                nodeusage[site] = {
                    "src_node_bytes": int(site_dic["src_node_bytes"]),
                    "nonsrc_node_bytes": int(site_dic["nonsrc_node_bytes"]),
                    "noncust_dest_bytes": int(site_dic["noncust_dest_bytes"]),
                    "noncust_node_bytes": int(site_dic["noncust_node_bytes"]),
                    "cust_node_bytes": int(site_dic["cust_node_bytes"]),
                    "cust_dest_bytes": int(site_dic["cust_dest_bytes"])
                }
            except Exception:  # missing or non-numeric byte counts
                self.__logger.error(
                    'Incomplete information for site %s in %s' % (site, url))

        return nodeusage
Example 20
            serial_number_length = len(
                str(int(first_serial_number) +
                    scratch_codes_count).zfill(first_serial_number_length))
            scratch_codes = scratch.generate(serial_number_length, hash_type,
                                             hash_length, scratch_codes_count)
            write_json_data(scratch_codes, filename)

        else:
            print('Invalid file name.')

    if mode == '2':
        filename = input(
            'Enter the name of the file containing the scratch codes to verify: ')

        if os.path.isfile(filename):
            scratch_codes = get_json_data(filename)
            serial_number_length = len(
                str(int(first_serial_number) +
                    len(scratch_codes)).zfill(first_serial_number_length))
            scratch.create_activated_codes_table()
            checked_codes, right_codes = scratch.check(scratch_codes,
                                                       serial_number_length,
                                                       hash_type, hash_length)

            if right_codes:
                scratch.add_new_codes(right_codes)

            print(checked_codes)

        else:
            print('Invalid file name.')
Example 21
    for i, (x, y, w, h) in enumerate(faces):
        cv.putText(img, predictions[i], (x, y - 10), cv.FONT_HERSHEY_SIMPLEX,
                   0.5, (0, 255, 0), 2)
        cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv.imwrite('img_with_boxes.jpg', img)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_path",
                        help="path to image from GoT",
                        type=str)
    parser.add_argument("-t",
                        "--test",
                        help="run test on 5 sample images",
                        type=bool,
                        default=False)
    args = parser.parse_args()

    tf.logging.set_verbosity(tf.logging.ERROR)

    data = get_json_data('GoT_Face_Labelling_Ep7.json')

    predict_fn = tf.contrib.predictor.from_saved_model(
        "trained/trained_0/1531036933/")
    if args.test:
        run_test(predict_fn)
    else:
        run_predict_image(image_file=args.image_path, predict_fn=predict_fn)
Example 22
def json_data():
    return Response(get_json_data(), mimetype='application/json')
Example 23
def get_binary_text_ROI(gray):
    parameters = get_json_data()
    threshold_width = float(parameters["threshold_width"]["value"])
    threshold_height = float(parameters["threshold_height"]["value"])

    mser = cv2.MSER_create()
    mser.setMaxArea(800)
    contours, bboxes = mser.detectRegions(gray)
    (height, width) = gray.shape[:2]
    coords = []
    contours = sorted(contours, key=contour_rec_ara, reverse=True)
    for c in contours:
        bbox = cv2.boundingRect(c)
        x, y, w, h = bbox
        if (x > width * threshold_width and y < height * threshold_height
                and w / h <= 1 and w < width / 15 and h < height / 15
                and not_inside(bbox, coords)):
            coords.append(c)
    canvas = np.zeros_like(gray)
    for cnt in coords:
        xx = cnt[:, 0]
        yy = cnt[:, 1]
        color = 255
        canvas[yy, xx] = color
    cv2.imshow("canvas1",canvas)
    im2, contours, hierarchy = cv2.findContours(canvas.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour_info_list = []
    center_points = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cnt = cv2.convexHull(cnt)
        shape = detect(cnt)
        if shape != "unidentified":
            center_points.append([int(x + w / 2), int(y + h / 2)])
            contour_dict = {}
            contour_dict["rect"] = [x, y, w, h]
            contour_dict["contour"] = cnt
            contour_dict["shape"] = shape
            contour_dict["center"] = [int(x + w / 2), int(y + h / 2)]
            contour_info_list.append(contour_dict)

    ### Use RANSAC to find the line of center points

    num_centers = find_region_RANSAC(center_points, "", 800)
    ### Find the container number position; remove foreground not in num_centers.
    if len(num_centers) == 0:
        return canvas
    index = num_centers.shape[0] - 1
    for item in contour_info_list:
        if not is_point_in(item["center"], num_centers):
            rect = item["rect"]
            canvas[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]] = 0
        else:
            if np.array_equal(item["center"], num_centers[index]) and item["shape"] == "square":
                canvas = remove_rect(canvas, item["contour"])
    ## Get the image patch
    x_left = num_centers[0][0]
    x_right = num_centers[num_centers.shape[0] - 1][0]

    tmp_centers = num_centers[num_centers[:, 1].argsort()]
    y_top = tmp_centers[0][1]
    y_bottom = tmp_centers[tmp_centers.shape[0] - 1][1]
    #print("num_centers:", x_left, x_right, y_top, y_bottom)
    #print(tmp_centers)
    canvas = canvas[y_top - 18:y_bottom+18, x_left-18:x_right+18]


    return canvas
Example 24
x = DB_CONN.cursor()
query = """DELETE FROM tbl_MatchInfo WHERE weekID<>%d AND weekID<>%d""" % (today_id, (today_id -1))
x.execute(query)
query = """DELETE FROM tbl_Results WHERE weekID<>%d AND weekID<>%d""" % (today_id, (today_id -1))
x.execute(query)
query = """DELETE FROM tbl_Ratios WHERE weekID<>%d AND weekID<>%d""" % (today_id, (today_id -1))
x.execute(query)
DB_CONN.commit()

query = """SELECT matchID FROM tbl_MatchInfo WHERE weekID=%d AND was_played=False""" % (today_id -1)
results = x.execute(query)
matches_previous = {}

if results:
    j = get_json_data(URL_MATCHES % (today_id - 1))
    matches_previous = {m['d']: m['m'] for m in j['m']}

query = """SELECT matchID FROM tbl_MatchInfo WHERE weekID=%d AND was_played=True""" % today_id
x.execute(query)
results = [row[0] for row in x.fetchall()]

j = get_json_data(URL_MATCHES % today_id)
matches_last = {m['d']: m['m'] for m in j['m']}

all_matches = concat(list(matches_previous.values()) + list(matches_last.values()))  # list() keeps this working on Python 3, where dict views don't support +

for md in concat(list(matches_previous.values())):
    match = Match(md, weekid=today_id - 1)
    match.save_all()