def copy_config():
    config = utils.get_json_file_data('config.json')

    planConfigRootPath = utils.format_path(
        os.path.join(os.getcwd(), config['planConfigDir']))
    serverSrcPath = utils.format_path(
        os.path.join(planConfigRootPath, config['serverOnlyDirName']))
    commonSrcPath = utils.format_path(
        os.path.join(planConfigRootPath, config['commonDirName']))
    activitiesSrcPath = utils.format_path(
        os.path.join(planConfigRootPath, config['activitiesDirName']))

    dstPath = utils.format_path(
        os.path.join(os.getcwd(), config['serverConfigDir']))

    filterList = []
    for filterFile in config['serverNotCopy']:
        filterPath = utils.format_path(
            os.path.join(planConfigRootPath, filterFile))
        filterList.append(filterPath)

    utils.logger_info("更新配置......")
    os.system("svn update " + (planConfigRootPath).encode('gbk'))
    utils.logger_info("配置更新完成,准备拷贝!")

    utils.logger("配置源目录:")
    utils.logger(serverSrcPath.encode('utf-8'))
    utils.logger(commonSrcPath.encode('utf-8'))
    utils.logger(activitiesSrcPath.encode('utf-8'))
    utils.logger("拷贝到:" + dstPath.encode('utf-8'))
    utils.cover_copy_files(commonSrcPath, dstPath, filterList)
    utils.cover_copy_files(serverSrcPath, dstPath, filterList)
    utils.cover_copy_files(activitiesSrcPath, dstPath, filterList)
    utils.logger_info('配置拷贝结束!')
Example #2
async def create_disk_worker(project, rg_name, uri, disk_name, location, f):
    con = create_db_con()
    client_id = Project.objects(name=project)[0]['client_id']
    secret = Project.objects(name=project)[0]['secret']
    tenant_id = Project.objects(name=project)[0]['tenant_id']
    subscription_id = Project.objects(name=project)[0]['subscription_id']
    creds = ServicePrincipalCredentials(client_id=client_id,
                                        secret=secret,
                                        tenant=tenant_id)
    compute_client = ComputeManagementClient(creds, subscription_id)
    con.close()
    async_creation = compute_client.images.create_or_update(
        rg_name, disk_name, {
            'location': location,
            'storage_profile': {
                'os_disk': {
                    'os_type': 'Linux',
                    'os_state': "Generalized",
                    'blob_uri': uri,
                    'caching': "ReadWrite",
                    'storage_account_type': 'StandardSSD_LRS'
                }
            },
            'hyper_vgeneration': 'V1'
        })
    image_resource = async_creation.result()
    try:
        BluePrint.objects(project=project,
                          host=disk_name).update(image_id=disk_name,
                                                 status='40')
    except Exception as e:
        print("disk creation updation failed: " + repr(e))
        logger("Disk creation updation failed: " + repr(e), "warning")
    finally:
        con.close()
Example #3
def parse_mail(mailStr):
    original = email.message_from_string(mailStr)
    org_subj = original['Subject']
    try:
        subj = decode_header(original['Subject'])[0][0]
        win_subj = subj.decode('utf-8')
        fin_subj = win_subj.encode("ascii", "ignore")
        logger(['subject', org_subj, subj, fin_subj])
    except:
        fin_subj = org_subj
    parsed_mail_from = parse_mail_from(original['From'])
    parsed_mail_to = parse_mail_from(original['To'])
    parsed_mail = {
        "from": parsed_mail_from,
        "sub": fin_subj,
        "to": parsed_mail_to,
        "date": adaptTime(original['Date']),
        "phoneNumber": '+919722761117'
    }
    for part in original.walk():
        if part.get_content_type() == "text/plain":
            parsed_mail["body"] = part.get_payload(decode=True)
        else:
            continue

    logger(
        ['parsed mail time is', original['Date'],
         adaptTime(original['Date'])])
    return parsed_mail
Example #4
def create_vnet(rg_name, vnet_name, cidr, location, project):
    print("Provisioning a vnet...some operations might take a minute or two.")
    con = create_db_con()
    client_id = Project.objects(name=project)[0]['client_id']
    secret = Project.objects(name=project)[0]['secret']
    tenant_id = Project.objects(name=project)[0]['tenant_id']
    subscription_id = Project.objects(name=project)[0]['subscription_id']
    creds = ServicePrincipalCredentials(client_id=client_id,
                                        secret=secret,
                                        tenant=tenant_id)
    network_client = NetworkManagementClient(creds, subscription_id)
    poller = network_client.virtual_networks.create_or_update(
        rg_name, vnet_name, {
            "location": location,
            "address_space": {
                "address_prefixes": [cidr]
            }
        })
    vnet_result = poller.result()
    print(
        f"Provisioned virtual network {vnet_result.name} with address prefixes {vnet_result.address_space.address_prefixes}"
    )
    try:
        BluePrint.objects(network=cidr).update(vpc_id=vnet_result.name,
                                               status='43')
    except Exception as e:
        print("Vnet creation failed to save: " + repr(e))
        logger("Vnet creation failed to save: " + repr(e), "warning")
        return False
    finally:
        con.close()
    return True
def handle_file(args, name, now):
    meta = {}
    count = 0
    col_size = len(args.columnName.split(','))
    try:
        rf = open(args.sourceFile, 'r', encoding='utf-8')
    except:
        utils.edit_list(now, 'err_generate\n')
        logger().error(['err_data_source', args.sourceFile])
        return {}

    with open(name, 'a', encoding='utf-8') as wf:
        for line in rf:
            count = count + 1
            rows = line.strip().split(args.separator)
            if len(rows) != col_size:
                utils.edit_list(now, 'err_generate\n')
                logger().error(['err_source_info', now+'/'+str(count)])
                return {}
            wf.write('|||'.join(('%s' % i).replace('|||', '---') for i in rows).replace('\n', '') + '\n')

    with open(name, 'r', encoding='utf-8') as f:
        content = f.read()
    meta.__setitem__("size", count)
    meta.__setitem__("md5", utils.get_md5(content))
    meta.__setitem__("colName", args.columnName)
    return meta
Example #6
def parse_mail_from(fromStr):
    logger(fromStr)
    reg_grp = re.search("<(.+?)>", fromStr)
    mail_from = fromStr
    if reg_grp is not None:
        mail_from = reg_grp.group(1)
    return mail_from
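A minimal usage sketch for parse_mail_from (the addresses below are made up; logger and re come from the snippet's own module):

def _demo_parse_mail_from():
    # "Name <addr>" headers collapse to the bare address; plain addresses pass through unchanged.
    assert parse_mail_from("Alice Example <alice@example.com>") == "alice@example.com"
    assert parse_mail_from("bob@example.com") == "bob@example.com"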
def run(args):
    # Preparation stage
    now = str(int(round(time.time() * 1000)))
    args.tagName = utils.decodeURL(args.tagName)
    utils.write_to_list(now + '|' + args.settingsActive.replace('|', '-') + '|' + args.tagName.replace('|', '-') + '|' + 'generating' + '\n')
    path = args.path + "/" + now
    utils.mkdir(path)
    name = args.tagOwner + "." + now
    real_path = path + "/" + name + ".data"

    # Generate the data package file
    meta = source_filter()[args.sourceType](args, real_path, now)

    # Generate the meta file
    if meta != {}:
        real_path = path + "/" + name + ".meta"
        meta["timestamp"] = now
        # meta["dataName"] = real_path
        meta["tagName"] = args.tagName
        meta_json = utils.to_json(meta)
        generate_meta(real_path, utils.encode(meta_json))
        utils.edit_list(now, 'generated' + '\n')
        logger().info(['generated', now])
    else:
        logger().error(['err_generate', now])
Example #8
def get_vm_types(project):
    client = ''
    location = ''
    machine_types = []
    try:
        con = create_db_con()
        subscription_id = Project.objects(name=project)[0]['subscription_id']
        client_id = Project.objects(name=project)[0]['client_id']
        tenant_id = Project.objects(name=project)[0]['tenant_id']
        secret_id = Project.objects(name=project)[0]['secret']
        location = Project.objects(name=project)[0]['location']
        creds = ServicePrincipalCredentials(client_id=client_id,
                                            secret=secret_id,
                                            tenant=tenant_id)
        client = ComputeManagementClient(creds, subscription_id)
        machine_types = list_available_vm_sizes(client,
                                                region=location,
                                                minimum_cores=1,
                                                minimum_memory_MB=768)
        flag = True
    except Exception as e:
        print(repr(e))
        logger("Fetching vm details failed: " + repr(e), "warning")
        flag = False
    con.close()
    return machine_types, flag
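list_available_vm_sizes is defined elsewhere in this project; a plausible sketch of its shape, assuming it filters the Azure SDK's size list by the requested minimums (not the repository's actual implementation):

def list_available_vm_sizes_sketch(compute_client, region, minimum_cores=1, minimum_memory_MB=768):
    # virtual_machine_sizes.list() yields VirtualMachineSize objects with
    # name, number_of_cores and memory_in_mb attributes.
    return [size.name
            for size in compute_client.virtual_machine_sizes.list(location=region)
            if size.number_of_cores >= minimum_cores
            and size.memory_in_mb >= minimum_memory_MB]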
async def download_worker(osdisk_raw, project, host):
    con = create_db_con()
    account_name = Storage.objects(project=project)[0]['storage']
    container_name = Storage.objects(project=project)[0]['container']
    access_key = Storage.objects(project=project)[0]['access_key']
    sas_token = sas.generate_sas_token(account_name, access_key)
    pipe_result = ''
    file_size = '0'
    try:
        cur_path = os.getcwd()
        path = cur_path + "/osdisks/" + osdisk_raw
        if not os.path.exists(path):
            os.popen(
                'echo "download started"> ./logs/ansible/migration_log.txt')
            url = "https://" + account_name + ".blob.core.windows.net/" + container_name + "/" + osdisk_raw + "?" + sas_token
            command1 = "azcopy copy --recursive '" + url + "' '" + path + "'"
            os.popen('echo ' + command1 +
                     '>> ./logs/ansible/migration_log.txt')
            process1 = await asyncio.create_subprocess_shell(command1,
                                                             stdin=PIPE,
                                                             stdout=PIPE,
                                                             stderr=STDOUT)
            await process1.wait()
            BluePrint.objects(project=project, host=host).update(status='32')
    except Exception as e:
        print(repr(e))
        logger(str(e), "warning")
    finally:
        con.close()
def E(level=1):
    if level == 0:
        from common import level1 as P
        P = partial(P, FOnly=True) # high order function, here we only test LEVEL-1 F CNN
    elif level == 1:
        from level import level1 as P
    elif level == 2:
        from level import level2 as P
    else:
        from level import level3 as P

    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 5))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = P(img, bbox)
        plot_point[i] = landmarkP

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
Example #11
	def fixfilebase(self, f):
		assert f.startswith(self.expectedpath), [f, self.expectedpath]
		f = self.newpath + f[len(self.expectedpath):]
		schemafile = f.split(':')[-1].split('#')[0]
		if not cmn.fexists(schemafile):
			 logger('#err ...schema file {0} not found\n'.format(schemafile))
		return f
Example #12
    def begin_backend_session(self, sysid, password):
        self.logging.info(('start login', sysid, password))
        logger().info(('start login', sysid, password))
        user = self.application.dbutil.isloginsuccess(sysid,
                                                      password)  # MySQL database
        if not user:
            print("login failed")
            return False
        self.logging.info(('login checked', sysid, password))

        # Record the user's login
        name = "Login user: " + sysid if type(
            sysid) != bytes else "Login user: " + sysid.decode()
        ip_info = self.request.remote_ip
        logtime = time.strftime("%Y-%m-%d %H:%M:%S",
                                time.localtime(time.time()))
        sql = "insert into tb_login_record values(null, '%s', '%s', '%s')" % (
            name, ip_info, logtime)
        self.application.dbutil.execute(sql)

        # Look up all of this user's permissions
        # permission = self.application.dbutil.getFindPermission(sysid)
        # arr = []
        # for p in permission:
        #     arr.append(p["title"])

        self.session["data"] = user
        self.session["sysid"] = sysid
        # self.session['permission'] = arr
        self.session.save()
        return True
Example #13
def create_subnet(rg_name, vnet_name, subnet_name, cidr, project):
    print(
        "Provisioning a subnet...some operations might take a minute or two.")
    con = create_db_con()
    client_id = Project.objects(name=project)[0]['client_id']
    secret = Project.objects(name=project)[0]['secret']
    tenant_id = Project.objects(name=project)[0]['tenant_id']
    subscription_id = Project.objects(name=project)[0]['subscription_id']
    creds = ServicePrincipalCredentials(client_id=client_id,
                                        secret=secret,
                                        tenant=tenant_id)
    network_client = NetworkManagementClient(creds, subscription_id)
    con.close()
    poller = network_client.subnets.create_or_update(rg_name, vnet_name,
                                                     subnet_name,
                                                     {"address_prefix": cidr})
    subnet_result = poller.result()
    print(
        f"Provisioned virtual subnet {subnet_result.name} with address prefix {subnet_result.address_prefix}"
    )
    try:
        con = create_db_con()
        print(subnet_result.id)
        BluePrint.objects(subnet=cidr).update(subnet_id=str(subnet_result.id),
                                              status='60')
    except Exception as e:
        print("Subnet creation failed to save: " + repr(e))
        logger("Subnet creation failed to save: " + repr(e), "warning")
        return False
    finally:
        con.close()
    return True
def handle_db(args, name, now):
    try:
        conn = utils.get_mysql_conn(args.host, args.dbUser, args.dbPassword, args.database)
        cursor = conn.cursor()
        cursor.execute(args.sql)
    except:
        utils.edit_list(now, 'err_generate\n')
        logger().error(['err_data_source', now+'/'+args.host])
        return {}
    meta = {}
    count = 0
    rows = cursor.fetchmany(int(args.fetchSize))
    with open(name, 'a', encoding='utf-8') as f:
        while rows:
            for row in rows:
                v = '|||'.join(('%s' % i).replace('|||', '---') for i in list(row))
                f.write(v.replace('\n', '') + '\n')
                count = count + 1
            if count % int(args.fetchSize) == 0:
                rows = cursor.fetchmany(int(args.fetchSize))
            else:
                rows = False
    with open(name, 'r', encoding='utf-8') as f:
        content = f.read()
    cols = [col[0] for col in cursor.description]
    meta["size"] = count
    meta["md5"] = utils.get_md5(content)
    meta["colName"] = ','.join(cols)
    return meta
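Both handle_db and handle_file serialise rows with the same convention: '|||' separates fields, any literal '|||' inside a field is rewritten to '---', and embedded newlines are stripped. A small self-contained illustration:

def _serialize_row(row):
    # Mirrors the write loops above.
    return '|||'.join(('%s' % i).replace('|||', '---') for i in row).replace('\n', '')

# _serialize_row(['a', 'b|||c', 3]) -> 'a|||b---c|||3'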
Example #15
def runVideo(cap):

    Frame.frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    Frame.frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    first_go = True
    frame = None
    current_frame = None
    previous_frame = None

    while True:

        if not first_go:
            previous_frame = Frame(current_frame.getFrame())

        ret, frame = cap.read()
        if not ret:
            break
        else:
            current_frame = Frame(frame)

        if not first_go:

            difference_frame = current_frame.getDifferenceFrame(previous_frame)

            thresholded_frame = difference_frame.getBinary(threshold=100)

            dilated_frame = thresholded_frame.getDilated(iterations=10)

            valid_contours = dilated_frame.findObjects(
                minContourZone=settings.MIN_COUNTOUR_ZONE)
            Tracker.registerNewObjects(valid_contours, current_frame)

            # set which frame to display for user
            ready_frame = current_frame

            ready_frame.addBoundingBoxesFromContours(Tracker)

            st.write("test")
            ready_frame.putText("Threads: " + str(threading.active_count()),
                                (7, 20))
            ready_frame.putText(
                "Object Bufor size: " + str(len(Tracker.objectsToClassify)),
                (7, 50))
            ready_frame.putText("FPS: " + FPS.tick(), (7, 80))
            ready_frame.putText(
                "Cars passed: " + str(len(Tracker.lostObjects)), (7, 110))
            ready_frame.putText("Test var: " + str(12), (7, 140))

            ready_frame.show()
        else:
            first_go = False
            current_frame.show()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            logger('cars found: ' + str(Tracker.lostObjects), settings.LOG)
            break

    cap.release()
    cv2.destroyAllWindows()
    stopThreads()
async def conversion_worker(osdisk_raw, project, host):
    con = create_db_con()
    account_name = Storage.objects(project=project)[0]['storage']
    container_name = Storage.objects(project=project)[0]['container']
    access_key = Storage.objects(project=project)[0]['access_key']
    sas_token = sas.generate_sas_token(account_name, access_key)
    pipe_result = ''
    try:
        osdisk_vhd = osdisk_raw.replace(".raw", ".vhd")
        cur_path = os.getcwd()
        path = cur_path + "/osdisks/" + osdisk_raw
        vhd_path = cur_path + "/osdisks/" + osdisk_vhd
        print("Start converting")
        print(path)
        os.popen('echo "start converting">> ./logs/ansible/migration_log.txt')
        command2 = "qemu-img convert -f raw -o subformat=fixed -O vpc " + path + " " + vhd_path
        process2 = await asyncio.create_subprocess_shell(command2,
                                                         stdin=PIPE,
                                                         stdout=PIPE,
                                                         stderr=STDOUT)
        await process2.wait()
        BluePrint.objects(project=project, host=host).update(status='34')
        os.popen(
            'echo "Conversion completed" >> ./logs/ansible/migration_log.txt')
    except Exception as e:
        print(str(e))
        logger(str(e), "warning")
        file_size = '0'
    finally:
        con.close()
Example #17
 def init_messages_dict(self):
     messages = {}
     count = 0
     for language_dir in [
             d for d in listdir(self.translations_dir)
             if path.isdir(path.join(cwd[0], self.translations_dir, d))
     ]:
         temp_messages = {}
         for fileName in listdir('{}/{}'.format(self.translations_dir,
                                                language_dir)):
             if self.translations_extension in fileName:
                 try:
                     filePath = '{}/{}/{}'.format(self.translations_dir,
                                                  language_dir, fileName)
                     with open(filePath, 'r',
                               encoding='utf-8') as translation_file:
                         temp_messages[fileName.replace(
                             self.translations_extension,
                             '')] = load(translation_file)
                         count += 1
                 except Exception as err:
                     logger('Failed to add file [{}] ({})'.format(filePath, err))
         messages[language_dir] = temp_messages
     self.messages.update(messages)
     logger('Languages supported ({}), translation files added ({}).. done!'.format(len(messages), count))
Example #18
def main(args):

	if not args.has('-config'):
		args.add_key('-config',  "./config.json")

	logger(str([args.keys, args.args()]) + '\n')

	if not args.has('-out'):
		logger('#__noOutFileGiven___\n')
		return

	if not args.has('-dbg') and (cmn.fexists(args['-out']) and not args.has('-overwrite-outfile')):
		logger('#__outfile:{0} exists\n'.format(args['-out']))
		return

	#try:
	if True:
		if args.has('-extract'):
			import sraparse
			return sraparse.SRAParseObjSet.extract_attributes_to_json(args.args())
		elif args.has("-test-sample"):
			testargs = ["./examples/samples.xml", "-config:{0}".format(args['-config']), "-out:./examples/samples.versioned.xml"]
			validate_sample.main(Config(testargs))
		elif args.has("-sample"):
			validate_sample.main(args)
		elif args.has("-experiment"):
			validate_experiment.main(args) 
		else:
			raise NotImplementedError("#__unknownArguments__")
	else:
	#except Exception as err:
		logger('#__unexpected__\n')
		logger(str(err.message) + '\n')
    def from_csv(self, path, doc_id):
        """
            read CSV and created inverted index for 'Snippet' column
        """
        try:
            csv = read_csv(path)
        except Exception as e:
            logger(str(e) + str(path))
            return

        # for i_row in tqdm(range(len(csv['Snippet'])),
        #                   bar_format='{l_bar}{bar:50}{r_bar}{bar:-50b}',
        #                   desc=("parsing csv file : " + path.split("/")[-1])):
        for i_row in range(len(csv['Snippet'])):
            tokens = self._tokenizer.tokenize(csv['Snippet'][i_row])
            if REMOVE_STOP_WORDS:
                tokens = [i for i in tokens if i not in self._stop_words]
            doc = self._n_docs + i_row
            self._post2doc_mapper[doc] = [doc_id, i_row]
            self._tmp_doc[doc] = []
            for token in tokens:
                if token in self._vocab.keys():
                    self._vocab[token] += 1
                else:
                    self._vocab[token] = 1
                term = self._stemmer.stem(self._lemmatizer.lemmatize(token))
                self._tmp_doc[doc].append(term)
                self._trie.add_string(term, doc)

        self._n_docs += len(csv['Snippet'])
Example #20
def xoauth_authenticate(emailId, access_token):
    logger([access_token, emailId])

    def _auth(*args, **kwargs):
        return 'user=%s\1auth=Bearer %s\1\1' % (emailId, access_token)

    return 'XOAUTH2', _auth
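The returned pair is shaped for imaplib's authenticate call; a hedged usage sketch (the host, address and token are placeholders):

import imaplib

def _demo_xoauth_login(email_id, access_token):
    # authenticate() invokes the auth callback to build the XOAUTH2
    # initial client response produced by _auth above.
    imap = imaplib.IMAP4_SSL('imap.example.com')
    mechanism, auth_object = xoauth_authenticate(email_id, access_token)
    imap.authenticate(mechanism, auth_object)
    return imap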
Example #21
 def start(self):
     # self.start.__doc__ = self._thread.start.__doc__
     if not self.started:
         self.started = True
         self.return_value = self.target(*self.args, **self.kwargs)
         logger('fake_thread_started', self.target.__name__)
     else:
         raise RuntimeError()
Example #22
	def obj_id(self, e):
		try:
			idblock = e.get('@idblock', dict())
			tags = [idblock[k]  for k in ['alias', 'refname', 'accession'] if k in idblock]
			return 'unknown' if not tags else self.sanitizer.filter_alphan('.'.join(tags), '.-_') 
		except Exception as e:
			logger('#__couldNotExactId__:{0}\n'.format(e ))
			return 'unknown'
Example #23
def get_user_list():
    users = requests.get('http://localhost:8082/users').json()
    verified_users = []
    for user in users:
        if user['verified'] is True:
            verified_users.append(user)
    logger(['verified users', verified_users])
    return verified_users
def get_words(terminals, landmarks, rel=None):
    words = []
    probs = []
    entropy = []

    for n,lmk in zip(terminals, landmarks):
        # if we could not get an expansion for the LHS, we just pass down the unexpanded nonterminal symbol
        # it gets the probability of 1 and entropy of 0
        if n in NONTERMINALS:
            words.append(n)
            probs.append(1.0)
            entropy.append(0.0)
            continue

        lmk_class = (lmk.object_class if lmk else None)
        lmk_color = (lmk.color if lmk else None)
        rel_class = rel_type(rel)
        dist_class = (rel.measurement.best_distance_class if hasattr(rel, 'measurement') else None)
        deg_class = (rel.measurement.best_degree_class if hasattr(rel, 'measurement') else None)

        cp_db = CWord.get_word_counts(pos=n,
                                      lmk_class=lmk_class,
                                      lmk_ori_rels=get_lmk_ori_rels_str(lmk),
                                      lmk_color=lmk_color,
                                      rel=rel_class,
                                      rel_dist_class=dist_class,
                                      rel_deg_class=deg_class)

        if cp_db.count() <= 0:
            logger( 'Could not expand %s (lmk_class: %s, lmk_color: %s, rel: %s, dist_class: %s, deg_class: %s)' % (n, lmk_class, lmk_color, rel_class, dist_class, deg_class) )
            terminals.append( n )
            continue

        logger( 'Expanded %s (lmk_class: %s, lmk_color: %s, rel: %s, dist_class: %s, deg_class: %s)' % (n, lmk_class, lmk_color, rel_class, dist_class, deg_class) )

        ckeys, ccounts = zip(*[(cword.word,cword.count) for cword in cp_db.all()])

        ccounter = {}
        for cword in cp_db.all():
            if cword.word in ccounter: ccounter[cword.word] += cword.count
            else: ccounter[cword.word] = cword.count

        ckeys, ccounts = zip(*ccounter.items())

        # print 'ckeys', ckeys
        # print 'ccounts', ccounts

        ccounts = np.array(ccounts, dtype=float)
        ccounts /= ccounts.sum()

        w, w_prob, w_entropy = categorical_sample(ckeys, ccounts)
        words.append(w)
        probs.append(w_prob)
        entropy.append(w_entropy)

    p, H = np.prod(probs), np.sum(entropy)
    # print 'expanding %s to %s (p: %f, H: %f)' % (terminals, words, p, H)
    return words, p, H
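categorical_sample is provided elsewhere in this codebase; a rough sketch of what it is assumed to do here, namely draw one key from the normalised counts and report that draw's probability together with the distribution's entropy:

import numpy as np

def categorical_sample_sketch(keys, probs):
    # probs has already been normalised above (ccounts /= ccounts.sum()).
    idx = np.random.choice(len(keys), p=probs)
    entropy = -np.sum(probs * np.log2(probs))
    return keys[idx], probs[idx], entropy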
Example #25
    def begin_backend_session1(self, sysid, password):
        self.logging.info(('start login', sysid, password))
        logger().info(('start login', sysid, password))
        if not self.application.backend_auth.login(sysid,
                                                   password):  # MongoDB database
            print("login failed")
            return False
        self.logging.info(('login checked', sysid, password))
        user = self.db.tb_system_user.find_one({'userid': sysid}, {
            'passwd': 0,
            '_id': 0
        })
        if not user:
            print("no user exists!", sysid)
            return False

        now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        last = self.db.tb_login_record.find_one({}, {
            "id": 1,
            "_id": 0
        },
                                                sort=[("id",
                                                       pymongo.DESCENDING)])
        id = int(last.get("id", 0)) + 1 if last else 1
        ip_info = self.request.remote_ip
        self.db.tb_login_record.insert({
            "userid": sysid,
            "ip": ip_info,
            "atime": now,
            "id": id
        })

        # user["db"] = self.application.settings["database"]
        # user["system"] = self.application.settings["system"]
        logininfos = user.get('login', [])
        logininfos.append({"ip": ip_info, "time": now})
        self.db.tb_system_user.update(
            {'userid': sysid},
            {'$set': {
                'status': 'online',
                "login": logininfos[-10:]
            }})
        user['status'] = "online"
        user['login'] = logininfos[-10:]
        print("==========================", logininfos, logininfos[-10:])
        print(user['login'])

        # Look up all of this user's permissions
        # permission = self.application.dbutil.getFindPermission(sysid)
        # arr = []
        # for p in permission:
        #     arr.append(p["title"])

        self.session["data"] = user
        self.session["sysid"] = sysid
        # self.session['permission'] = arr
        self.session.save()
        return True
def get_sentence_meaning_likelihood(sentence, lmk, rel):
    modparse = get_modparse(sentence)
    t = ParentedTree.parse(modparse)
    print '\n%s\n' % t.pprint()

    probs, entropies, lrpc, tps = get_tree_probs(t, lmk, rel)
    if np.prod(probs) == 0.0:
        logger('ERROR: Probability product is 0 for sentence: %s, lmk: %s, rel: %s, probs: %s' % (sentence, lmk, rel, str(probs)))
    return np.prod(probs), sum(entropies), lrpc, tps
	def validate_semantics(self, attrs):
		attributes = attrs['attributes']
		if 'donor_age_unit' in attributes and attributes['donor_age_unit'] == 'year' and isinstance(attributes['donor_age'], int):
			age = int(attributes['donor_age'])
			if age > 90:
				logger('#__error: Donors over 90 years of age should be entered as "90+"\n')
				return False

		return True
	def __init__(self, sra, validators):
		super(SampleValidator, self).__init__(validators)
		self.normalize = lambda t: t.lower().replace(' ', '_')
		self.sra = sra
		self.xmljson = self.sra.obj_xmljson()
		for (xml, attrs) in self.xmljson:
			logger('\n#__normalizingTags:{0}\n'.format(attrs['title']))
			attrs['attributes'] = self.normalize_tags(attrs['attributes'])
		logger("\n\n")
Example #29
    def run_once(self, make_thread=True, last_update_id=None, update_timeout=30):
        """ Check the the messages for commands and make a Thread or FakeThread with the command depending on make_thread.

        Args:
          make_thread:
            True: the function returns a list with threads. Which didn't start yet.
            False: the function returns a list with FakeThreads. Which did'nt start yet.
          last_update_id:
            the offset arg from getUpdates and is kept up to date within this function
          update_timeout:
            timeout for updates. can be None for no timeout.

        Returns:
          A tuple of two elements. The first element is a list with Threads or FakeThreads which didn't start yet.
          The second element is the updated las_update_id
         """
        if make_thread:
            ch_Thread = threading.Thread
        else:
            ch_Thread = FakeThread
        bot_name = self.bot.username
        threads = {}
        self._getupdates_can_write.append(True)
        get_updates_index = len(self._getupdates_can_write) - 1
        get_updates_thread = threading.Thread(target=self.get_updates,
                                              kwargs={'index': get_updates_index,
                                                      'offset': last_update_id})
        get_updates_thread.start()
        get_updates_thread.join(timeout=update_timeout)
        if get_updates_thread.is_alive():
            logger('ERROR getupdates timed out, using empty list')
            self._getupdates_can_write[get_updates_index] = False
            self._last_updates = []
        updates = self._last_updates
        for update in updates:
            last_update_id = update.update_id + 1
            message = update.message
            if len(message.text) == 0:
                message.text = '   '
            if message.text[0] == '/':
                command, username = message.text.split(' ')[0], bot_name
                if '@' in command:
                    command, username = command.split('@')
                if username == bot_name:
                    command_func = self._get_command_func(command)
                    if command_func is not None:
                        self.bot.sendChatAction(chat_id=update.message.chat.id, action=telegram.ChatAction.TYPING)
                        if self.isValidCommand is None or self.isValidCommand(update):
                            t = ch_Thread(target=command_func, args=(update,))
                            threads[(message.text, update.message.chat.id)] = t
                        else:
                            t = ch_Thread(target=self._command_not_valid, args=(update,))
                            threads[(message.text + ' unauthorized', update.message.chat.id)] = t
                    else:
                        t = ch_Thread(target=self._command_not_found, args=(update,))
                        threads[(message.text + ' not found', update.message.chat.id)] = t
        return threads, last_update_id
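A hedged sketch of a polling loop built on run_once (the handler variable is a placeholder for an instance of this class):

def _poll_forever(handler, update_timeout=30):
    last_update_id = None
    while True:
        # run_once returns not-yet-started threads plus the new getUpdates offset.
        threads, last_update_id = handler.run_once(make_thread=True,
                                                   last_update_id=last_update_id,
                                                   update_timeout=update_timeout)
        for t in threads.values():
            t.start()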
Example #30
 def send_complete(self, server, recipient, error_text='', status=-1, error=None):
     if status == 1:
         self.send_success += 1
         logger('Mail sent successfully (server [{}] to {}) successes({}) [{}]'.format(server.name, recipient, self.send_success, self.translator.language))
         return self.translator.tran('mail.complete.success')
     else:
         self.send_failed += 1
         logger('Mail send failed (server [{}] to {}) [{}] ({}) failures({}) [{}]'.format(server.name, recipient, error_text, str(error), self.send_failed, self.translator.language))
         return self.translator.tran('mail.complete.failed') + error_text
def metric_compute(val_labels, predict_labels):
    logger("the result of prediction of validation is as follow:")
    occlu_ratio = occlu_ratio_compute(val_labels)
    accuracy = accuracy_compute(val_labels, predict_labels)
    occlu_recall = recall_compute(val_labels, predict_labels, mode="occlu")
    clear_recall = recall_compute(val_labels, predict_labels, mode="clear")
    print("occlu_ratio is {}".format(occlu_ratio))
    print("accuracy is {}".format(accuracy))
    print("occlu_recall is {}".format(occlu_recall))
    print("clear_recall is {}".format(clear_recall))
Example #32
	def extract_additional_experiment_attributes(self, obj, hashed):
		strategy = hashed.get("library_strategy", "" ).strip()
		if not strategy:
			strategy = self.extract_optional(obj, ".//SEQUENCING_LIBRARY_STRATEGY")
			if not strategy or len(strategy) > 1:
				logger("#warn__: cannot parse 'library_strategy' or 'library_sequencing_strategy'..  {0}\n ".format(str(strategy)))
			else:
				logger("#warn__: updated 'library_strategy' with 'library_sequencing_strategy'..  {0}\n ".format(str(strategy[0].text)))
				hashed["library_strategy"] = strategy[0].text.strip()
		return hashed
Example #33
	def from_sra_main_to_attributes(self, hashed):
		if 'library_strategy' in hashed:
			if 'LIBRARY_STRATEGY' in hashed['attributes'] or 'library_strategy' in hashed['attributes']:
				lib_strat_attr = 'LIBRARY_STRATEGY' if 'LIBRARY_STRATEGY' in hashed['attributes'] else 'library_strategy'
				hashed['attributes']['LIBRARY_STRATEGY_IHEC'] = hashed['attributes'][lib_strat_attr]
				old_lib_start = hashed['attributes'].pop(lib_strat_attr)
				logger("#warn:__library_strategy__ defined in both SRA block and as IHEC attribute:{0}, value pushed into 'LIBRARY_STRATEGY_IHEC'\n".format(old_lib_start))
			hashed['attributes']['LIBRARY_STRATEGY'] = [hashed['library_strategy']]
		#hashed['attributes']['@idblock'] = hashed['@idblock']	
		return hashed
Example #34
 def __init__(self, name, account, password, host, port, sender_name='osu!Kafuu', client_type='default', reply_address=None):
     self.name = name
     self.account = account
     self.password = password
     self.host = host
     self.port = port
     self.sender_name = sender_name
     self.client_type = client_type
     self.reply_address = reply_address or account
     logger('Added mail server: {} <{}>'.format(name, account))
Example #35
 def init_templates(self):
     templates = {}
     count = 0
     for fileName in listdir(self.template_dir):
         if self.template_extension in fileName:
             with open('{}/{}'.format(self.template_dir, fileName), 'r', encoding='utf-8') as template_file:
                 templates[fileName.replace(self.template_extension, '')] = Template(template_file.read())
                 count += 1
     self.templates = templates
     logger('Initialized mail templates ({}).. success!'.format(count))
Example #36
def adaptTime(date):
    logger(['date to adapt', date])
    pattern = '%a, %d %b %Y %H:%M:%S'
    try:
        return int(time.mktime(time.strptime(split_date_str(date),
                                             pattern))) * 1000
    except:
        pattern = '%d %b %Y %H:%M:%S'
        return int(time.mktime(time.strptime(split_date_str(date),
                                             pattern))) * 1000
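adaptTime turns an RFC-2822-style Date header into epoch milliseconds; a small worked check using the same format string (the date is made up, and split_date_str is assumed to strip any trailing timezone):

import time

def _demo_adapt_time():
    # '%a, %d %b %Y %H:%M:%S' parses headers such as 'Mon, 01 Jan 2024 10:30:00'.
    parsed = time.strptime('Mon, 01 Jan 2024 10:30:00', '%a, %d %b %Y %H:%M:%S')
    return int(time.mktime(parsed)) * 1000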
Example #37
 def send_mail(self, recipient, title, content, sender_name='', server_name='', template_data={}, lang='cn', tried_servers=[]):
     send_status = -1
     tried_servers_temp = tried_servers if len(tried_servers) > 0 else []
     self.translator.language = lang
     if len(tried_servers) == len(self.mail_servers):
         logger('Maximum retry count reached.. giving up on this mail task!')
         return send_status, self.translator.tran('mail.errors.service_unavailable')
     
     if server_name: 
         server = [s for s in self.mail_servers if s.name == server_name][0]
     else: 
         if len(tried_servers) > 0: 
             logger('Send failed, switching servers and retrying... [current retry count ({})]'.format(len(tried_servers_temp)))
             server = [s for s in self.mail_servers if s.name not in tried_servers][0] 
         else: server = self.mail_servers[0]
         
     client = smtplib.SMTP_SSL(host=server.host) if server.port == 465 or server.client_type == 'ssl' else smtplib.SMTP(host=server.host)
     temp = self.templates.get(content)
     if temp != None:
         content = self.template_render(temp, template_data)
     
     msg = MIMEText(content, 'html', 'utf-8')
     msg['Subject'] = Header(title, 'utf-8')
     msg['From'] = '{} <{}>'.format(sender_name or server.sender_name, server.account)
     msg['To'] = '{} <{}>'.format('osu!Kafuu User', recipient)
     msg['Reply-to'] = server.reply_address
     
     try:
         client.connect(server.host, server.port)
         client.login(server.account, server.password)
         client.sendmail(server.account, [recipient], msg.as_string())
         client.quit()
         send_status = 1
         re_msg = self.send_complete(server, recipient, status=1)
         
     except smtplib.SMTPConnectError as err:
         re_msg = self.send_complete(server, recipient, self.send_error(0), error=err)
     except smtplib.SMTPAuthenticationError as err:
         re_msg = self.send_complete(server, recipient, self.send_error(1), error=err)
     except smtplib.SMTPSenderRefused as err:
         re_msg = self.send_complete(server, recipient, self.send_error(2), error=err)
     except smtplib.SMTPRecipientsRefused as err:
         re_msg = self.send_complete(server, recipient, self.send_error(3), error=err)
     except smtplib.SMTPDataError as err:
         re_msg = self.send_complete(server, recipient, self.send_error(4), error=err)
     except smtplib.SMTPException as err:
         re_msg = self.send_complete(server, recipient, self.send_error(5, reason=err), error=err)
     except Exception as err:
         re_msg = self.send_complete(server, recipient, self.send_error(5, reason=err), error=err)
     
     if send_status == -1:
         tried_servers_temp.append(server.name)
         send_status, re_msg = self.send_mail(recipient, title, content, sender_name, server_name, template_data, lang, tried_servers_temp)
     
     return send_status, re_msg
 def get_koinex_stats(self) -> None:
     logger("################ Koinex Statastics #################")
     stats_dict = self.koinex_client.get_market_stats()
     for coin in stats_dict:
         max_value = stats_dict.get(coin).get('max_24hrs')
         min_value = stats_dict.get(coin).get('min_24hrs')
         avg_value = (float(min_value) + float(max_value)) / 2
         print("{coin_name}:".format(coin_name=coin) +
               " 24-Highest: {highest},".format(highest=max_value) +
               " 24-Lowest: {lowest},".format(lowest=min_value) +
               "Average price: {average}".format(average=avg_value))
Example #39
 def _generate_help_list(self):
     logger('methods', [attr[0] for attr in getmembers(self, predicate=ismethod)])
     command_functions = [attr[1] for attr in getmembers(self, predicate=ismethod) if attr[0][:8] == 'command_' and
                          attr[0] not in self.skip_in_help]
     help_message = ''
     for command_function in command_functions:
         if command_function.__doc__ is not None:
             help_message += '  /' + command_function.__name__[8:] + ' - ' + command_function.__doc__ + '\n'
         else:
             help_message += '  /' + command_function.__name__[8:] + ' - ' + '\n'
     return help_message
Example #40
 def get_updates(self, *args, index, offset, **kwargs):
     try:
         temp = self.bot.getUpdates(*args, offset=offset, **kwargs)
     except Exception as e:
         temp = []
         logger('because an error occurred updates will be empty id:', index, type(e), e.args, e)
     if self._getupdates_can_write[index]:
         self._last_updates = temp
     else:
         logger('error get_updates done. but not able to send output.', index)
     return temp
Example #41
def create_vm_worker(rg_name, vm_name, location, username, password, vm_type,
                     nic_id, subscription_id, image_name, project):
    con = create_db_con()
    client_id = Project.objects(name=project)[0]['client_id']
    secret = Project.objects(name=project)[0]['secret']
    tenant_id = Project.objects(name=project)[0]['tenant_id']
    subscription_id = Project.objects(name=project)[0]['subscription_id']
    creds = ServicePrincipalCredentials(client_id=client_id,
                                        secret=secret,
                                        tenant=tenant_id)
    compute_client = ComputeManagementClient(creds, subscription_id)
    con.close()
    print(
        f"Provisioning virtual machine {vm_name}; this operation might take a few minutes."
    )
    print(nic_id)
    poller = compute_client.virtual_machines.create_or_update(
        rg_name, vm_name, {
            "location": location,
            "storage_profile": {
                "image_reference": {
                    'id':
                    '/subscriptions/' + subscription_id + '/resourceGroups/' +
                    rg_name + '/providers/Microsoft.Compute/images/' +
                    image_name
                }
            },
            "hardware_profile": {
                "vm_size": vm_type
            },
            "os_profile": {
                "computer_name": vm_name,
                "admin_username": username,
                "admin_password": password
            },
            "network_profile": {
                "network_interfaces": [{
                    "id": nic_id,
                }]
            }
        })

    vm_result = poller.result()
    print("Provisioned virtual machine")
    try:
        con = create_db_con()
        BluePrint.objects(project=project,
                          image_id=image_name).update(vm_id=vm_result.name,
                                                      status='100')
    except Exception as e:
        print("VM creation updation failed: " + repr(e))
        logger("VM creation updation failed: " + repr(e), "warning")
    finally:
        con.close()
            def probs_metric(inverse=False):
                rand_p = Vec2(random()*table.width+table.min_point.x, random()*table.height+table.min_point.y)
                try:
                    bestmeaning, bestsentence = generate_sentence(rand_p, False, scene, speaker, usebest=True, golden=inverse, printing=printing)
                    sampled_landmark, sampled_relation = bestmeaning.args[0], bestmeaning.args[3]
                    golden_posteriors = get_all_sentence_posteriors(bestsentence, meanings, golden=(not inverse), printing=printing)

                    # lmk_prior = speaker.get_landmark_probability(sampled_landmark, landmarks, PointRepresentation(rand_p))[0]
                    all_lmk_probs = speaker.all_landmark_probs(landmarks, Landmark(None, PointRepresentation(rand_p), None))
                    all_lmk_probs = dict(zip(landmarks, all_lmk_probs))

                    lmk_prior = all_lmk_probs[sampled_landmark]
                    head_on = speaker.get_head_on_viewpoint(sampled_landmark)
                    rel_prior = speaker.get_probabilities_points( np.array([rand_p]), sampled_relation, head_on, sampled_landmark)
                    lmk_post = golden_posteriors[sampled_landmark]
                    rel_post = golden_posteriors[sampled_relation]

                    ps = np.array([golden_posteriors[lmk]*golden_posteriors[rel] for lmk, rel in meanings])
                    rank = None
                    for i,p in enumerate(ps):
                        lmk,rel = meanings[i]
                        # logger( '%f, %s' % (p, m2s(lmk,rel)))
                        head_on = speaker.get_head_on_viewpoint(lmk)
                        # ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
                        ps[i] *= all_lmk_probs[lmk]
                        ps[i] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk)
                        if lmk == sampled_landmark and rel == sampled_relation:
                            idx = i

                    ps += epsilon
                    ps = ps/ps.sum()
                    prob = ps[idx]
                    rank = sorted(ps, reverse=True).index(prob)
                    entropy = entropy_of_probs(ps)
                except (ParseError,RuntimeError) as e:
                    logger( e )
                    lmk_prior = 0
                    rel_prior = 0
                    lmk_post = 0
                    rel_post = 0
                    prob = 0
                    rank = len(meanings)-1
                    entropy = 0
                    distances = [[None]]

                head_on = speaker.get_head_on_viewpoint(sampled_landmark)
                all_descs = speaker.get_all_meaning_descriptions(trajector, scene, sampled_landmark, sampled_relation, head_on, 1)
                distances = []
                for desc in all_descs:
                    distances.append([edit_distance( bestsentence, desc ), desc])
                distances.sort()
                return lmk_prior,rel_prior,lmk_post,rel_post,\
                       prob,entropy,rank,distances[0][0],type(sampled_relation)
Example #43
def parse_mail_list(fetched_mails):
    cnt = 0  # hack in iterating, array type [mail, flags....]
    mails = []
    for mail in fetched_mails:
        # toPrint = if mail[0]: mail[0]
        # only store 0,2,4..
        if cnt % 2 != 1 and (cnt != len(fetched_mails) - 1 and re.search(
                '\\Seen', fetched_mails[cnt + 1]) is not None):
            logger(['mail to parse', mail[0]])
            mails.append(parse_mail(mail[1]))
        cnt += 1
    return mails
def _load_checkpoint_from_file(config):
    abs_path_ckpt = os.path.abspath(config.checkpoint_file)

    # Return checkpoint if valid
    if os.path.isfile(abs_path_ckpt):
        try:
            checkpoint = torch.load(abs_path_ckpt)
            return checkpoint
        except Exception as e:
            logger(f"Failed with exception {e}.")
    else:
        raise RuntimeError("Please specify a PyTorch checkpoint file.")
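A minimal usage sketch (the config object only needs a checkpoint_file attribute; the path below is a placeholder):

from types import SimpleNamespace

def _demo_load_checkpoint():
    config = SimpleNamespace(checkpoint_file="runs/latest/model.ckpt")
    return _load_checkpoint_from_file(config)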
def accept_correction( meaning, correction, update_func='geometric', update_scale=10 ):
    (lmk, lmk_prob, lmk_ent,
     rel, rel_prob, rel_ent,
     rel_exp_chain, rele_prob_chain, rele_ent_chain, rel_terminals, rel_landmarks,
     lmk_exp_chain, lmke_prob_chain, lmke_ent_chain, lmk_terminals, lmk_landmarks,
     rel_words, relw_prob, relw_ent,
     lmk_words, lmkw_prob, lmkw_ent) = meaning.args

    old_meaning_prob, old_meaning_entropy, lrpc, tps = get_sentence_meaning_likelihood( correction, lmk, rel )

    update = update_funcs[update_func](lmk_prob * rel_prob, old_meaning_prob, lmk_ent + rel_ent, old_meaning_entropy) * update_scale

    logger('Update functions is %s and update value is: %f' % (update_func, update))
    # print 'lmk_prob, lmk_ent, rel_prob, rel_ent, old_meaning_prob, old_meaning_entropy, update', lmk_prob, lmk_ent, rel_prob, rel_ent, old_meaning_prob, old_meaning_entropy, update
    # print lmk.object_class, type(rel)

    dec_update = -update

    for lhs,rhs,parent,_ in rel_exp_chain:
        # print 'Decrementing production - lhs: %s, rhs: %s, parent: %s' % (lhs,rhs,parent)
        update_expansion_counts( dec_update, lhs, rhs, parent, rel=rel )

    for lhs,rhs,parent,lmk in lmk_exp_chain:
        # print 'Decrementing production - lhs: %s, rhs: %s, parent: %s' % (lhs,rhs,parent)
        update_expansion_counts( dec_update, lhs, rhs, parent, lmk_class=(lmk.object_class if lmk else None),
                                                               lmk_ori_rels=get_lmk_ori_rels_str(lmk),
                                                               lmk_color=(lmk.color if lmk else None) )

    for term,word in zip(rel_terminals,rel_words):
        # print 'Decrementing word - pos: %s, word: %s, rel: %s' % (term, word, rel)
        update_word_counts( dec_update, term, word, rel=rel )

    for term,word,lmk in zip(lmk_terminals,lmk_words,lmk_landmarks):
        # print 'Decrementing word - pos: %s, word: %s, lmk_class: %s' % (term, word, lmk.object_class)
        update_word_counts( dec_update, term, word, lmk_class=lmk.object_class,
                                                    lmk_ori_rels=get_lmk_ori_rels_str(lmk),
                                                    lmk_color=(lmk.color if lmk else None) )

    # reward new words with old meaning
    for lhs,rhs,parent,lmk,rel in lrpc:
        # print 'Incrementing production - lhs: %s, rhs: %s, parent: %s' % (lhs,rhs,parent)
        update_expansion_counts( update, lhs, rhs, parent, rel=rel,
                                                           lmk_class=(lmk.object_class if lmk else None),
                                                           lmk_ori_rels=get_lmk_ori_rels_str(lmk),
                                                           lmk_color=(lmk.color if lmk else None) )

    for lhs,rhs,lmk,rel in tps:
        # print 'Incrementing word - pos: %s, word: %s, lmk_class: %s' % (lhs, rhs, (lmk.object_class if lmk else None) )
        update_word_counts( update, lhs, rhs, lmk_class=(lmk.object_class if lmk else None),
                                              rel=rel,
                                              lmk_ori_rels=get_lmk_ori_rels_str(lmk),
                                              lmk_color=(lmk.color if lmk else None) )
Example #46
def make_diffstring(content_ab, separator):
    raw_text_input_a, raw_text_input_b = content_ab
    text_input_a = raw_text_input_a.split(separator)
    text_input_b = raw_text_input_b.split(separator)
    
    # http://docs.python.org/library/difflib.html     
    diff_object = difflib.HtmlDiff(wrapcolumn=87)
    diff_string = diff_object.make_table( text_input_a, text_input_b)

    if not type(diff_string) == unicode:
        logger('make_table failed')
        return

    return ''.join(diff_string)
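difflib.HtmlDiff.make_table returns an HTML table string; a small self-contained illustration of the call made above:

import difflib

def _demo_html_diff():
    before = "alpha\nbeta\ngamma".split("\n")
    after = "alpha\nbeta\ndelta".split("\n")
    # wrapcolumn wraps long lines inside the generated table cells.
    return difflib.HtmlDiff(wrapcolumn=87).make_table(before, after)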
def E():
    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 3))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        landmarkGt = landmarkGt[:3, :]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = EN(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
Example #48
        def heatmaps_for_sentences(sentences, all_meanings, loi_infos, xs, ys, scene, speaker, step=0.02):
            printing=False
            x = np.array( [list(xs-step*0.5)]*len(ys) )
            y = np.array( [list(ys-step*0.5)]*len(xs) ).T
            scene_bb = scene.get_bounding_box()
            scene_bb = scene_bb.inflate( Vec2(scene_bb.width*0.5,scene_bb.height*0.5) )

            combined_heatmaps = []
            for obj_lmk, ms, heatmapss in loi_infos:

                combined_heatmap = None
                for sentence in sentences:
                    posteriors = None
                    while not posteriors:
                        try:
                            posteriors = get_all_sentence_posteriors(sentence, all_meanings, printing=printing)
                        except ParseError as pe:
                            raise pe
                        except Exception as e:
                            print e
                            sleeptime = random()*0.5
                            logger('Sleeping for %f and retrying "%s"' % (sleeptime,sentence))
                            time.sleep(sleeptime)
                            continue

                    big_heatmap1 = None
                    for m,(h1,h2) in zip(ms, heatmapss):

                        lmk,rel = m
                        p = posteriors[rel]*posteriors[lmk]
                        if big_heatmap1 is None:
                            big_heatmap1 = p*h1
                        else:
                            big_heatmap1 += p*h1

                    if combined_heatmap is None:
                        combined_heatmap = big_heatmap1
                    else:
                        combined_heatmap *= big_heatmap1

                combined_heatmaps.append(combined_heatmap)

            return combined_heatmaps
def main(args):
	print args['-config']
	outfile = args['-out']
	config = json2.loadf(args['-config'])
	xml_validator = XMLValidator(config["sra"]["sample"])
	ihec_validators = cmn.safedict([(schema["version"] ,  JsonSchema(schema["schema"], args)) for schema in config["ihec"]["sample"]])
	
	objtype = 'SAMPLE'
	objset = 'SAMPLE_SET'

	validated = list()
	xmllist = args.args()
	nObjs = 0
	for e in xmllist:
		sra = SRAParseObjSet.from_file(e)
		nObjs += sra.nOffspring()
		assert  sra.xml.getroot().tag  == objset, ['__Expected:' + objset]
		assert sra.is_valid__xml(xml_validator) or args.has('-not-sra-xml-but-try')
		v = SampleValidator(sra, ihec_validators)
		validated.extend(v.is_valid_ihec())

	versioned_xml = ['<{0}>'.format(objset) ]
	for e in validated:
		(version, xml) = e
		sra_versioned = SRAParseObj(xml)
		sra_versioned.add_attribute("VALIDATED_AGAINST_METADATA_SPEC", "{0}/{1}".format(version, objtype))
		versioned_xml.append(sra_versioned.tostring())
	versioned_xml.append('</{0}>'.format(objset))


	validated_xml_file = cmn.writel(outfile, versioned_xml)
	print 'written:' + validated_xml_file
	print 'validated:', len(validated)
	print 'failed:', nObjs - len(validated)
	
	if validated:
		validated_xml_set = SRAParseObjSet.from_file(validated_xml_file)
		assert validated_xml_set.is_valid__xml(xml_validator)  or args.has("-skip-updated-xml-validation")
		logger('ok\n')
	else:
		logger('..no valid objects found\n')
Example #50
            def probs_metric():
                meaning, sentence = generate_sentence(rand_p, consistent, scene, speaker, usebest=True, printing=printing)
                sampled_landmark,sampled_relation = meaning.args[0],meaning.args[3]
                print meaning.args[0],meaning.args[3], len(sentence)
                if sentence == "":
                    prob = 0
                    entropy = 0
                else:
                    logger( 'Generated sentence: %s' % sentence)
                    try:
                        golden_posteriors = get_all_sentence_posteriors(sentence, meanings, golden=True, printing=printing)
                        epsilon = 1e-15
                        ps = np.array([golden_posteriors[lmk]*golden_posteriors[rel] for lmk, rel in meanings])
                        temp = None
                        for i,p in enumerate(ps):
                            lmk,rel = meanings[i]
                            # logger( '%f, %s' % (p, m2s(lmk,rel)))
                            head_on = speaker.get_head_on_viewpoint(lmk)
                            ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
                            ps[i] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk)
                            if lmk == meaning.args[0] and rel == meaning.args[3]:
                                temp = i

                        ps += epsilon
                        ps = ps/ps.sum()
                        prob = ps[temp]
                        rank = sorted(ps, reverse=True).index(prob)
                        entropy = entropy_of_probs(ps)
                    except ParseError as e:
                        logger( e )
                        prob = 0
                        rank = len(meanings)-1
                        entropy = 0

                head_on = speaker.get_head_on_viewpoint(sampled_landmark)
                all_descs = speaker.get_all_meaning_descriptions(trajector, scene, sampled_landmark, sampled_relation, head_on, 1)
                distances = []
                for desc in all_descs:
                    distances.append([edit_distance( sentence, desc ), desc])
                distances.sort()
                return prob,entropy,rank,distances[0][0]
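
probs_metric also calls an entropy_of_probs helper that is not included here. A plausible implementation, assuming it simply returns the Shannon entropy of the (normalised) probability vector:

import numpy as np

def entropy_of_probs(ps):
    # hypothetical helper: Shannon entropy of a probability vector, with a
    # small epsilon to guard against log(0)
    ps = np.asarray(ps, dtype=float)
    ps = ps / ps.sum()
    return float(-np.sum(ps * np.log(ps + 1e-15)))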
Exemple #51
0
 def ready(self):
     print '############initialization###############'
     log=logger()
     #m=monitor(log=log,name='test1',interval=10,mailInterval=10,smsInterval=30,smsBegin=1,smsEnd=1,describe=1,contacts=1,retryTimes=3,url='http://www.baidu.com',timeout=1000,retryInterval=5,status=True,statusCode=200)
     #m.start()
     from models import items
     result=items.objects.all()
     for i in result:
         m=monitor(logger=log,id=i.id,name=i.name,interval=i.interval,mailInterval=i.mailInterval,smsInterval=i.smsInterval,smsBegin=i.smsBegin,smsEnd=i.smsEnd,retryTimes=i.retryTimes,describe=i.describe,url=i.url,timeout=i.timeout,retryInterval=i.retryInterval,contacts=i.contacts,status=i.status,statusCode=i.statusCode)
         m.setName(i.name)
         m.start()
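
The ready() method above is the Django AppConfig start-up hook; a hedged sketch of how it would typically be wired up (the app label and class name below are assumptions, not taken from the original project):

from django.apps import AppConfig

class MonitorAppConfig(AppConfig):
    name = 'monitorapp'  # assumed app label

    def ready(self):
        # start one monitor thread per configured item, as in the snippet above
        pass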
         
Exemple #52
0
            def choosing_object_metric():
                trajector = choice(loi)

                sentence, sampled_relation, sampled_landmark = speaker.describe(trajector, scene, max_level=1)

                lmk_probs = []
                try:
                    combined_heatmaps = heatmaps_for_sentence(sentence, all_meanings, loi_infos, xs, ys, scene, speaker, step=step)
                    
                    for combined_heatmap,obj_lmk in zip(combined_heatmaps, loi):
                        ps = [p for (x,y),p in zip(list(product(xs,ys)),combined_heatmap) if obj_lmk.representation.contains_point( Vec2(x,y) )]
                        # print ps, xs.shape, ys.shape, combined_heatmap.shape
                        lmk_probs.append( (sum(ps)/len(ps), obj_lmk) )
                      
                    lmk_probs = sorted(lmk_probs, reverse=True)
                    top_p, top_lmk = lmk_probs[0]
                    lprobs, lmkss = zip(*lmk_probs)
                    
                    logger( sorted(zip(np.array(lprobs)/sum(lprobs), [(l.name, l.color, l.object_class) for l in lmkss]), reverse=True) )
                    logger( 'I bet %f you are talking about a %s %s %s' % (top_p/sum(lprobs), top_lmk.name, top_lmk.color, top_lmk.object_class) )
                    # objects.append(top_lmk)
                except Exception as e:
                    logger( 'Unable to get object from sentence. %s' % e, 'fail' )
                    print traceback.format_exc()
                    exit()
                return loi.index(trajector), [ (lprob, loi.index(lmk)) for lprob,lmk in lmk_probs ]
Exemple #53
0
def generate(ftxt, mode, argument=False):
    """
        Generate Training Data for LEVEL-2
        mode = train or test
    """
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.05)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s'%name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File('train/2_%s/%s.h5'%(name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('train/2_%s/%s.txt'%(name, mode), 'w') as fd:
            fd.write('train/2_%s/%s.h5'%(name, mode))
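
generate() depends on a shuffle_in_unison_scary helper that is not part of this excerpt. A sketch of the usual implementation, assuming it permutes patches and landmarks with the same random state so the pairs stay aligned:

import numpy as np

def shuffle_in_unison_scary(a, b):
    # hypothetical helper: reuse the RNG state so both arrays get the same
    # permutation, keeping patch i paired with landmark i
    rng_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(rng_state)
    np.random.shuffle(b)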
Exemple #54
0
 def scan_once(self, last_update_id, update_timeout):
     bot_name = self.bot.username
     self._getupdates_can_write.append(True)
     get_updates_index = len(self._getupdates_can_write) - 1
     get_updates_thread = threading.Thread(target=self.get_updates,
                                           kwargs={'index': get_updates_index,
                                                   'offset': last_update_id})
     get_updates_thread.start()
     get_updates_thread.join(timeout=update_timeout)
     if get_updates_thread.isAlive():
         logger('ERROR getupdates timed out, using empty list')
         self._getupdates_can_write[get_updates_index] = False
         self._last_updates = []
     updates = self._last_updates
     for update in updates:
         last_update_id = update.update_id + 1
         message = update.message
         if message.text[0] == '/':
             command, username = message.text.split(' ')[0], bot_name
             if '@' in command:
                 command, username = command.split('@')
             if username == bot_name:
                 events['on_command'].trigger(update.message)
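
scan_once expects a get_updates worker that fills self._last_updates and honours the _getupdates_can_write flag. A hedged sketch of such a worker, assuming a python-telegram-bot style bot.getUpdates call (the exact client API is an assumption):

def get_updates(self, index, offset):
    # hypothetical worker body: fetch updates, but only publish them if
    # scan_once has not already timed this call out
    try:
        updates = self.bot.getUpdates(offset=offset)
    except Exception as e:
        logger('ERROR getupdates failed: %s' % e)
        updates = []
    if self._getupdates_can_write[index]:
        self._last_updates = updates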
Exemple #55
0
def get_raw_url_from_gist_id(gist_id, gist_name_propper=None):
    gist_id = str(gist_id)
    url = 'https://api.github.com/gists/' + gist_id
    
    found_json = urllib.urlopen(url).read()
    wfile = json.JSONDecoder()
    wjson = wfile.decode(found_json)

    files_flag = 'files'
    file_names = wjson[files_flag].keys()

    logger(file_names)
    logger(gist_name_propper)

    if not gist_name_propper:
        file_name = file_names[0]
    else:
        # this is a little crude.
        if gist_name_propper.startswith('file_'):
            gist_name_propper = gist_name_propper[5:]
        file_name = gist_name_propper

    return wjson[files_flag][file_name]['raw_url']
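
A hedged usage sketch for get_raw_url_from_gist_id; the gist id below is a placeholder, and the snippet keeps the Python 2 urllib style used above:

import urllib

raw_url = get_raw_url_from_gist_id('123456')  # placeholder gist id
logger('raw url: %s' % raw_url)
source = urllib.urlopen(raw_url).read()
logger('fetched %d bytes' % len(source))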
Exemple #56
0
	def validate(self, jsonObj, details):
		try:
			jsonschema.validate(jsonObj, self.schema, format_checker=jsonschema.FormatChecker())
			return True
		except jsonschema.ValidationError as err:
			if self.verbose:
				self.errs.append(err)
				logfile = self.errlog(len(self.errs),  self.obj_id(details))
				with open(logfile, "w") as errs:
					context_size = len(err.context)
					if context_size > 0:
						errs.write('Multiple sub-schemas can apply. This is what prevents successful validation in each:\n')
						prev_schema = -1
						for suberror in sorted(err.context, key=lambda e: e.schema_path):
							schema_index = suberror.schema_path[0]
							if prev_schema < schema_index:
								errs.write('Schema %d:\n' % (schema_index + 1))
								prev_schema = schema_index
							errs.write('  %s\n' % (suberror.message))
					else:
						errs.write(err.message)

				logger('#__validationFailuresFound: see {0}\n'.format(logfile))
			return False
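
For context, a minimal stand-alone sketch of the same pass/fail contract the validate() method above implements, using jsonschema directly; the schema path and document are placeholders, not part of the original class:

import json
import jsonschema

schema = json.load(open('sample.schema.json'))  # placeholder schema file
try:
    jsonschema.validate({'sample_id': 'S1'}, schema,
                        format_checker=jsonschema.FormatChecker())
    valid = True
except jsonschema.ValidationError as err:
    valid = False
    logger('#__validationFailuresFound: %s\n' % err.message)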
Exemple #57
0
def generate_sentence(loc, consistent, scene=None, speaker=None, usebest=False, golden=False, meaning=None, printing=True):
    utils.scene = utils.ModelScene(scene, speaker)

    if meaning is None:
        (lmk, lmk_prob, lmk_ent), (rel, rel_prob, rel_ent) = get_meaning(loc=loc, usebest=usebest)
    else:
        lmk, rel = meaning
        lmk_prob = lmk_ent = rel_prob = rel_ent = None
    meaning1 = m2s(lmk, rel)
    logger( meaning1 )

    while True:
        rel_exp_chain, rele_prob_chain, rele_ent_chain, rel_terminals, rel_landmarks = get_expansion('RELATION', rel=rel, usebest=usebest, golden=golden, printing=printing)
        lmk_exp_chain, lmke_prob_chain, lmke_ent_chain, lmk_terminals, lmk_landmarks = get_expansion('LANDMARK-PHRASE', lmk=lmk, usebest=usebest, golden=golden, printing=printing)
        rel_words, relw_prob, relw_ent, rel_a = get_words(rel_terminals, landmarks=rel_landmarks, rel=rel, usebest=usebest, golden=golden, printing=printing)
        lmk_words, lmkw_prob, lmkw_ent, lmk_a = get_words(lmk_terminals, landmarks=lmk_landmarks, prevword=(rel_words[-1] if rel_words else None), usebest=usebest, golden=golden, printing=printing)
        sentence = ' '.join(rel_words + lmk_words)

        if printing: logger( 'rel_exp_chain: %s' % rel_exp_chain )
        if printing: logger( 'lmk_exp_chain: %s' % lmk_exp_chain )

        meaning = Meaning((lmk, lmk_prob, lmk_ent,
                           rel, rel_prob, rel_ent,
                           rel_exp_chain, rele_prob_chain, rele_ent_chain, rel_terminals, rel_landmarks,
                           lmk_exp_chain, lmke_prob_chain, lmke_ent_chain, lmk_terminals, lmk_landmarks,
                           rel_words, relw_prob, relw_ent,
                           lmk_words, lmkw_prob, lmkw_ent))
        meaning.rel_a = rel_a
        meaning.lmk_a = lmk_a

        if consistent:
             # get the most likely meaning for the generated sentence
            try:
                posteriors = get_sentence_posteriors(sentence, iterations=10, extra_meaning=(lmk,rel))
            except:
                print 'try again ...'
                continue

            meaning2 = max(posteriors, key=itemgetter(1))[0]

            # is the original meaning the best one?
            if meaning1 != meaning2:
                print
                print 'sentence:', sentence
                print 'original:', meaning1
                print 'interpreted:', meaning2
                print 'try again ...'
                print
                continue

            for m,p in sorted(posteriors, key=itemgetter(1)):
                print m, p

        return meaning, sentence
Exemple #58
0
	def is_valid_ihec(self):
		validated = list()
		for (xml, attrs) in self.xmljson:
			(version, title_sanitized)  = self.latest_valid_spec(attrs)
			semantics_ok = self.validate_semantics(attrs)
			if version and semantics_ok:
				validated.append((version, xml))
				logger("# is valid ihec spec:{0} version:{1} [{2}]\n".format('True', version, title_sanitized))
			else:
				logger("# is valid ihec spec:{0} version:{1} [{2}]\n".format('False', '__invalid__', title_sanitized))
			if version and not semantics_ok:
				logger("# found a valid spec version but failed semantic validation:{0}\n".format(title_sanitized))

		return validated
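
is_valid_ihec delegates to a latest_valid_spec method that is not shown here. A hedged sketch of what it plausibly does, assuming ihec_validators maps version strings to validators with the validate(jsonObj, details) interface shown earlier; the ordering, the TITLE lookup and the return values are all assumptions:

	def latest_valid_spec(self, attrs):
		# hypothetical sketch: try spec versions newest-first and return the
		# first one that validates, plus a sanitised title for logging
		title_sanitized = attrs.get('TITLE', '__untitled__')
		for version in sorted(self.ihec_validators, reverse=True):
			if self.ihec_validators[version].validate(attrs, title_sanitized):
				return (version, title_sanitized)
		return (None, title_sanitized)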
Exemple #59
0
def autocorrect(scene, speaker, num_iterations=1, scale=1000, num_processors=7, num_samples=10, 
                best_samples=False, consistent=False, initial_training=False, cheating=False, 
                explicit_pointing=False, ambiguous_pointing=False, multiply=False,
                golden_metric=True, mass_metric=True, student_metric=True, choosing_metric=True):
    plt.ion()

    assert initial_training + cheating + explicit_pointing + ambiguous_pointing == 1, \
        'Must choose Initial Training, Cheating, Explicit or Ambiguous'

    printing=False

    scene_bb = scene.get_bounding_box()
    scene_bb = scene_bb.inflate( Vec2(scene_bb.width*0.5,scene_bb.height*0.5) )
    table = scene.landmarks['table'].representation.get_geometry()

    step = 0.04
    if choosing_metric:
        loi = [lmk for lmk in scene.landmarks.values() if lmk.name != 'table']
        all_heatmaps_tupless, xs, ys = speaker.generate_all_heatmaps(scene, step=step, loi=loi)

        loi_infos = []
        all_meanings = set()
        for obj_lmk,all_heatmaps_tuples in zip(loi, all_heatmaps_tupless):
            
            lmks, rels, heatmapss = zip(*all_heatmaps_tuples)
            meanings = zip(lmks,rels)
            # print meanings
            all_meanings.update(meanings)
            loi_infos.append( (obj_lmk, meanings, heatmapss) )

    all_heatmaps_tupless, xs, ys = speaker.generate_all_heatmaps(scene, step=step)
    all_heatmaps_tuples = all_heatmaps_tupless[0]
    x = np.array( [list(xs-step*0.5)]*len(ys) )
    y = np.array( [list(ys-step*0.5)]*len(xs) ).T

    # all_heatmaps_tuples = []
    # for lmk, d in all_heatmaps_dict.items():
    #     for rel, heatmaps in d.items():
    #         all_heatmaps_tuples.append( (lmk,rel,heatmaps) )
    # all_heatmaps_tuples = all_heatmaps_tuples[:100]
    lmks, rels, heatmapss = zip(*all_heatmaps_tuples)
    graphmax1 = graphmax2 = 0
    meanings = zip(lmks,rels)
    landmarks = list(set(lmks))
    relations = list(set(rels))

    demo_sentences = ['near to the left edge of the table',
                      'somewhat near to the right edge of the table',
                      'on the table',
                      'on the middle of the table',
                      'at the lower left corner of the table',
                      'far from the purple prism']

    epsilon = 0.0001
    def heatmaps_for_sentence(sentence, all_meanings, loi_infos, xs, ys, scene, speaker, step=0.02):
        printing=False
        scene_bb = scene.get_bounding_box()
        scene_bb = scene_bb.inflate( Vec2(scene_bb.width*0.5,scene_bb.height*0.5) )
        x = np.array( [list(xs-step*0.5)]*len(ys) )
        y = np.array( [list(ys-step*0.5)]*len(xs) ).T

        posteriors = get_all_sentence_posteriors(sentence, all_meanings, printing=printing)

        combined_heatmaps = []
        for obj_lmk, meanings, heatmapss in loi_infos:

            big_heatmap1 = None
            for m,(h1,h2) in zip(meanings, heatmapss):
                lmk,rel = m
                p = posteriors[rel]*posteriors[lmk]
                if big_heatmap1 is None:
                    big_heatmap1 = p*h1
                else:
                    big_heatmap1 += p*h1

            combined_heatmaps.append(big_heatmap1)

        return combined_heatmaps

    def loop(num_iterations):
        min_dists = []
        lmk_priors = []
        rel_priors = []
        lmk_posts = []
        rel_posts = []
        golden_log_probs = []
        golden_entropies = []
        golden_ranks = []
        rel_types = []

        total_mass = []

        student_probs = []
        student_entropies = []
        student_ranks = []
        student_rel_types = []

        object_answers = []
        object_distributions = []

        epsilon = 1e-15
        for iteration in range(num_iterations):
            logger(('Iteration %d' % iteration),'okblue')
            rand_p = Vec2(random()*table.width+table.min_point.x, random()*table.height+table.min_point.y)
            trajector = Landmark( 'point', PointRepresentation(rand_p), None, Landmark.POINT )
            
            if initial_training:

                sentence, sampled_relation, sampled_landmark = speaker.describe(trajector, scene, False, 1)

                if num_samples:
                    for i in range(num_samples):
                        landmark, relation, _ = speaker.sample_meaning(trajector, scene, 1)
                        train((landmark,relation), sentence, update=1, printing=printing)
                else:
                    for (landmark,relation),prob in speaker.all_meaning_probs( trajector, scene, 1 ):
                        train((landmark,relation), sentence, update=prob, printing=printing)

            else:
                meaning, sentence = generate_sentence(rand_p, consistent, scene, speaker, usebest=True, printing=printing)
                logger( 'Generated sentence: %s' % sentence)

                if cheating:
                    landmark, relation = meaning.args[0],meaning.args[3]
                else:
                    if explicit_pointing:
                        landmark = meaning.args[0]
                    if ambiguous_pointing:
                        landmark = meaning.args[0]  # otherwise undefined here; the pointing point is placed near the true landmark
                        pointing_point = landmark.representation.middle + Vec2(random()*0.1-0.05,random()*0.1-0.05)
                    #_, bestsentence = generate_sentence(rand_p, consistent, scene, speaker, usebest=True, printing=printing)

                    try:
                        golden_posteriors = get_all_sentence_posteriors(sentence, meanings, golden=True, printing=printing)
                    except ParseError as e:
                        logger( e )
                        prob = 0
                        rank = len(meanings)-1
                        entropy = 0
                        ed = len(sentence)
                        golden_log_probs.append( prob )
                        golden_entropies.append( entropy )
                        golden_ranks.append( rank )
                        min_dists.append( ed )
                        continue
                    epsilon = 1e-15
                    ps = [[golden_posteriors[lmk]*golden_posteriors[rel],(lmk,rel)] for lmk, rel in meanings if ((not explicit_pointing) or lmk == landmark)]

                    if not explicit_pointing:
                        all_lmk_probs = speaker.all_landmark_probs(landmarks, Landmark(None, PointRepresentation(rand_p), None))
                        all_lmk_probs = dict(zip(landmarks, all_lmk_probs))
                    if ambiguous_pointing:
                        all_lmk_pointing_probs = speaker.all_landmark_probs(landmarks, Landmark(None, PointRepresentation(pointing_point), None))
                        all_lmk_pointing_probs = dict(zip(landmarks, all_lmk_pointing_probs))
                    temp = None
                    for i,(p,(lmk,rel)) in enumerate(ps):
                        # lmk,rel = meanings[i]
                        # logger( '%f, %s' % (p, m2s(lmk,rel)))
                        head_on = speaker.get_head_on_viewpoint(lmk)
                        if not explicit_pointing:
                            # ps[i][0] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
                            ps[i][0] *= all_lmk_probs[lmk]
                        if ambiguous_pointing:
                            # ps[i][0] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(pointing_point))[0]
                            ps[i][0] *= all_lmk_pointing_probs[lmk]
                        ps[i][0] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk)[0]
                        if lmk == meaning.args[0] and rel == meaning.args[3]:
                            temp = i

                    ps,_meanings = zip(*ps)
                    print ps
                    ps = np.array(ps)
                    ps += epsilon
                    ps = ps/ps.sum()
                    temp = ps[temp]

                    ps = sorted(zip(ps,_meanings),reverse=True)

                    logger( 'Attempted to say: %s' %  m2s(meaning.args[0],meaning.args[3]) )
                    logger( 'Interpreted as: %s' % m2s(ps[0][1][0],ps[0][1][1]) )
                    logger( 'Attempted: %s vs Interpreted: %s' % (str(temp), str(ps[0][0])))

                    # logger( 'Golden entropy: %f, Max entropy %f' % (golden_entropy, max_entropy))

                    landmark, relation = ps[0][1]
                head_on = speaker.get_head_on_viewpoint(landmark)
                all_descs = speaker.get_all_meaning_descriptions(trajector, scene, landmark, relation, head_on, 1)

                distances = []
                for desc in all_descs:
                    distances.append([edit_distance( sentence, desc ), desc])

                distances.sort()
                print distances

                correction = distances[0][1]
                if correction == sentence: 
                    correction = None
                    logger( 'No correction!!!!!!!!!!!!!!!!!!', 'okgreen' )
                accept_correction( meaning, correction, update_scale=scale, eval_lmk=(not explicit_pointing), multiply=multiply, printing=printing )

            def probs_metric(inverse=False):
                bestmeaning, bestsentence = generate_sentence(rand_p, consistent, scene, speaker, usebest=True, golden=inverse, printing=printing)
                sampled_landmark, sampled_relation = bestmeaning.args[0], bestmeaning.args[3]
                try:
                    golden_posteriors = get_all_sentence_posteriors(bestsentence, meanings, golden=(not inverse), printing=printing)

                    # lmk_prior = speaker.get_landmark_probability(sampled_landmark, landmarks, PointRepresentation(rand_p))[0]
                    all_lmk_probs = speaker.all_landmark_probs(landmarks, Landmark(None, PointRepresentation(rand_p), None))
                    all_lmk_probs = dict(zip(landmarks, all_lmk_probs))

                    lmk_prior = all_lmk_probs[sampled_landmark]
                    head_on = speaker.get_head_on_viewpoint(sampled_landmark)
                    rel_prior = speaker.get_probabilities_points( np.array([rand_p]), sampled_relation, head_on, sampled_landmark)
                    lmk_post = golden_posteriors[sampled_landmark]
                    rel_post = golden_posteriors[sampled_relation]

                    ps = np.array([golden_posteriors[lmk]*golden_posteriors[rel] for lmk, rel in meanings])
                    rank = None
                    for i,p in enumerate(ps):
                        lmk,rel = meanings[i]
                        # logger( '%f, %s' % (p, m2s(lmk,rel)))
                        head_on = speaker.get_head_on_viewpoint(lmk)
                        # ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
                        ps[i] *= all_lmk_probs[lmk]
                        ps[i] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk)
                        if lmk == sampled_landmark and rel == sampled_relation:
                            idx = i

                    ps += epsilon
                    ps = ps/ps.sum()
                    prob = ps[idx]
                    rank = sorted(ps, reverse=True).index(prob)
                    entropy = entropy_of_probs(ps)
                except ParseError as e:
                    logger( e )
                    lmk_prior = 0
                    rel_prior = 0
                    lmk_post = 0
                    rel_post = 0
                    prob = 0
                    rank = len(meanings)-1
                    entropy = 0
                    distances = [[None]]

                head_on = speaker.get_head_on_viewpoint(sampled_landmark)
                all_descs = speaker.get_all_meaning_descriptions(trajector, scene, sampled_landmark, sampled_relation, head_on, 1)
                distances = []
                for desc in all_descs:
                    distances.append([edit_distance( bestsentence, desc ), desc])
                distances.sort()
                return lmk_prior,rel_prior,lmk_post,rel_post,\
                       prob,entropy,rank,distances[0][0],type(sampled_relation)

            def db_mass():
                total = CProduction.get_production_sum(None)
                total += CWord.get_word_sum(None)
                return total

            def choosing_object_metric():
                trajector = choice(loi)

                sentence, sampled_relation, sampled_landmark = speaker.describe(trajector, scene, max_level=1)

                lmk_probs = []
                try:
                    combined_heatmaps = heatmaps_for_sentence(sentence, all_meanings, loi_infos, xs, ys, scene, speaker, step=step)
                    
                    for combined_heatmap,obj_lmk in zip(combined_heatmaps, loi):
                        ps = [p for (x,y),p in zip(list(product(xs,ys)),combined_heatmap) if obj_lmk.representation.contains_point( Vec2(x,y) )]
                        # print ps, xs.shape, ys.shape, combined_heatmap.shape
                        lmk_probs.append( (sum(ps)/len(ps), obj_lmk) )
                      
                    lmk_probs = sorted(lmk_probs, reverse=True)
                    top_p, top_lmk = lmk_probs[0]
                    lprobs, lmkss = zip(*lmk_probs)
                    
                    logger( sorted(zip(np.array(lprobs)/sum(lprobs), [(l.name, l.color, l.object_class) for l in lmkss]), reverse=True) )
                    logger( 'I bet %f you are talking about a %s %s %s' % (top_p/sum(lprobs), top_lmk.name, top_lmk.color, top_lmk.object_class) )
                    # objects.append(top_lmk)
                except Exception as e:
                    logger( 'Unable to get object from sentence. %s' % e, 'fail' )
                    print traceback.format_exc()
                    exit()
                return loi.index(trajector), [ (lprob, loi.index(lmk)) for lprob,lmk in lmk_probs ]

            if golden_metric:
                lmk_prior,rel_prior,lmk_post,rel_post,prob,entropy,rank,ed,rel_type = probs_metric()
            else:
                lmk_prior,rel_prior,lmk_post,rel_post,prob,entropy,rank,ed,rel_type = \
                None, None, None, None, None, None, None, None, None

            lmk_priors.append( lmk_prior )
            rel_priors.append( rel_prior )
            lmk_posts.append( lmk_post )
            rel_posts.append( rel_post )
            golden_log_probs.append( prob )
            golden_entropies.append( entropy )
            golden_ranks.append( rank )
            min_dists.append( ed )
            rel_types.append( rel_type )

            if mass_metric:
                total_mass.append( db_mass() )
            else:
                total_mass.append( None )

            if student_metric:
                _,_,_,_,student_prob,student_entropy,student_rank,_,student_rel_type = probs_metric(inverse=True)
            else:
                _,_,_,_,student_prob,student_entropy,student_rank,_,student_rel_type = \
                None, None, None, None, None, None, None, None, None

            student_probs.append( student_prob )
            student_entropies.append( student_entropy )
            student_ranks.append( student_rank )
            student_rel_types.append( student_rel_type )

            if choosing_metric:
                answer, distribution = choosing_object_metric()
            else:
                answer, distribution = None, None
            object_answers.append( answer )
            object_distributions.append( distribution )

        return zip(lmk_priors, rel_priors, lmk_posts, rel_posts,
                   golden_log_probs, golden_entropies, golden_ranks, 
                   min_dists, rel_types, total_mass, student_probs, 
                   student_entropies, student_ranks, student_rel_types,
                   object_answers, object_distributions)

    filename = ''
    if initial_training: filename += 'initial_training'
    if cheating: filename+= 'cheating'
    if explicit_pointing: filename+='explicit_pointing'
    if ambiguous_pointing: filename+='ambiguous_pointing'
    if multiply: filename+='_multiply'
    filename += ('_p%i_n%i_u%i.shelf' % (num_processors,num_iterations,scale))
    import shelve
    f = shelve.open(filename)
    f['lmk_priors']           = []
    f['rel_priors']           = []
    f['lmk_posts']            = []
    f['rel_posts']            = []
    f['golden_log_probs']     = []
    f['golden_entropies']     = []
    f['golden_ranks']         = []
    f['min_dists']            = []
    f['rel_types']            = []
    f['total_mass']           = []
    f['student_probs']        = []
    f['student_entropies']    = []
    f['student_ranks']        = []
    f['student_rel_types']    = []
    f['object_answers']       = []
    f['object_distributions'] = []
    f['initial_training']     = initial_training
    f['cheating']             = cheating
    f['explicit_pointing']    = explicit_pointing
    f['ambiguous_pointing']   = ambiguous_pointing
    f.close()

    chunk_size = 10
    num_each = int(num_iterations/num_processors)
    n = int(num_each / chunk_size)
    extra = num_each % chunk_size
    logger( "num_each: %i, chunk_size: %i, n: %i, extra: %i" % (num_each, chunk_size, n, extra) )

    for i in range(n):
        lists = parmap(loop,[chunk_size]*num_processors)
        # lists = map(loop,[chunk_size]*num_processors)

        result = []
        for i in range(chunk_size):
            for j in range(num_processors):
                result.append( lists[j][i] )
        lmk_priors, rel_priors, lmk_posts, rel_posts, \
            golden_log_probs, golden_entropies, golden_ranks, \
            min_dists, rel_types, total_mass, student_probs, student_entropies, \
            student_ranks, student_rel_types, object_answers, object_distributions = zip(*result)
        f = shelve.open(filename)
        f['lmk_priors']           += lmk_priors
        f['rel_priors']           += rel_priors
        f['lmk_posts']            += lmk_posts
        f['rel_posts']            += rel_posts
        f['golden_log_probs']     += golden_log_probs
        f['golden_entropies']     += golden_entropies
        f['golden_ranks']         += golden_ranks
        f['min_dists']            += min_dists
        f['rel_types']            += rel_types
        f['total_mass']           += total_mass
        f['student_probs']        += student_probs
        f['student_entropies']    += student_entropies
        f['student_ranks']        += student_ranks
        f['student_rel_types']    += student_rel_types
        f['object_answers']       += object_answers
        f['object_distributions'] += object_distributions
        f.close()
        
    if extra:
        lists = parmap(loop,[extra]*num_processors)
        # lists = map(loop,[extra]*num_processors)
        result = []
        for i in range(extra):
            for j in range(num_processors):
                result.append( lists[j][i] )
        lmk_priors, rel_priors, lmk_posts, rel_posts, \
            golden_log_probs, golden_entropies, golden_ranks, \
            min_dists, rel_types, total_mass, student_probs, student_entropies, \
            student_ranks, student_rel_types, object_answers, object_distributions = zip(*result)
        f = shelve.open(filename)
        f['lmk_priors']           += lmk_priors
        f['rel_priors']           += rel_priors
        f['lmk_posts']            += lmk_posts
        f['rel_posts']            += rel_posts
        f['golden_log_probs']     += golden_log_probs
        f['golden_entropies']     += golden_entropies
        f['golden_ranks']         += golden_ranks
        f['min_dists']            += min_dists
        f['rel_types']            += rel_types
        f['total_mass']           += total_mass
        f['student_probs']        += student_probs
        f['student_entropies']    += student_entropies
        f['student_ranks']        += student_ranks
        f['student_rel_types']    += student_rel_types
        f['object_answers']       += object_answers
        f['object_distributions'] += object_distributions
        f.close()

    exit()
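
The training loop and probs_metric above score generated sentences with an edit_distance helper that is not part of this excerpt. A sketch of a standard character-level Levenshtein distance matching how it is called here (two strings in, an integer out); whether the original works on characters or on words is an assumption:

def edit_distance(s1, s2):
    # plain dynamic-programming Levenshtein distance: insertions, deletions
    # and substitutions all cost 1
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1):
        current = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous[j + 1] + 1
            deletions = current[j] + 1
            substitutions = previous[j] + (c1 != c2)
            current.append(min(insertions, deletions, substitutions))
        previous = current
    return previous[-1]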