Code Example #1
File: get.py Project: brettviren/garpi
def get_http_ftp(what,url,target,overwrite):
    from urllib2 import Request, urlopen, URLError, HTTPError, ProxyHandler, build_opener, install_opener
    import shutil

    if os.path.exists(target):
        if overwrite:
            log.info('Removing pre-existing file %s'%target)
            shutil.rmtree(target)
        else:
            log.info('Pre-existing file found, not re-getting %s'%target)
            return target

    proxy = os.getenv(what+'_proxy')
    if proxy : 
        proxy_support = ProxyHandler({what:proxy})
        opener = build_opener(proxy_support)
        install_opener(opener)

    #print 'openning',url

    try:
        res = urlopen(url)
    except HTTPError, e:
        print e.__class__, e 
        raise IOError,'Failed to get '+url
Code Example #2
File: data.py Project: DennyZhang/xiaozibao
def list_topic(topic, start_num, count, voteup, votedown, sort_method):
    global db
    conn = db.connect()

    sql_clause = "select postid, category, title, filename from posts "

    where_clause = "where category ='%s'" % (topic)
    extra_where_clause = ""
    if voteup != -1:
        extra_where_clause = "%s and voteup = %d" % (extra_where_clause, voteup)
    if votedown != -1:
        extra_where_clause = "%s and votedown = %d" % (extra_where_clause, votedown)
    where_clause = "%s%s" % (where_clause, extra_where_clause)

    orderby_clause = " "
    if sort_method == config.SORT_METHOD_LATEST:
        orderby_clause = "order by voteup asc, votedown asc"

    if sort_method == config.SORT_METHOD_HOTEST:
        orderby_clause = "order by voteup desc, votedown desc"

    sql = "%s %s %s limit %d offset %d;" % (sql_clause, where_clause, orderby_clause, count, start_num)
    log.info(sql)
    cursor = conn.execute(sql)
    out = cursor.fetchall()
    conn.close()
    user_posts = POST.lists_to_posts(out)
    return user_posts
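
Note: the query above is assembled with raw string interpolation, which leaves it open to SQL injection. Below is a minimal parameterized sketch of the same lookup, assuming a DB-API 2.0 style connection (e.g. sqlite3) and the same config constants; list_topic_safe is a hypothetical name, not part of the project.

def list_topic_safe(conn, topic, start_num, count, voteup, votedown, sort_method):
    # Same query as above, but with bound parameters instead of % formatting
    sql = "select postid, category, title, filename from posts where category = ?"
    params = [topic]
    if voteup != -1:
        sql += " and voteup = ?"
        params.append(voteup)
    if votedown != -1:
        sql += " and votedown = ?"
        params.append(votedown)
    if sort_method == config.SORT_METHOD_LATEST:
        sql += " order by voteup asc, votedown asc"
    elif sort_method == config.SORT_METHOD_HOTEST:
        sql += " order by voteup desc, votedown desc"
    sql += " limit ? offset ?"
    params.extend([count, start_num])
    cursor = conn.execute(sql, params)
    return cursor.fetchall()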
Code Example #3
File: tests.py Project: facebook/FBSimulatorControl
 def _filter_methods(self, cls, methods):
     log.info('All Tests for {} {}'.format(cls, methods))
     if not self.name_filter:
         return methods
     filtered = [method for method in methods if self.name_filter.lower() in method.lower()]
     log.info('Filtered Tests for {} {}'.format(cls, filtered))
     return filtered
Code Example #4
File: excel_helper.py Project: EclipseXuLu/LagouJob
def write_excel(joblist, filename):
    """
    write Excel with Workbook
    :param joblist:
    :param filename:
    :return:
    """
    mkdirs_if_not_exists(EXCEL_DIR)
    wb = Workbook()
    ws = wb.active
    ws.title = u"职位信息"
    ws.cell(row=1, column=1).value = u'职位编码'
    ws.cell(row=1, column=2).value = u'职位名称'
    ws.cell(row=1, column=3).value = u'所在城市'
    ws.cell(row=1, column=4).value = u'发布日期'
    ws.cell(row=1, column=5).value = u'薪资待遇'
    ws.cell(row=1, column=6).value = u'公司编码'
    ws.cell(row=1, column=7).value = u'公司名称'
    ws.cell(row=1, column=8).value = u'公司全称'

    rownum = 2
    for each_job in joblist:
        ws.cell(row=rownum, column=1).value = each_job.positionId
        ws.cell(row=rownum, column=2).value = each_job.positionName
        ws.cell(row=rownum, column=3).value = each_job.city
        ws.cell(row=rownum, column=4).value = each_job.createTime
        ws.cell(row=rownum, column=5).value = each_job.salary
        ws.cell(row=rownum, column=6).value = each_job.companyId
        ws.cell(row=rownum, column=7).value = each_job.companyName
        ws.cell(row=rownum, column=8).value = each_job.companyFullName
        rownum += 1
    wb.save(EXCEL_DIR + filename + '.xlsx')
    logging.info('Excel生成成功!')
Code Example #5
File: server.py Project: DennyZhang/xiaozibao
def apple_privacy():
    session.permanent = True # TODO
    log.info("apple_privacy is called")
    content = 'ok'
    resp = make_response(content, 200)
    resp.headers['Content-type'] = 'application/json; charset=utf-8'
    return resp
Code Example #6
File: tmgr.py Project: h2020-endeavour/iSDX
def delay (args):
    if len(args) == 1:
        try:
            log.info('MM:00 DELAY ' + args[0])
            time.sleep(float(args[0]))
        except Exception, e:
            log.error('MM:00 ERROR: DELAY: exception: ' + repr(e))
Code Example #7
File: gaudi.py Project: brettviren/garpi
    def _download_git_submodules(self):
        'If gaudi is served from a git repo with a submodule per package'
        url = self.url()
        if url[4] == '+': url = url[4:]
        log.info(self.name +' download')

        # Get super project
        self._git_clone(url,True)

        # Get release package
        self._git_checkout(self.tag(),self.rel_pkg())
        
        self.init_project(['lcgcmt'])

        # Get versions
        import cmt
        pkg_dir = os.path.join(self.proj_dir()+'/'+self.rel_pkg())
        uses = cmt.get_uses(pkg_dir,self.env(pkg_dir))
        for use in uses:
            #print 'use:',use.name,use.project,use.directory,use.version
            if use.project == 'gaudi' and use.directory == '':
                if '*' in use.version:
                    log.info('Skipping %s %s'%(use.name,use.version))
                    continue
                self._git_checkout(use.version,use.name)
                pass
            continue
        return
Code Example #8
File: ice_case.py Project: hqzxsc/interface_xls
def check_returncode(self,recv,args):
    print 'verify code value,expect:%s,actual:%s'%(int(args.get('ExpectResult')),int(recv[1].get('CODE')))
    log.info('验证code的值,期望值:%s,实际值:%s'%(int(args.get('ExpectResult')),int(recv[1].get('CODE'))))
    print 'The Interface return is: '
    print recv
    print "The data I want to see is: %s" %(recv[1].get('sporderid'))
    self.assertEqual(int(args.get('ExpectResult')),int(recv[1].get('CODE')))
Code Example #9
File: video.py Project: kochbeck/videocleaner
 def _renameSubtitles(self):
     """ Rename the Subtitle files. """
     includeCount = len(self.curFileNames) >= 2
     if (self.subtitles):
         for i in range(len(self.subtitles)):
             subPath = self.subtitles[i]
             newFilePrefix = self.newFileNames[i][0:-4]
             # Make sure the subtitle directory exists
             newSubDirPath = "%s/subtitles" % (self.dirPath)
             if (not os.path.exists(newSubDirPath)):
                 log.info("  >> Creating Dir: %s" % newSubDirPath)
                 os.mkdir(newSubDirPath, 0755)
             # Rename SRT Files
             if (subPath.lower().endswith('.srt')):
                 curSrtPath = "%s/%s" % (self.dirPath, subPath)
                 newSrtPath = "%s/%s.srt" % (newSubDirPath, newFilePrefix)
                 self._rename(curSrtPath, newSrtPath)
             # Rename IDX, SUB Files
             elif (subPath.lower().endswith('.idx')):
                 curIdxPath = "%s/%s" % (self.dirPath, subPath)
                 curSubPath = "%s.sub" % (curIdxPath[0:-4])
                 newIdxPath = "%s/%s.idx" % (newSubDirPath, newFilePrefix)
                 newSubPath = "%s/%s.sub" % (newSubDirPath, newFilePrefix)
                 self._rename(curIdxPath, newIdxPath)
                 self._rename(curSubPath, newSubPath)
Code Example #10
 def testBootsDirectly(self):
     if self.metal.is_supported() is False:
         log.info('Metal not supported, skipping testBootsDirectly')
         return
     simulator = self.assertCreatesSimulator([self.device_type])
     self.assertEventSuccesful([simulator.get_udid(), 'boot', '--direct-launch'], 'boot')
     self.assertEventSuccesful([simulator.get_udid(), 'shutdown'], 'shutdown')
Code Example #11
 def testRecordsVideo(self):
     if self.metal.is_supported() is False:
         log.info('Metal not supported, skipping testRecordsVideo')
         return
     (simulator, _) = self.testLaunchesSystemApplication()
     arguments = [
         simulator.get_udid(), 'record', 'start',
         '--', 'listen',
         '--', 'record', 'stop',
         '--', 'shutdown',
     ]
     # Launch the process, terminate and confirm teardown is successful
     with self.fbsimctl.launch(arguments) as process:
         process.wait_for_event('listen', 'started')
         process.terminate()
         process.wait_for_event('listen', 'ended')
         process.wait_for_event('shutdown', 'ended')
     # Get the diagnostics
     diagnose_events = self.assertExtractAndKeyDiagnostics(
         self.assertEventsFromRun(
             [simulator.get_udid(), 'diagnose'],
             'diagnostic',
             'discrete',
         ),
     )
     # Confirm the video exists
     video_path = diagnose_events['video']['location']
     self.assertTrue(
         os.path.exists(video_path),
         'Video at path {} should exist'.format(video_path),
     )
Code Example #12
File: projects.py Project: brettviren/garpi
    def download(self):
        '''Download missing or update pre-existing project files.  As
        a side effect the program will be in the projects directory
        that contains the downloaded project'''
        log.info(self.name +' download')
        import fs, ConfigParser
        projdir = fs.projects()
        fs.goto(projdir, True)
        from get import get

        try:
            tag = self.tag()
        except ConfigParser.NoOptionError:
            tag = None

        #print 'url="%s" name="%s" tag="%s"'%(self.url(), self.name, tag)
        get(self.url(), self.name, True, tag=tag)
        tarfile = os.path.basename(self.url())
        if '.tgz' in tarfile or '.tar' in tarfile:
            untar(tarfile)
            dot = tarfile.find('.')
            dirname = tarfile[:dot]
            import shutil
            shutil.move(dirname, self.name)
            pass        

        fs.goback()
        return
Code Example #13
File: tmgr.py Project: frankzhao/iSDX
def run (args):
    if len(args) < 2:
        print 'MM:00 EXEC: ERROR usage: exec cmd cmd ...'
        print 'Commands are:'
        for c in sorted(commands):
            print '  ' + c + ': ' + commands[c].get('cmd', '<CMD>')
        return
    
    for i in range(1, len(args)):
        cmdname = args[i]
        try:
            c = commands[cmdname]['cmd']
        except:
            log.error('MM:00 ERROR: EXEC FAILED unknown or poorly specified cmd: ' + cmdname)
            continue
        log.info('MM:00 EXEC: ' + cmdname + ' cmd = ' + c)
        ca = c.split()
        try:
            p = subprocess.Popen(ca, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
        except Exception, e:
            out = ''
            err = 'Command Failed: ' + repr(e)
        r = out + err
        log.debug('MM:00 EXEC: ' + cmdname + ' output = \n' + r.strip())
Code Example #14
File: post_check.py Project: DennyZhang/xiaozibao
def post_check():
    log.info("Check post validation for all posts")
    postid_list = get_postid_list()
    status, obj = check_post_list(postid_list)
    if status is False:
        log.error("post check fail, msg: %s" % obj)
        return False
Code Example #15
File: movie.py Project: kochbeck/videocleaner
 def _searchTrailerAddict(self, searchTitle, searchYear):
     """ Search TrailerAddict for a Trailer URL """
     # Search TrailerAddict for the Movie
     log.info("  Searching TrailerAddict for: '%s' (yr: %s)" % (searchTitle, searchYear))
     searchResults = traileraddict.search(searchTitle)
     if (not searchResults):
         log.fine("  TrailerAddict has no search results for: '%s' (yr: %s)" % (searchTitle, searchYear))
         return None
     # Select the correct TrailerAddict Movie
     firstTitle = searchResults[0]['title']
     firstYear = searchResults[0]['year']
     if (firstTitle.lower() == searchTitle.lower()) and (int(firstYear) == searchYear):
         log.fine("  First result is exact match: %s (%s)" % (searchTitle, searchYear))
         searchSelection = searchResults[0]
     else:
         log.fine("  No exact TrailerAddict match found, prompting user")
         choiceStr = lambda r: "%s (%s) - %s" % (r['title'], r['year'], r['url'])
         searchSelection = util.promptUser(searchResults, choiceStr)
     if (not searchSelection):
         log.fine("  TrailerAddict has no entry for: '%s' (yr: %s)" % (searchTitle, searchYear))
         return None
     # Search for the correct Video (Traileraddict has many per movie)
     trailerUrls = traileraddict.getTrailerUrls(searchSelection['url'])
     trailerUrl = traileraddict.getMainTrailer(trailerUrls)
     if (not trailerUrl):
         log.info("  Main trailer not found, prompting user")
         choiceStr = lambda t: t
         trailerUrl = util.promptUser(trailerUrls, choiceStr)
     return trailerUrl
Code Example #16
File: movie.py Project: kochbeck/videocleaner
 def _getImdbUrlFromSearch(self, foreign=False):
     """ Search IMDB for the specified title. """
     # Search IMDB for potential matches
     title = self.curTitle
     year = self.curYear or "NA"
     log.info("  Searching IMDB for: '%s' (yr: %s)" % (title, year))
     results = imdbpy.search_movie(title, IMDB_MAX_RESULTS)
     # Check first 5 Results Title and year matches exactly
     selection = None
     for result in results[0:5]:
         if (self._weakMatch(result['title'], title)) and (int(result['year']) == year):
             log.fine("  Result match: %s (%s)" % (result['title'], result['year']))
             selection = result
             break
     # Ask User to Select Correct Result
     if (not selection):
         log.fine("  No exact IMDB match found, prompting user")
         if (not foreign): choiceStr = lambda r: "%s (%s) - %s" % (r['title'], r['year'], self.getUrl(r.movieID))
         else: choiceStr = lambda r: "%s (%s-%s): %s" % (r['title'], self._getCountry(r), r['year'], self._getAka(r))
         selection = util.promptUser(results, choiceStr)
     # If still no selection, return none
     if (not selection):
         log.fine("  IMDB has no entry for: %s (%s)" % (title, year))
         return None
     return self.getUrl(selection.movieID)
Code Example #17
File: ews_case.py Project: hqzxsc/interface_xls
def check_return(self,r_status,r_data,args):
    if r_status==200:
        if args['ExpectResult']:
            try:
                eresult=json.loads(args['ExpectResult'])
            except Exception, e:
                print e
                print u'请检查excel的ExpectResult列数据格式是不是dict'
            if type(eresult)==dict:
                for key,value in eresult.items():
                    if r_data.has_key(key):
                        if type(eresult[key])==dict:
                            for key1,value1 in eresult[key].items():
                                if r_data[key].has_key(key1):
                                    print u'  验证%s的值,期望值:%s,实际值:%s'%(key1,value1,r_data[key][key1])
                                    log.info(u'  验证%s的值,期望值:%s,实际值:%s'%(key1,value1,r_data[key][key1]))
                                    self.assertEqual(value1,r_data[key][key1])
                                else:
                                    print '返回的接口数据无此%s'%key1
                        else:
                            print u'验证%s的值,期望值:%s,实际值:%s'%(key,value,r_data[key])
                            log.info(u'验证%s的值,期望值:%s,实际值:%s'%(key,value,r_data[key]))
                            self.assertEqual(value,r_data[key])
                    else:
                        print '返回的接口数据无此%s'%key
        else:
            print u'请检查excel的ExpectResult列是否准备了待验证的数据'
Code Example #18
File: tmgr.py Project: ndsc-sdn/iSDX
def listener3(host, bind, port):
    if host not in hosts:
        log.error('MM:00 ERROR: LISTENER: unknown host: ' + host)
        return
    #print 'listener ' + host + ' ' + bind + ' ' + port
    r = generic(host, 'LISTENER', 'listener ' + bind + ' ' + port + '\n')
    if r is not None and len(r) > 0:
        log.info('MM:' + host + ' LISTENER: ' + r.strip())
Code Example #19
File: model_saver.py Project: jeongheo97/tgif-qa
 def save_to_file(self, attr, path):
     param = attr
     with open(path, 'w') as f:
         hkl.dump(self.to_dict(), f)
     path = os.path.splitext(path)[0] + '.json'
     with open(path, 'w') as f:
         json.dump(param, f)
     log.info("Save parame to {}".format(path))
Code Example #20
File: tmgr.py Project: h2020-endeavour/iSDX
def listener3(host, bind, port):
    if host not in hosts:
        log.error('MM:00 ERROR: LISTENER: unknown host: ' + host)
        return
    #print 'listener ' + host + ' ' + bind + ' ' + port
    r = generic(host, 'LISTENER', 'listener ' + bind + ' ' + port + '\n')
    if r is not None and len(r) > 0:
        log.info('MM:' + host + ' LISTENER: ' + r.strip())
Code Example #21
    def report(self):
        log.info("Computing scores...")
        total_loss = []

        for id, pred, gt in zip(self._ids, self._predictions, self._groundtruths):
            total_loss.append(self.compute_loss(pred, gt))
        avg_loss = np.average(total_loss)
        log.infov("Average loss : %.4f", avg_loss)
Code Example #22
def max_pool(input, info=False, k=2, s=2, padding='SAME', name='pool'):
    _ = tf.nn.max_pool(input,
                       ksize=[1, k, k, 1],
                       strides=[1, s, s, 1],
                       padding=padding,
                       name=name)
    if info: log.info('{} {}'.format(name, _))
    return _
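
A minimal usage sketch for the wrapper above, assuming a TensorFlow 1.x graph; the input shape is illustrative.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
# With the defaults k=2, s=2 and 'SAME' padding, the spatial dimensions are halved
pooled = max_pool(images, info=True, name='pool1')  # shape (None, 16, 16, 3)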
Code Example #23
    def try_load_checkpoint(self, model_path):

        ckpt_path = tf.train.latest_checkpoint(model_path)
        assert ckpt_path is not None, '%s is empty' % model_path
        log.info("Checkpoint path: %s", ckpt_path)
        self.saver.restore(self.session, ckpt_path)
        log.info(
            "Loaded the pretrain parameters from the provided checkpoint path")
Code Example #24
 def testScreenshot(self):
     if self.metal.is_supported() is False:
         log.info('Metal not supported, skipping testScreenshot')
         return
     simulator = self.assertCreatesSimulator(['iPhone 6'])
     self.assertEventSuccesful([simulator.get_udid(), 'boot'], 'boot')
     with self.launchWebserver() as webserver:
         webserver.get_binary(simulator.get_udid() + '/screenshot.png')
         webserver.get_binary(simulator.get_udid() + '/screenshot.jpeg')
Code Example #25
File: health.py Project: brightpuddle/py-aci-upgrade
def run_checks(client: Client) -> State:
    # Non-zero indicates a gating condition
    for check in enabled_health_checks:
        log.info(f"Checking: {check.__doc__}...")
        state = check(client)
        if state == State.FAIL:
            log.error("Failed on health check", check=check.__doc__)
            return state
    return State.OK
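
run_checks only relies on each entry of enabled_health_checks being a callable that takes a Client and returns a State, with its docstring used as the display name. A hypothetical check illustrating that contract (the real checks are defined elsewhere in the project):

def vitals_ok(client: Client) -> State:
    """Controller vitals within thresholds"""
    # ...query the controller via `client` here; return State.FAIL to gate the upgrade...
    return State.OK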
Code Example #26
File: tmgr.py Project: h2020-endeavour/iSDX
def blackholing (args):
    if len(args) < 3:
        log.error('MM:00 EXEC: ERROR usage: blackholing participant_id remove/insert id[,id...]')
        return
    
    part_id = args[0] #participant id
    part_action = args[1] #action insert or remove

    rule_ids = []
    for policy_id in args[2].split(','): #rule ids
        rule_ids.append(int(policy_id)+2**12) #additional 4096 for cookie id
    
    client_path = '/home/vagrant/endeavour/pclnt/participant_client.py'
    config_file = 'participant_' + part_id + '_bh.cfg'

    cmd = ''
    for arg in args:
        cmd += arg + ' '
    log.info('MM:00 BLACKHOLING: ' + cmd + config_file)

    policy_path = os.path.abspath(os.path.join(os.path.realpath(sys.argv[1]), "..", "..", "policies"))
    config_path = os.path.join(policy_path, config_file)

    part_info = config.participants[str(part_id)]

    part_url = 'http://' + str(part_info["EH_SOCKET"][0]) + ':' + str(part_info["EH_SOCKET"][1]) + '/bh/inbound/'
    content_header = {'Content-Type':'application/json'}

    # prepare to insert a blackholing policy
    if part_action == 'insert':
        new_policy = []
        # Open File and Parse
        with open(config_path, 'r') as f:
            policies=json.load(f)

            for policy in policies['inbound']: 
                if int(policy['cookie']) in rule_ids:
                    new_policy.append(policy)
        
        # insert only inbound policies
        data = {}
        data['inbound'] = new_policy
        data=json.dumps(data)

        # post to participant api
        r = requests.post(part_url, data=data, headers=content_header)

    # prepare to remove separate blackholing policies
    elif part_action == 'remove':
    
        for rule_id in rule_ids:
            new_url = part_url + str(rule_id)
            
            # post to participant api
            r = requests.delete(new_url, headers=content_header)
    else:
        log.error('MM:00 EXEC: ERROR usage: error in blackholing - wrong action')
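
For reference, a hypothetical invocation based on the argument handling above (args[0] is the participant id, args[1] the action, args[2] a comma-separated list of rule ids):

# participant 3 inserts blackhole rules 10 and 11 (cookies 4106 and 4107)
blackholing(['3', 'insert', '10,11'])
# ...and removes them again
blackholing(['3', 'remove', '10,11'])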
Code Example #27
    def __init__(self, config):
        self.config = config
        self.vfeat_path = config.vfeat_path
        self.tf_record_dir = config.tf_record_dir
        self.train_dir = os.path.dirname(config.checkpoint)
        self.vlmap_word_weight_dir = os.path.join(
            self.train_dir, config.vlmap_word_weight_dir.split('/')[-1])
        config.vlmap_word_weight_dir = self.vlmap_word_weight_dir

        self.batch_size = config.batch_size
        with tf.name_scope('datasets'):
            self.target_split = tf.placeholder(tf.string)

        with tf.name_scope('datasets/batch'):
            vqa_batch = {
                'train': input_ops_vqa.create(
                    self.batch_size, self.tf_record_dir, 'train',
                    is_train=True, scope='train_ops', shuffle=True),
                'val': input_ops_vqa.create(
                    self.batch_size, self.tf_record_dir, 'val',
                    is_train=True, scope='val_ops', shuffle=False),
                'testval': input_ops_vqa.create(
                    self.batch_size, self.tf_record_dir, 'testval',
                    is_train=True, scope='testval_ops', shuffle=False),
                'test': input_ops_vqa.create(
                    self.batch_size, self.tf_record_dir, 'test',
                    is_train=True, scope='test_ops', shuffle=False)
            }
            batch_opt = {
                tf.equal(self.target_split, 'train'): lambda: vqa_batch['train'],
                tf.equal(self.target_split, 'val'): lambda: vqa_batch['val'],
                tf.equal(self.target_split, 'testval'): lambda: vqa_batch['testval'],
                tf.equal(self.target_split, 'test'): lambda: vqa_batch['test'],
            }
            self.batch = tf.case(
                batch_opt, default=lambda: vqa_batch['train'], exclusive=True)

        # Model
        Model = self.get_model_class(config.model_type)
        log.infov('using model class: {}'.format(Model))
        self.model = Model(self.batch, config, is_train=True)

        self.checkpoint_loader = tf.train.Saver(max_to_keep=1)

        session_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1})

        self.session = tf.Session(config=session_config)

        self.ckpt_path = config.checkpoint
        if self.ckpt_path is not None:
            log.info('Checkpoint path: {}'.format(self.ckpt_path))
            self.checkpoint_loader.restore(self.session, self.ckpt_path)
            log.info('Loaded the checkpoint')
        log.warn('Inference initialization is done')
Code Example #28
 async def testScreenshot(self):
     if self.metal.is_supported() is False:
         log.info('Metal not supported, skipping testScreenshot')
         return
     simulator = await self.assertCreatesSimulator(['iPhone 6'])
     await self.assertEventSuccesful([simulator.udid, 'boot'], 'boot')
     async with self.launchWebserver() as webserver:
         webserver.get_binary(simulator.udid + '/screenshot.png')
         webserver.get_binary(simulator.udid + '/screenshot.jpeg')
Code Example #29
def init(file):
    '''Parse test cases from the Excel file'''
    log.info('解析excel文件数据')
    cases_list=[]
    try:
        module_list=open_excel.excel_table_by_index(file)
    except Exception, e:
        print e
        log.error('%s文件打开失败'%file)
Code Example #30
def symlink(config, filename):
    src_path = os.path.join(root_dir, config.reference_vqa_dir, filename)
    dst_path = os.path.join(root_dir, config.caption_split_dir, filename)

    if os.path.exists(dst_path):
        log.info("{} already exists".format(dst_path))
    else:
        log.info("Sym link: {}->{}".format(src_path, dst_path))
        os.symlink(src_path, dst_path)
Code Example #31
File: tests.py Project: sherlockZ/FBSimulatorControl
 def testBootsDirectly(self):
     if self.metal.is_supported() is False:
         log.info('Metal not supported, skipping testBootsDirectly')
         return
     simulator = self.assertCreatesSimulator([self.device_type])
     self.assertEventSuccesful(
         [simulator.get_udid(), 'boot', '--direct-launch'], 'boot')
     self.assertEventSuccesful([simulator.get_udid(), 'shutdown'],
                               'shutdown')
Code Example #32
 def log_step_message(self, step_result):
     report_msg = ' '.join([
         '{}: {}'.format(k, step_result['report'][k])
         for k in sorted(step_result['report'].keys())
     ])
     msg = '[{}] step {}: [{}]'.format(self.split, step_result['step'],
                                       report_msg)
     log.info(msg)
     return msg
Code Example #33
def crawl_company(havemark=0):

    # Company info is collected as a list
    COMPANY_LIST = list()
    # Request URL
    req_url = 'https://www.lagou.com/gongsi/0-0-0.json?havemark=%d' % havemark
    # Request headers
    headers = {
        'Accept':
        'application/json, text/javascript, */*; q=0.01',
        'Content-Type':
        'application/x-www-form-urlencoded; charset=UTF-8',
        'Host':
        'www.lagou.com',
        'Origin':
        'https://www.lagou.com',
        'Referer':
        'https://www.lagou.com/gongsi/0-0-0?havemark=0',
        'User-Agent':
        'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 '
        'Mobile/13B143 Safari/601.1'
    }

    # Loop over the result pages
    for pn in range(20):
        params = {
            'first': 'false',
            'pn': str(pn),
            'sortField': '0',
            'havemark': str(havemark)
        }

        response = requests.post(req_url,
                                 headers=headers,
                                 params=params,
                                 cookies=m_lagou_spider.get_cookies(),
                                 timeout=10)
        print(response.url)
        if response.status_code == 200:
            company_list_per_page = response.json()['result']
            for company in company_list_per_page:
                COMPANY_LIST.append([
                    company['companyId'], company['companyShortName'],
                    company['city'], company['companyFeatures'],
                    company['companyFullName'], company['financeStage'],
                    company['industryField'], company['interviewRemarkNum'],
                    company['positionNum'], company['processRate']
                ])
            log.info('page %d has been crawled down~' % (pn + 1))
        elif response.status_code == 403:
            log.error('403 forbidden...')
        else:
            log.error(response.status_code)
        # Sleep between requests
        time.sleep(config.TIME_SLEEP)

    return COMPANY_LIST
Code Example #34
 def start(self):
     self._thread = threading.Thread(None, \
                                    self._body, \
                                    self._name,\
                                    (), \
                                    {}  \
     )
     log.info("start thread:" + str(self._name))
     self._thread.start()
Code Example #35
File: m_lagou_spider.py Project: zuolinye/LagouJob1
def crawl_jobs(positionName):
    JOB_DATA = list()
    # Get max_page_number, the total number of result pages
    max_page_number = get_max_pageNo(positionName)
    log.info("%s, 共有 %s 页记录, 共约 %s 记录", positionName, max_page_number,
             max_page_number * 15)
    cookies = get_cookies()

    for i in range(1, max_page_number + 1):
        # Request URL
        request_url = 'https://m.lagou.com/search.json?city=%E5%85%A8%E5%9B%BD&positionName=' + parse.quote(
            positionName) + '&pageNo=' + str(i) + '&pageSize=15'
        # Request headers
        headers = {
            'Accept':
            'application/json',
            'Accept-Encoding':
            'gzip, deflate, sdch',
            'Host':
            'm.lagou.com',
            'Referer':
            'https://m.lagou.com/search.html',
            'User-Agent':
            'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/600.1.3 (KHTML, '
            'like Gecko) Version/8.0 Mobile/12A4345d Safari/600.1.4',
            'X-Requested-With':
            'XMLHttpRequest',
            'Connection':
            'keep-alive'
        }
        # Send the request
        response = requests.get(request_url, headers=headers, cookies=cookies)
        # Check the response
        if response.status_code == 200:
            # Append positionId, positionName, createTime, salary, companyId, companyName and companyFullName to JOB_DATA
            for each_item in response.json(
            )['content']['data']['page']['result']:
                JOB_DATA.append([
                    each_item['positionId'], each_item['positionName'],
                    each_item['city'], each_item['createTime'],
                    each_item['salary'], each_item['companyId'],
                    each_item['companyName'], each_item['companyFullName']
                ])
                # try:
                # crawl_job_detail(each_item['positionId'], positionName)
                # except:
                #     pass
            print('crawling page %d done...' % i)
            # Sleep between pages
            time.sleep(TIME_SLEEP)
        elif response.status_code == 403:
            log.error('request is forbidden by the server...')
        else:
            log.error(response.status_code)

    return JOB_DATA
Code Example #36
File: main.py Project: jeongheo97/tgif-qa
def init_model():
    task = FLAGS.task

    model_params = {
        "feat_dim": FEAT_DIM,
        "word_embed": train_dataset.word_matrix,
        "lstm_steps": SEQUENCE_LENGTH,
        "architecture": FLAGS.architecture
    }
    if task == 'FrameQA':
        model_params["vocabulary_size"] = len(train_dataset.idx2word)
        model_params["answer_size"] = len(train_dataset.idx2ans)

    model_params.update(FLAGS.__flags)

    if FLAGS.checkpoint_path:
        checkpoint = FLAGS.checkpoint_path
        params_path = os.path.join(
            os.path.dirname(checkpoint),
            '%s_%s_param.hkl' % (FLAGS.task.lower(), FLAGS.name.lower()))
        log.info("Restored parameter set from {}".format(params_path))
        model_params = hkl.load(open(params_path))
        model_params["att_hidden_dim"] = FLAGS.att_hidden_dim
        model_params["hidden_dim"] = FLAGS.hidden_dim

    model = Model.from_dict(model_params)
    model.print_params()

    video = tf.placeholder(tf.float32, [FLAGS.batch_size] +
                           list(train_dataset.get_video_feature_dimension()))
    video_mask = tf.placeholder(tf.float32,
                                [FLAGS.batch_size, SEQUENCE_LENGTH])
    answer = tf.placeholder(tf.int32, [FLAGS.batch_size, 1])

    if (task == 'Count') or (task == 'FrameQA'):
        question = tf.placeholder(tf.int32,
                                  [FLAGS.batch_size, SEQUENCE_LENGTH])
        question_mask = tf.placeholder(tf.int32,
                                       [FLAGS.batch_size, SEQUENCE_LENGTH])
    else:
        question = tf.placeholder(
            tf.int32,
            [FLAGS.batch_size, Model.MULTICHOICE_COUNT, SEQUENCE_LENGTH])
        question_mask = tf.placeholder(
            tf.float32,
            [FLAGS.batch_size, Model.MULTICHOICE_COUNT, SEQUENCE_LENGTH])

    model.build_graph(
        video,
        video_mask,
        question,
        question_mask,
        answer,
        optimizer=tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate))

    return model, model_params
Code Example #37
File: model.py Project: zhl001/Multiview2Novelview
 def Pose_Encoder(input_pose,
                  target_pose,
                  scope='Pose_Encoder',
                  reuse=False):
     with tf.variable_scope(scope) as scope:
         if not reuse: log.warn(scope.name)
         _ = target_pose - input_pose
         if not reuse:
             log.info('{} {}'.format(_.name, _.get_shape().as_list()))
         return _
Code Example #38
File: music.py Project: purkylin/MySpider
def downMusic(url, sid):
	if os.path.exists('mp3/%s.mp3' % sid):
		log.info('Already downloaded')
		return

	r = session.get(url, timeout=30)
	with open('mp3/%s.mp3' % sid, 'wb') as fp:
		fp.write(r.content)

	log.info('Down finish')
Code Example #39
File: tmgr.py Project: sdn-ixp/iSDX
def echo (args):
    host = args[0]
    del args[0]
    all = ''
    for arg in args:
        all += ' ' + '"' + arg + '"'
    log.info('MM:' + host + ' ECHO ' + all)
    r = generic(host, 'ECHO', 'echo ' + all)
    if r is not None:
        log.info('MM:' + host + ' echo = ' + r.strip())
Code Example #40
 def log_final_message(self, final_report):
     report_msg = ' '.join([
         '{}: {}'.format(k, final_report[k])
         for k in sorted(final_report.keys())
     ])
     msg = '[{}] Final average report: [{}]'.format(self.split, report_msg)
     log.info(msg)
     with open(self.checkpoint + '.final.txt', 'w') as f:
         f.write(msg)
     return msg
Code Example #41
File: video.py Project: kochbeck/videocleaner
 def _rename(self, src, dst):
     """ Rename the specified file. """
     if (src != dst):
         if (os.path.exists(dst)):
             log.warn("  Path already exists: %s" % dst)
             return None
         log.info("  >> Renaming: %s" % src)
         log.info("           to: %s" % dst)
         os.rename(src, dst)
         
Code Example #42
File: git.py Project: brettviren/garpi
def unpack():
    "Unpack the previously downloaded tarfile"
    log.info("unpacking git source")
    target = srcdir()
    if os.path.exists(target):
        log.info("git appears to already be unpacked in %s" % (target))
        return target
    fs.goto(fs.external(), True)
    untar(tgz())
    return target
Code Example #43
File: tmgr.py Project: huxh10/SGDX
def echo(args):
    host = args[0]
    del args[0]
    all = ''
    for arg in args:
        all += ' ' + '"' + arg + '"'
    log.info('MM:' + host + ' ECHO ' + all)
    r = generic(host, 'ECHO', 'echo ' + all)
    if r is not None:
        log.info('MM:' + host + ' echo = ' + r.strip())
Code Example #44
File: ews_case.py Project: hqzxsc/interface_xls
 def action(self,args):
     log.info(u'执行用例%s'%args['TestCaseName'])
     print args['TestCaseName']+u'用例描述'+u':'+args['Description']
     self.args=args
     # Send the request
     r_status,r_data=request_http(args)
     # Verify the returned data
     check_return(self,r_status,r_data,args)
     # Verify the values in the database
     check_sql(self,r_data,args)
Code Example #45
File: cmt.py Project: brettviren/garpi
def unpack():
    'Unpack the previously downloaded tarfile'
    log.info('unpacking cmt source')
    target = srcdir()
    if os.path.exists(target):
        log.info('CMT appears to already be unpacked in %s'%(target))
        return target
    fs.goto(fs.external(),True)
    untar(tgz())
    return target
Code Example #46
File: crypto.py Project: tierney/lockbox-py
  def decrypt(self, fpin, fpout, aes_key, salt):
    '''
    Decrypt a fileobject using the given AES key and salt.

    '''
    log.info("Decrypting file...")
    iv = '\0' * 32
    cipher = M2Crypto.EVP.Cipher(alg='aes_256_cbc', key=aes_key, iv=iv, op=DECODE, salt=salt)
    _filter_cipher(fpin, fpout, cipher)
    log.info("Done")
Code Example #47
File: cmt.py Project: brettviren/garpi
def download():
    'Download CMT source tar file into external area.'
    log.info('downloading cmt tar file')
    target = "%s/%s"%(fs.external(),tgz())
    from get import get
    from exception import InconsistentState
    target = get(url(),target)
    if not os.path.exists(target):
        raise InconsistentState,'Tar file does not exist: %s%s'%(os.getcwd(),tgz())
    return target
Code Example #48
File: tests.py Project: sherlockZ/FBSimulatorControl
 def _filter_methods(self, cls, methods):
     log.info('All Tests for {} {}'.format(cls, methods))
     if not self.name_filter:
         return methods
     filtered = [
         method for method in methods
         if self.name_filter.lower() in method.lower()
     ]
     log.info('Filtered Tests for {} {}'.format(cls, filtered))
     return filtered
Code Example #49
    def report(self):
        # report accuracy
        log.info("Computing scores...")
        score = []

        for id, pred, gt in zip(self._ids, self._predictions, self._groundtruths):
            score.append(self.compute_accuracy(pred, gt))
        avg = np.average(score)
        log.infov("Average accuracy : %.4f", avg*100)
Code Example #50
    def __init__(self, data_dir, split, name='vlmap_memft'):
        self.name = name
        self.split = split

        log.warn('loading image_info ..')
        image_info_path = os.path.join(data_dir,
                                       '{}_image_info.pkl'.format(split))
        image_info = cPickle.load(open(image_info_path, 'rb'))
        self._ids = image_info['image_ids']
        self.image_id2idx = image_info['image_id2idx']
        log.info('loading image_info done')

        log.warn('loading processed data ..')
        processed_path = os.path.join(data_dir,
                                      '{}_processed.pkl'.format(split))
        self.processed = cPickle.load(open(processed_path, 'rb'))
        log.info('loading processed done')

        log.warn('loading answer_dict ..')
        answer_dict_path = os.path.join(data_dir, 'answer_dict.pkl')
        self.answer_dict = cPickle.load(open(answer_dict_path, 'rb'))
        self.num_answers = len(self.answer_dict['vocab'])
        log.info('loading answer_dict done')

        log.warn('loading wordset_dict ..')
        ws_dict_path = os.path.join(data_dir, 'wordset_dict5.pkl')
        self.ws_dict = cPickle.load(open(ws_dict_path, 'rb'))
        log.info('loading wordset_dict done')

        log.warn('loading enwiki_context_dict ..')
        enwiki_dict_pkl_path = os.path.join(data_dir,
                                            'enwiki_context_dict_w3_n5.pkl')
        enwiki_dict_h5_path = os.path.join(data_dir,
                                           'enwiki_context_dict_w3_n5.hdf5')
        self.enwiki_dict = cPickle.load(open(enwiki_dict_pkl_path, 'rb'))
        with h5py.File(enwiki_dict_h5_path, 'r') as f:
            self.enwiki_dict['np_context'] = f['np_context'].value
            self.enwiki_dict['np_context_len'] = f['np_context_len'].value

        with h5py.File(os.path.join(data_dir, '{}_vfeat.hdf5'.format(split)),
                       'r') as f:

            self.vfeat_dim = int(f['data_info']['vfeat_dim'].value)
            self.max_box_num = int(f['data_info']['max_box_num'].value)
            log.warn('loading {} image_features ..'.format(split))
            self.image_features = np.array(f.get('image_features'))
            log.warn('loading {} normal_boxes ..'.format(split))
            self.normal_boxes = np.array(f.get('normal_boxes'))
            log.warn('loading {} num_boxes ..'.format(split))
            self.num_boxes = np.array(f.get('num_boxes'))
            log.warn('loading {} spatial_features ..'.format(split))
            self.spatial_features = np.array(f.get('spatial_features'))
            log.warn('loading {} features done ..'.format(split))

        log.info('dataset {} {} init done'.format(name, split))
Code Example #51
File: get.py Project: brettviren/garpi
def get(url,target,overwrite=False,tag=None):
    '''
    Get the file or directory tree at the given URL and place it at
    the given target path.  If overwrite is True and target is
    preexisting it will be overwritten (updated).

    The URL is expected to be in a standard form:

    SCHEME://HOSTNAME/PATH

    The following URL schemes are supported:

    http: - the file given in PATH via HTTP

    ftp: - the file given in PATH via anonymous FTP

    git+TRANSPORT: git-clone a repository.  TRANSPORT can be http,
    rsync, ssh or empty to use native git protocol (the '+' can be
    omitted).  For local repository, use "git+file:///path/to/git".
    See git-clone(1) for details.  If overwriting, a git-pull is done.

    svn+TRANSPORT: - perform "svn co" using the remaining URL with
    'svn+' removed.  If overwriting, an "svn update" is done.

    cvs+TRANSPORT: - perform "cvs co" using the remaining URL with
    'cvs+' removed.  If overwriting, a "cvs update" is done.
    '''

    log.info('Getting url "%s" --> "%s"'%(url,target))

    urlp = uriparse(url)
    if urlp[0] == 'http' or urlp[0] == 'ftp':
        return get_http_ftp(urlp[0],url,target,overwrite)

    scheme = urlp[0].split('+')
    #print 'scheme=',scheme

    print urlp,scheme
    if urlp[0] == 'git' or scheme[0] == 'git':
        return get_git(scheme,url,target,overwrite,tag)

    if scheme[0] == 'svn':
        return get_svn(scheme[1]+'://'+urlp[1]+'/'+urlp[2]+'/'+tag,target,overwrite)
    if scheme[0] == 'cvs':
        # get_cvs(url,module,tag,target,overwrite):
        #print 'CVS: "%s", "%s", "%s"'%(urlp[0],urlp[1],urlp[2])
        url = ':%s:%s:%s'%(scheme[1],urlp[1],'/'.join(urlp[2].split('/')[:-1]))
        module = urlp[2].split('/')[-1]
        #print 'url=%s, module=%s'%(url,module)
        print 'Note: getting from CVS, if this appears to hang, it is waiting for a password'
        return get_cvs(url,module,tag,target,overwrite)

    msg = 'Unhandled URL: "%s"'%url
    log.error(msg)
    raise ValueError, msg
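
Illustrative calls covering the URL schemes described in the docstring (all paths, URLs and tags are hypothetical):

# plain file over HTTP or FTP
get('http://example.org/releases/tool-1.0.tar.gz', 'external/tool-1.0.tar.gz')
# git clone (or pull, when overwriting), then check out a tag
get('git+ssh://git@example.org/project.git', 'src/project', overwrite=True, tag='v1.2')
# subversion checkout of a tagged tree
get('svn+https://svn.example.org/repo/project', 'src/project', tag='tags/1.2')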
Code Example #52
File: main.py Project: xiao-hanjun/tgif-qa
def init_model():
    task = FLAGS.task

    model_params = {
        "feat_dim": FEAT_DIM,
        "word_embed": train_dataset.word_matrix,
        "lstm_steps": SEQUENCE_LENGTH
    }
    if task == 'FrameQA':
        model_params["vocabulary_size"] = len(train_dataset.idx2word)
        model_params["answer_size"] = len(train_dataset.idx2ans)

    model_params.update(FLAGS.__flags)

    if FLAGS.checkpoint_path:
        checkpoint = FLAGS.checkpoint_path
        params_path = os.path.join(
            os.path.dirname(checkpoint),
            '%s_%s_param.hkl' % (FLAGS.task.lower(), FLAGS.name.lower()))
        log.info("Restored parameter set from {}".format(params_path))
        model_params = hkl.load(open(params_path))

    if FLAGS.test_phase:
        model_params["dropout_keep_prob_cell_input"] = 1.
        model_params["dropout_keep_prob_cell_output"] = 1.
        model_params["dropout_keep_prob_fully_connected"] = 1.
        model_params["dropout_keep_prob_output"] = 1.
        model_params["dropout_keep_prob_image_embed"] = 1.

    model = Model.from_dict(model_params)
    model.print_params()

    video = tf.placeholder(tf.float32, [FLAGS.batch_size] +
                           list(train_dataset.get_video_feature_dimension()))
    video_mask = tf.placeholder(tf.float32,
                                [FLAGS.batch_size, SEQUENCE_LENGTH])
    answer = tf.placeholder(tf.int32, [FLAGS.batch_size, 1])
    train_flag = tf.placeholder(tf.bool)

    if (task == 'Count') or (task == 'FrameQA'):
        question = tf.placeholder(tf.int32,
                                  [FLAGS.batch_size, SEQUENCE_LENGTH])
        question_mask = tf.placeholder(tf.int32,
                                       [FLAGS.batch_size, SEQUENCE_LENGTH])
    else:
        question = tf.placeholder(
            tf.int32,
            [FLAGS.batch_size, Model.MULTICHOICE_COUNT, SEQUENCE_LENGTH])
        question_mask = tf.placeholder(
            tf.float32,
            [FLAGS.batch_size, Model.MULTICHOICE_COUNT, SEQUENCE_LENGTH])

    model.build_graph(video, video_mask, question, question_mask, answer,
                      train_flag)
    return model, model_params
Code Example #53
def fc(input,
       output_shape,
       is_train,
       info=False,
       batch_norm=True,
       activation_fn=tf.nn.relu,
       name="fc"):
    _ = slim.fully_connected(input, output_shape, activation_fn=None)
    if info: log.info('{} {}'.format(name, _))
    _ = bn_act(_, is_train, batch_norm=batch_norm, activation_fn=activation_fn)
    return tf.identity(_, name=name)
Code Example #54
def symlink(src_path, dst_path):
    root_dir = os.path.dirname(os.path.realpath(__file__))

    src_path = os.path.join(root_dir, src_path)
    dst_path = os.path.join(root_dir, dst_path)

    if os.path.exists(dst_path):
        log.info("{} already exists".format(dst_path))
    else:
        log.info("Sym link: {}->{}".format(src_path, dst_path))
        os.symlink(src_path, dst_path)
Code Example #55
def create_input_ops(dataset,
                     batch_size,
                     num_threads=16,
                     is_training=False,
                     data_id=None,
                     scope='input',
                     shuffle=True):
    input_ops = {}

    if data_id is None:
        data_id = dataset.ids

        log.info("input_ops [%s]: Using %d IDs from dataset", scope,
                 len(data_id))
    else:
        log.info("input_ops [%s]: Using specified %d IDs", scope, len(data_id))

    with tf.device("/cpu:0"), tf.name_scope(scope):
        input_ops['id'] = tf.train.string_input_producer(
            tf.convert_to_tensor(data_id),
            capacity=128).dequeue(name='input_ids.dequeue')

        m = dataset.get_data(data_id[0])

        def load_fn(id):

            image = dataset.get_data(id)
            return (id, image.astype(np.float32))

        input_ops['id'], input_ops['image'] = tf.py_func(
            load_fn,
            inp=[input_ops['id']],
            Tout=[tf.string, tf.float32],
            name='func_hp')

        input_ops['id'].set_shape([])
        input_ops['image'].set_shape(list(m.shape))

    capacity = 2 * batch_size * num_threads
    min_capacity = min(int(capacity * 0.75), 1024)

    if shuffle:
        batch_ops = tf.train.shuffle_batch(input_ops,
                                           batch_size,
                                           num_threads=num_threads,
                                           capacity=capacity,
                                           min_after_dequeue=min_capacity)
    else:
        batch_ops = tf.train.batch(input_ops,
                                   batch_size=batch_size,
                                   num_threads=num_threads,
                                   capacity=capacity)

    return input_ops, batch_ops
Code Example #56
 def on_websocket_close(self, code: int, reason: str) -> NoReturn:
     log.debug(f"connection {self.sock_id} closed: {code} - {reason}")
     if not self.new_sock_added:
         log.debug(f"going to reconnect to router, webswocket close code: {code}")
         self.ws_client_farm.remove_ws(str(self.register_uri))
         self.when_consumed_action()
         self.new_sock_added = True
     if not self.req_complete and self.req_to_target is not None:
         if code != STATUS_UNEXPECTED_CONDITION:
             log.info(f"websocket closed before the target response was processed, This may be because the user"
                      f"closed therire browser, Going to cancel request to target {self.req_to_target.url}")
             self.req_to_target.abort(Exception("Socket to Router closed"))
Code Example #57
def fc(input,
       output_shape,
       is_train,
       info=False,
       norm='batch',
       activation_fn=lrelu,
       name="fc"):
    with tf.variable_scope(name):
        _ = slim.fully_connected(input, output_shape, activation_fn=None)
        _ = bn_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: log.info('{} {}'.format(name, _))
    return _
Code Example #58
    def __init__(self,
                 config,
                 dataset):
        self.config = config
        self.train_dir = config.train_dir
        log.info("self.train_dir = %s", self.train_dir)

        # --- input ops ---
        self.batch_size = config.batch_size

        self.dataset = dataset

        check_data_id(dataset, config.data_id)
        _, self.batch = create_input_ops(dataset, self.batch_size,
                                         data_id=config.data_id,
                                         is_training=False,
                                         shuffle=False)

        # --- create model ---
        Model = self.get_model_class(config.model)
        log.infov("Using Model class : %s", Model)
        self.model = Model(config)

        self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
        self.step_op = tf.no_op(name='step_no_op')

        tf.set_random_seed(1234)

        session_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1},
        )
        self.session = tf.Session(config=session_config)

        # --- checkpoint and monitoring ---
        self.saver = tf.train.Saver(max_to_keep=100)

        self.checkpoint_path = config.checkpoint_path
        if self.checkpoint_path is None and self.train_dir:
            self.checkpoint_path = tf.train.latest_checkpoint(self.train_dir)

        if self.checkpoint_path is None:
            log.warn("No checkpoint is given. Just random initialization :-)")
            self.session.run(tf.global_variables_initializer())
        else:
            log.info("Checkpoint path : %s", self.checkpoint_path)

        mean_std = np.load('../DatasetCreation/VG/mean_std.npz')
        self.img_mean = mean_std['img_mean']
        self.img_std = mean_std['img_std']
        self.coords_mean = mean_std['coords_mean']
        self.coords_std = mean_std['coords_std']
Code Example #59
def get_out_dir(dataset, checkpoint_path):
    # out_dir
    try:
        checkpoint_step = os.path.basename(checkpoint_path).split('-')[-1]
    except:
        checkpoint_step = 'latest'
    out_dir = os.path.abspath(
        os.path.join(checkpoint_path,
                     '../../generated-{}-{}'.format(dataset, checkpoint_step)))
    mkdir_p(out_dir)
    log.info("Out directory : %s", out_dir)
    return out_dir
Code Example #60
File: evaler.py Project: yanshui177/SSGAN-Tensorflow
    def report(self):
        # report L2 loss
        log.info("Computing scores...")
        score = {}
        score['l2_loss'] = []

        for id, pred, gt in zip(self._ids, self._predictions,
                                self._groundtruths):
            score['l2_loss'].extend(self.compute_l2error(pred, gt))

        avg_l2loss = np.average(score['l2_loss'])
        log.infov("Average L2 loss : %.5f", avg_l2loss)