Example #1
def E(level=1):
    if level == 0:
        from common import level1 as P
        P = partial(P, FOnly=True)  # higher-order function; here we only test the LEVEL-1 F CNN
    elif level == 1:
        from common import level1 as P
    elif level == 2:
        from common import level2 as P
    else:
        from common import level3 as P

    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 5))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = P(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
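
For context, a minimal driver for E (a hypothetical usage sketch; it assumes numpy is imported as np and that TXT and the common module are set up as in the snippet):

if __name__ == '__main__':
    error = E(level=1)
    # one row per test image, one column per landmark
    print('mean error per landmark: %s' % error.mean(axis=0))
    print('overall mean error: %f' % error.mean())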
def geneDataTxt(imgPath, landmarks):

    imgTxt = 'dataset/data/testImageList.txt'
    data = getDataFromTxt(imgTxt, False, False)  #image_path,bbox
    trainData = defaultdict(lambda: dict(landmarks=[], patches=[], label=[]))
    logger(
        "generate 25 positive samples and 500 negative samples for each landmark"
    )

    trainData = generateSamples(trainData, data, landmarks)

    arr = []
    for idx, name in types:
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        label = np.asarray(trainData[name]['label'])

        arr1 = landmarks.reshape(1, -1)
        arr.append(arr1)

        patches = processImage(patches)
        shuffle_in_unison_scary(patches, landmarks, label)

        # plain file handles have no append(); write the arrays out instead
        with open('test/%s.txt' % (name), 'w') as fd:
            np.savetxt(fd, landmarks.astype(np.float32))
            np.savetxt(fd, patches.astype(np.float32).reshape(len(patches), -1))
            np.savetxt(fd, label.astype(np.uint8))
def generateSamples(trainData, data, landmarks):
    t = 0
    for (imgPath, bbox) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert (img is not None)
        logger('process %s' % imgPath)
        height, width = img.shape[:2]
        #downsampled by 3: 3X3 Patch
        size = (int(width / 3), int(height / 3))
        img = cv2.resize(img, size, interpolation=cv2.INTER_NEAREST)

        trainData, t = getTrainData(trainData, landmarks, img)

        print('After getting raw data, there are %d samples' % t)

        r2 = 20 / 3
        r3 = 10  #400 / 3

        for idx, landmark in enumerate(landmarks):
            print('@@@@@@@@@@@@@@' + str(idx))
            # 25 Positive samples
            landmarkPs25 = randomShiftWithArgument(landmark, 0, r2, 25)
            trainData, t = getTrainData(trainData, landmarkPs25, img)

            #print ('After getting 25 positive samples,there are %d datas') % t
            # 500 negative samples
            landmarkNs500 = randomShiftWithArgument(landmark, r2, r3, 500)
            trainData, t = getTrainData(trainData, landmarkNs500, img)

            print(
                'After getting 25 positive and 500 negative samples, there are %d samples'
                % t)
    return trainData
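
randomShiftWithArgument is not shown in these snippets; judging from the call sites (landmark, min_range, max_range, count), a plausible sketch is the following (an assumption, not the original implementation):

import numpy as np

def randomShiftWithArgument(landmark, min_range, max_range, n):
    # produce n randomly shifted copies of a landmark; each coordinate is
    # offset by a random value whose magnitude lies in [min_range, max_range)
    samples = []
    for _ in range(n):
        offset = np.random.uniform(min_range, max_range, size=np.shape(landmark))
        sign = np.random.choice([-1, 1], size=np.shape(landmark))
        samples.append(np.asarray(landmark) + sign * offset)
    return samples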
Example #4
def generateSamples(trainData, data):
    t = 0
    print('##########################################################################')
    for (imgPath, landmarkGt, bbox) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)
        height, width = img.shape[:2]
        #downsampled by 3: 3x3 patch
        cephaImg = cv2.resize(img, (int(width / 3), int(height / 3)),
                              interpolation=cv2.INTER_NEAREST)

        #raw data
        #trainData,t = getData(trainData,landmarkGt,cephaImg,t)
        #print ('After getting raw data,there are %d datas') % t

        r1 = 20 / 3
        r2 = 20  #60/3
        r3 = 400  #400/3
        for idx, landmark in enumerate(landmarkGt):  # 19 landmarks
            # 25 Positive samples
            landmarkPs25 = randomShiftWithArgument(landmark, 0, r1, 25)
            trainData, t = getData(trainData, landmarkPs25, cephaImg, t)
            print('After getting 25 positive samples, there are %d samples' % t)
            # 500 negative samples
            landmarkNs500 = randomShiftWithArgument(landmark, r2, r3, 500)
            trainData, t = getData(trainData, landmarkNs500, cephaImg, t)
            print(
                'After getting 25 positive and 500 negative samples, there are %d samples'
                % t)

            if idx == 1:
                break
    return trainData
Example #5
def __send_message(sc, text, channel, thread_ts="", icon_url='ru', emoji='null'):
    # Requires a slackclient instance, channel and (optionally) thread_ts.
    # thread_ts is set when this is a thread response.
    # icon_url overrides the default avatar; set it to 'emoji' to use an
    # emoji instead, passed via the emoji argument.
    try:
        if "emoji" in icon_url:
            res = sc.api_call('chat.postMessage',
                              username=BOT_NAME,
                              icon_emoji=emoji,
                              as_user='******',
                              channel=channel,
                              text=text,
                              unfurl_links='true',
                              thread_ts=thread_ts)
        else:
            res = sc.api_call('chat.postMessage',
                              username=BOT_NAME,
                              icon_url=icon_url,
                              as_user='******',
                              channel=channel,
                              text=text,
                              unfurl_links='true',
                              thread_ts=thread_ts)
        logger('info', res)
        if not res['ok']:
            logger("warn", "Error: __send_message API Error: %s" % res['error'])

    # KeyError is not going through logger, debug it
    except KeyError as e:
        logger("ignore", str(e) + " error in __send_message.")
    except Exception as e:
        logger("warn", "Unknown error in __send_message: " + str(e))
def geneDataTxt(imgPath,landmarkREF,mode = 'test'):
    
    imgTxt = 'dataset/data/testImageList.txt'
    data = getDataFromTxt(imgTxt,False,False)  #image_path,bbox
    testData = defaultdict(lambda:dict(landmarks=[],patches=[],label=[]))
    logger("generate 25 positive samples and 500 negative samples for per landmark")
    
    testData = generateSamples(testData,data,landmarkREF)
    
    for idx,name in types:
        patches = np.asarray(testData[name]['patches'])
        landmarks = np.asarray(testData[name]['landmarks'])
        #label = np.asarray(testData[name]['label'])

        patches = processImage(patches)
        shuffle_in_unison_scary(patches,landmarks)
        
        createDir('dataset/test/%s' % imgPath[-7:-4])
        
        with h5py.File('dataset/test/%s/%s.h5' % (imgPath[-7:-4],name),'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
            #h5['label'] = label.astype(np.uint8)
        with open('dataset/test/%s/%s.txt' % (imgPath[-7:-4],name),'w') as fd:
            fd.write('dataset/test/%s/%s.h5'% (imgPath[-7:-4],name))
        
        #with open('dataset/test/%s.txt' % (name),'w') as fd:
Example #7
def E(level=1):
    if level == 0:
        from common import level1 as P
        P = partial(P, FOnly=True)  # higher-order function; here we only test the LEVEL-1 F CNN
    elif level == 1:
        from common import level1 as P
    elif level == 2:
        from common import level2 as P
    else:
        from common import level3 as P

    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 5))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = P(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
def navigate_to():
    """
        Navigation path:
        Personal Information -> Course Registration -> Add or Drop Classes -> Registration Term -> Add or Drop Classes
    """
    common.logger("Navigating to add/drop page... \n")
    # Click: Course Registration
    data.driver.find_elements_by_xpath(
        "//*[contains(text(), 'Course Registration')]")[0].click()
    common.logger("Click: Course Registration")
    WebDriverWait(data.driver,
                  10).until(ec.title_contains("Course Registration"))
    common.random_wait()

    # Click: Add or Drop Classes
    data.driver.find_elements_by_xpath(
        "//*[contains(text(), 'Add or Drop Classes')]")[0].click()
    common.logger("Click: Add or Drop Classes")
    WebDriverWait(data.driver,
                  10).until(ec.title_contains("Registration Term"))
    common.random_wait()

    # Click: submit
    data.driver.find_elements_by_xpath("//*[@value='Submit']")[0].click()
    common.logger("Click: submit")
    WebDriverWait(data.driver,
                  10).until(ec.title_contains("Add or Drop Classes"))

    if "You may register during the following times:" in data.driver.page_source:
        common.logger(
            "Got add/drop page! "
            f"Please wait {data.info.start_timestamp - int(time.time())} seconds... \n"
        )
def generate_data(ftxt, fname):
    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []
    F_genders = []
    F_smiles = []
    F_glasses = []
    F_poses = []
    F_all_attr = []

    for (imgPath, bbox, landmarkGt, gender, smile, glasses, pose,
         all_attr) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        print(imgPath)
        assert (img is not None)
        logger("process %s" % imgPath)

        f_bbox = bbox
        #f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)

        f_face = img[f_bbox.top:f_bbox.bottom + 1,
                     f_bbox.left:f_bbox.right + 1]
        f_face = cv2.resize(f_face, (39, 39))
        f_face = f_face.reshape((39, 39, 1))
        f_face = f_face / 255.0

        f_landmark = landmarkGt.reshape((10))

        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)
        F_genders.append(gender)
        F_smiles.append(smile)
        F_glasses.append(glasses)
        F_poses.append(pose)
        F_all_attr.append(all_attr)

    F_imgs = np.asarray(F_imgs)
    F_landmarks = np.asarray(F_landmarks)
    F_genders = np.asarray(F_genders)
    F_smiles = np.asarray(F_smiles)
    F_glasses = np.asarray(F_glasses)
    F_poses = np.asarray(F_poses)
    F_all_attr = np.asarray(F_all_attr)

    shuffle_in_unison_scary(F_imgs, F_landmarks, F_genders, F_smiles,
                            F_glasses, F_poses, F_all_attr)

    logger("generate %s" % fname)
    with h5py.File(fname, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmarks'] = F_landmarks.astype(np.float32)
        h5['genders'] = F_genders.astype(np.float32)
        h5['smiles'] = F_smiles.astype(np.float32)
        h5['glasses'] = F_glasses.astype(np.float32)
        h5['poses'] = F_poses.astype(np.float32)
        h5['all_attr'] = F_all_attr.astype(np.float32)
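
To sanity-check the file that generate_data writes, the datasets can be read back with h5py (a sketch; 'train.h5' stands in for whatever fname was passed):

import h5py

with h5py.File('train.h5', 'r') as h5:
    print(h5['data'].shape)       # (N, 39, 39, 1) normalized face crops
    print(h5['landmarks'].shape)  # (N, 10) flattened 5-point landmarks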
def refresh():
    common.logger("Refreshing! \n")
    data.driver.refresh()
    try:
        data.driver.switch_to.alert.accept()
    except selenium.common.exceptions.NoAlertPresentException:
        pass

    WebDriverWait(data.driver,
                  10).until(ec.title_contains("Add or Drop Classes"))
def wait_until():
    now = int(time.time())
    waiting = data.info.start_timestamp - now

    if now < data.info.start_timestamp:
        common.logger(f"Please wait {waiting} seconds "
                      f"until {data.info.start_time}, "
                      f"during the time, do not close this program. \n")

        if waiting >= 300:
            time.sleep(waiting - 299)
            common.logger(
                f"5 minutes to {data.info.start_time}, now login! \n")
            login()
            navigate_to()
        else:
            common.logger(
                f"Less than 5 minutes to {data.info.start_time}, now login! \n"
            )
            login()
            navigate_to()
            time.sleep(waiting - 5)

        while int(time.time()) <= data.info.start_timestamp - 1:
            time.sleep(0.1)
    else:
        common.logger("Now login! \n")
        login()
        navigate_to()
Example #12
def define(variables,
           msgdict,
           alternate_definition_index=0,
           ud_results_per_page=7):

    if msgdict["message"] in variables.yamldata["words"]:
        sa_def = __sa_dictionary(str(msgdict["message"]), variables.yamldata)
        resp = '<@{}> The Sys Admin dictionary defines `{}` as \n>>>{}'.format(
            msgdict["caller"], msgdict["message"], sa_def)
        __send_message(variables.sc, resp, msgdict["channel"],
                       msgdict["thread_ts"], custom_icon("icon_tux"))

    elif msgdict["message"]:
        parsed_message = msgdict["message"].split('alt:', 1)
        if len(parsed_message) > 1:
            try:
                alternate_definition_index = abs(int(parsed_message[1]))
            except ValueError:
                pass
        payload = {'term': parsed_message[0].strip()}

        if alternate_definition_index >= ud_results_per_page:
            payload['page'] = (alternate_definition_index //
                               ud_results_per_page) + 1
            alternate_definition_index %= ud_results_per_page
        r = requests.get("http://www.urbandictionary.com/define.php",
                         params=payload)
        try:
            soup = BeautifulSoup(r.content, "lxml")
        except ValueError:
            soup = "failure"
            logger("warn", "soup failure")
        definitions = soup.findAll("div", attrs={"class": "meaning"})
        try:
            resp = '<@{}> Urban Dictionary defines `{}` as ```{}```'.format(
                msgdict["caller"], parsed_message[0].strip(),
                definitions[alternate_definition_index].text)
        except IndexError:
            resp = '<@{}> Urban Dictionary doesn\'t have `{}` definitions for `{}`...'.format(
                msgdict["caller"], alternate_definition_index + 1,
                parsed_message[0].strip())
        __send_message(variables.sc, resp, msgdict["channel"],
                       msgdict["thread_ts"],
                       custom_icon("icon_urban_dictionary"))
    else:
        __send_message(variables.sc,
                       "what exactly are you asking me to define?",
                       msgdict["channel"], msgdict["thread_ts"],
                       custom_icon("icon_urban_dictionary"))
Example #13
def generate(ftxt, mode, argument=False):
    '''
    Build the training data source for stage two
    :param ftxt: path of the data source file with labels
    :param mode: train or test
    :param argument: whether to augment with randomly shifted landmarks
    :return:
    '''
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.05)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx],
                                             padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File(
                '/python/face_key_point/data_hdf5/train/2_%s/%s.h5' %
            (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open(
                '/python/face_key_point/data_hdf5/train/2_%s/%s.txt' %
            (name, mode), 'w') as fd:
            fd.write('/python/face_key_point/data_hdf5/train/2_%s/%s.h5' %
                     (name, mode))
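
The .h5/.txt pairing written above follows the convention of Caffe's HDF5Data layer, where the .txt file lists the HDF5 shards to load. A quick integrity check of one shard might look like this (a sketch; '2_LE1' is a hypothetical patch-type directory):

import h5py

with h5py.File('/python/face_key_point/data_hdf5/train/2_LE1/train.h5', 'r') as h5:
    assert h5['data'].shape[1:] == (1, 15, 15)  # one 15x15 grayscale patch each
    assert h5['landmark'].shape[1] == 2         # one (x, y) target per patch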
    def __init__(self, serverName, serverPort, cfg_params, proxyPath=None):
        """
        Open the communication with an Analysis Server by passing the server URL and the port
        """

        self.ServerTwiki = (
            "https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideCrabServerForUsers#Server_available_for_users"
        )

        self.asSession = C_AS_Session(serverName, serverPort)
        self.cfg_params = cfg_params
        self.userSubj = ""
        self.serverName = serverName
        credentialType = "Proxy"
        if common.scheduler.name().upper() in ["CAF", "LSF"]:
            credentialType = "Token"
        CliServerParams(self)
        self.crab_task_name = common.work_space.topDir().split("/")[-2]  # nice task name "crab_0_..."

        configAPI = {"credential": credentialType, "logger": common.logger()}

        CredAPI = CredentialAPI(configAPI)
        try:
            self.userSubj = CredAPI.getSubject()
        except Exception, err:
            common.logger.debug("Getting Credential Subject: " + str(traceback.format_exc()))
            raise CrabException("Error Getting Credential Subject")
Example #15
    def configure(self,cfg_params):
        self.cfg_params = cfg_params
        self.deep_debug= self.cfg_params.get("USER.deep_debug",'0')
        server_check =  self.cfg_params.get("CRAB.server_name",None)
        if self.deep_debug == '1' and server_check != None :
            msg =  'You are asking for deep_debug, but it cannot work when using the server.\n'
            msg += '\t The functionality will have no effect.'
            common.logger.info(msg) 
        self.schedulerName =  self.cfg_params.get("CRAB.scheduler",'') # this should match with the bosslite requirements
        self.rb_param_file=''

        self.wrapper = cfg_params.get('CRAB.jobtype').upper()+'.sh'


        ## Add here the map for other Schedulers (LSF/CAF/CondorG)
        SchedMap = {'glite':     schedulerGlite(),
                    'condor':   'SchedulerCondor',\
                    'condor_g': 'SchedulerCondorG',\
                    'glidein':  'SchedulerGlidein',\
                    'remoteglidein': 'SchedulerRemoteglidein',\
                    'lsf':      'SchedulerLsf',\
                    'caf':      'SchedulerLsf',\
                    'sge':      'SchedulerSge',\
                    'arc':      'SchedulerARC',\
                    'pbs':      'SchedulerPbs'
                    }

        self.schedulerConfig = common.scheduler.realSchedParams(cfg_params)
        self.schedulerConfig['name'] =  SchedMap[(self.schedulerName).lower()]
        self.schedulerConfig['timeout'] = self.setBliteTimeout()
        self.schedulerConfig['skipProxyCheck'] = True 
        self.schedulerConfig['logger'] = common.logger()

        self.session = None
        return
Example #16
    def __init__(self, serverName, serverPort, cfg_params, proxyPath=None):
        """
        Open the communication with an Analysis Server by passing the server URL and the port
        """

        self.ServerTwiki = 'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideCrabServerForUsers#Server_available_for_users'

        self.asSession = C_AS_Session(serverName, serverPort)
        self.cfg_params = cfg_params
        self.userSubj = ''
        self.serverName = serverName
        credentialType = 'Proxy'
        if common.scheduler.name().upper() in ['CAF','LSF']:
            credentialType = 'Token'
        CliServerParams(self)
        self.crab_task_name = common.work_space.topDir().split('/')[-2] # nice task name "crab_0_..."

        configAPI = {'credential' : credentialType, \
                     'logger' : common.logger() }

        CredAPI =  CredentialAPI( configAPI )
        try:
            self.userSubj = CredAPI.getSubject()
        except Exception, err:
            common.logger.debug("Getting Credential Subject: " +str(traceback.format_exc()))
            raise CrabException("Error Getting Credential Subject")
Example #17
 def ce_list(self):
     ceParser = CEBlackWhiteListParser(self.EDG_ce_white_list,
                                       self.EDG_ce_black_list,
                                       common.logger())
     wl = ','.join(ceParser.whiteList()) or None
     bl = ','.join(ceParser.blackList()) or None
     return '', wl, bl
Example #18
    def __init__(self, endpoint, cachepath='', cacheduration = 0.5, timeout = 20, \
                 type = "txt/csv", logger = None ):
        ## if not already defined set default CachePath to $HOME/.cms_crab   
        if cachepath =='':
            import os, pwd
            if os.getenv('CMS_CRAB_CACHE_DIR'):
                cachepath ='%s/.cms_crab'%os.getenv('CMS_CRAB_CACHE_DIR') 
            elif os.getenv('HOME'):
                cachepath='%s/.cms_crab'%os.getenv('HOME')
            else:
                cachepath = '/tmp/crab_cache_' + pwd.getpwuid(os.getuid())[0]
 
        if not os.path.isdir(cachepath):
            try:
                os.mkdir(cachepath)
            except OSError:
                common.logger.info('Warning: cannot create %s. Using current directory' % cachepath)
                cachepath = os.getcwd()
       
        if not logger: logger = common.logger()

        self.wmcorecache = {}
        self.wmcorecache['logger'] = logger
        self.wmcorecache['cachepath'] = cachepath   ## cache area
        self.wmcorecache['cacheduration'] = cacheduration  ## hours; defaults to half an hour
        self.wmcorecache['timeout'] = timeout              ## seconds
        self.wmcorecache['endpoint'] = endpoint
Example #19
 def __init__(self, port, allowed_lost, window_size, loss_rate, corrupt_rate, log_file):
     self.my_tunnel = common.magic_tunnel(loss_rate, corrupt_rate)
     self.my_logger = common.logger(log_file)
     self.my_wildcat_receiver = wildcat_receiver.wildcat_receiver(allowed_lost, window_size, self.my_tunnel, self.my_logger)
     self.my_wildcat_receiver.start()
     self.my_tunnel.my_recv = self.my_wildcat_receiver.receive
     self.udp_receiver = start_receiver.UDP_receiver(port, self.my_tunnel)
     self.udp_receiver.start()
Example #20
def generate(ftxt, mode, argument=False):
    """
        Generate Training Data for LEVEL-3
        mode = train or test
    """
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx],
                                             padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File(
                '/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.h5' %
            (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open(
                '/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.txt' %
            (name, mode), 'w') as fd:
            fd.write(
                '/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.h5' %
                (name, mode))
Example #21
def E():
    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 3))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        landmarkGt = landmarkGt[2:, :]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = NM(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
Example #23
    def BuildJobList(self, type=0):
        # total jobs
        nj_list = []
        self.complete_List = common._db.nJobs('list')
        if type == 1:
            self.nj_list = []
            if self.chosenJobsList: self.nj_list = self.chosenJobsList
            return
        # build job list
        from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
        self.blackWhiteListParser = SEBlackWhiteListParser(
            self.seWhiteList, self.seBlackList, common.logger())
        common.logger.debug('nsjobs ' + str(self.nsjobs))
        # get the first not already submitted
        common.logger.debug('Total jobs ' + str(len(self.complete_List)))

        jobSetForSubmission = 0
        jobSkippedInSubmission = []
        tmp_jList = self.complete_List
        if self.chosenJobsList != None:
            tmp_jList = self.chosenJobsList
        for job in common._db.getTask(tmp_jList).jobs:
            cleanedBlackWhiteList = self.blackWhiteListParser.cleanForBlackWhiteList(
                job['dlsDestination'])
            if (cleanedBlackWhiteList != '') or (self.datasetPath == None):
                #if ( job.runningJob['status'] in ['C','RC'] and job.runningJob['statusScheduler'] in ['Created',None]):
                if (job.runningJob['state'] in ['Created']):
                    jobSetForSubmission += 1
                    nj_list.append(job['id'])
                else:
                    continue
            else:
                jobSkippedInSubmission.append(job['id'])
            if self.nsjobs > 0 and self.nsjobs == jobSetForSubmission:
                break
            pass
        if self.nsjobs > jobSetForSubmission:
            common.logger.info('asking to submit '+str(self.nsjobs)+' jobs, but only '+\
                                  str(jobSetForSubmission)+' left: submitting those')
        if len(jobSkippedInSubmission) > 0:
            mess = ""
            for jobs in jobSkippedInSubmission:
                mess += str(jobs) + ","
            common.logger.info(
                "Jobs:  " + str(mess) +
                "\n\tskipped because no sites are hosting this data\n")
            self.submissionError()
            pass
        # submit N from last submitted job
        common.logger.debug('nj_list ' + str(nj_list))
        self.nj_list = nj_list
        if self.limitJobs and len(self.nj_list) > 500:
            ###### FEDE FOR BUG 85243 ##############
            msg = "The CRAB client will not submit task with more than 500 jobs.\n"
            msg += "      Use the server mode or submit your jobs in smaller groups"
            raise CrabException(msg)
            ########################################
        return
Example #24
 def __init__(self, name, server_creator, *server_creator_args):
     if server_creator is None:
         raise Exception("%s creator is None" % name)
     self._log = common.logger(name)
     self._name = name
     self._server_creator = server_creator
     self._server_creator_args = server_creator_args
     self._pid = None
     self._released_event = multiprocessing.Event()
Example #25
def generate(txt, mode, N):
    """
        generate Training Data for LEVEL-2: patches around landmarks
        mode: train or validate
    """
    assert (mode == 'train')
    # read the samples from the txt file
    data = getDataFromTxt(txt, True,
                          True)  #return [(image_path,landmarks,bbox)]
    # store the samples in a defaultdict
    trainData = defaultdict(lambda: dict(landmarks=[], patches=[], label=[]))
    logger(
        "generate 25 positive samples and 500 negative samples for each landmark"
    )

    # generate positive and negative training samples
    trainData = generateSamples(trainData, data)
    print('All data: %d' % len(trainData))
    #print trainData['L1']
    #arr = []

    for idx, name in types:  # 19 landmarks
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        label = np.asarray(trainData[name]['label'])

        #arr1 = landmarks.reshape(1,-1)
        #arr.append(arr1)

        patches = processImage(patches)
        shuffle_in_unison_scary(patches, landmarks, label)

        # save the training data in HDF5 format
        with h5py.File('train/2_%s/%s.h5' % (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
            h5['label'] = label.astype(np.uint8)
        with open('train/2_%s/%s.txt' % (name, mode), 'w') as fd:
            fd.write('train/2_%s/%s.h5' % (name, mode))
        # for now, only generate data for one landmark
        if idx == 1:
            break
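
A hypothetical invocation of the level-2 generator above (the path is a placeholder; note that N is accepted but never used in this snippet):

generate('dataset/train/trainImageList.txt', 'train', N=25)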
def set_driver():
    # Set driver
    common.logger("Setting up browser driver...")
    chrome_options = webdriver.ChromeOptions()
    chrome_options.headless = True
    # chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument(
        'blink-settings=imagesEnabled=false')  # Not loading images
    chrome_options.add_argument(
        '--log-level=3')  # Disable logs issued by Chrome
    chrome_options.add_argument(f"user-agent={data.headers['User-Agent']}")

    driver = None
    if 'Windows' in platform.system():
        driver = webdriver.Chrome(executable_path=r'venv\chromedriver.exe',
                                  options=chrome_options)

    data.driver = driver
    common.logger("Driver is set! \n")
def generateSamples(testData,data,landmarks):
    for (imgPath,bbox) in data:
        img = cv2.imread(imgPath,cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger('process %s' % imgPath)
        height,width = img.shape[:2]
        #downsampled by 3: 3X3 Patch
        #size = (int(width/3),int(height/3))
        size = (width,height)  # no downsampling
        img = cv2.resize(img,size,interpolation=cv2.INTER_NEAREST)

        testData = getData(testData,landmarks,img)  # testData is a dict storing sample points and their image patches
        
        print('After getting raw data, there are %d samples' % len(testData))
        for idx,landmark in enumerate(landmarks):
            # generate shifted sample points
            landmark_samples = randomShiftWithArgument(landmark,0,100,150)
            testData = getData(testData,landmark_samples,img)
     
    return testData
Example #28
def __send_snippet(sc, text, channel, thread_ts="", initial_comment='', title='-'):
    # Required slackclient, channel, thread_ts
    # thread_ts if this is a thread response
    logger('info', initial_comment)
    try:
        res = sc.api_call('files.upload',
                          channels=channel,
                          content=text,
                          initial_comment=initial_comment,
                          title=title,
                          thread_ts=thread_ts)
        logger('info', res)
        if res['ok'] != "True":
            logger("warn", "Error: __send_snippet API Error: %s" % res['error'])

    # KeyError is not going through logger, debug it
    except KeyError as e:
        logger("ignore", str(e) + " error in __send_snippet.")
    except Exception as e:
        logger("warn", "Unknown error in __send_snippet: " + str(e))
Example #29
 def __init__(self, cfg_params, nj_list):
     self.cfg_params = cfg_params
     self.nj_list = nj_list
     from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
     seWhiteList = cfg_params.get('GRID.se_white_list',[])
     seBlackList = cfg_params.get('GRID.se_black_list',[])
     self.blackWhiteListParser = SEBlackWhiteListParser(seWhiteList, seBlackList, common.logger())
     self.datasetpath=self.cfg_params['CMSSW.datasetpath']
     if string.lower(self.datasetpath)=='none':
         self.datasetpath = None
     return
Example #30
 def __init__(self, cfg_params, nj_list):
     self.cfg_params = cfg_params
     self.nj_list = nj_list
     from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
     seWhiteList = cfg_params.get('GRID.se_white_list', [])
     seBlackList = cfg_params.get('GRID.se_black_list', [])
     self.blackWhiteListParser = SEBlackWhiteListParser(
         seWhiteList, seBlackList, common.logger())
     self.datasetpath = self.cfg_params['CMSSW.datasetpath']
     if string.lower(self.datasetpath) == 'none':
         self.datasetpath = None
     return
def E(level=1):
    if level == 1:
        from common import level1 as P
    elif level == 2:
        from common import level2 as P

    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 19))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)

        landmarkP = P(img, bbox)

        #real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
Example #32
def __send_ephemeral(sc, text, channel, caller):
    try:
        res = sc.api_call('chat.postEphemeral',
                          as_user="******",
                          channel=channel,
                          user=caller,
                          text=text)
        if res['ok'] != "True":
            logger("warn", "Error: __send_ephemeral API Error: %s" % res['error'])
        logger('info', res)
    except KeyError as e:
        logger("ignore", str(e) + " error in __send_ephemeral.")
    except Exception as e:
        logger("warn", "Unknown error in __send_ephemeral: " + str(e))
def main():

    pointfiledir = os.path.join(dirname, 'AnnotationByMD/400_senior/')
    imgdir = os.path.join(dirname, 'TrainData')

    logger("generate active shape model!!")

    shapes = PointsReader.read_directory(pointfiledir)
    asm = ActiveShapeModel(shapes)

    logger("draw model fitter on the given images!")
    for i in range(1, 2):
        s = str(i).zfill(3) + '.bmp'

        # load the image
        img = cv2.imread(os.path.join(imgdir, s), cv2.CV_LOAD_IMAGE_GRAYSCALE)
        model = ModelFitter(asm, img)
        ShapeViewer.draw_model_fitter(model)

        for j in range(100):
            model.do_iteration(0)
            ShapeViewer.draw_model_fitter(model)
Example #34
def generate(ftxt, mode, argument=False):
    """
        Generate Training Data for LEVEL-3
        mode = train or test
    """
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s'%name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File('train/3_%s/%s.h5'%(name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('train/3_%s/%s.txt'%(name, mode), 'w') as fd:
            fd.write('train/3_%s/%s.h5'%(name, mode))
Example #35
    def ce_list(self):
        """
        Returns string with requirement CE related
        """
        ceParser = CEBlackWhiteListParser(self.EDG_ce_white_list,
                                          self.EDG_ce_black_list,
                                          common.logger())
        req = ''
        ce_white_list = []
        ce_black_list = []
        if self.EDG_ce_white_list:
            ce_white_list = ceParser.whiteList()
            tmpCe = []
            concString = '&&'
            for ce in ce_white_list:
                tmpCe.append('RegExp("' + string.strip(ce) +
                             '", other.GlueCEUniqueId)')
            if len(tmpCe) == 1:
                req += " && (" + concString.join(tmpCe) + ") "
            elif len(tmpCe) > 1:
                firstCE = 0
                for reqTemp in tmpCe:
                    if firstCE == 0:
                        req += " && ( (" + reqTemp + ") "
                        firstCE = 1
                    elif firstCE > 0:
                        req += " || (" + reqTemp + ") "
                if firstCE > 0:
                    req += ") "

        if self.EDG_ce_black_list:
            ce_black_list = ceParser.blackList()
            tmpCe = []
            concString = '&&'
            for ce in ce_black_list:
                tmpCe.append('(!RegExp("' + string.strip(ce) +
                             '", other.GlueCEUniqueId))')
            if len(tmpCe): req += " && (" + concString.join(tmpCe) + ") "

        # requirement added to skip gliteCE
        # not more needed
#       req += '&& (!RegExp("blah", other.GlueCEUniqueId))'
        retWL = ','.join(ce_white_list)
        retBL = ','.join(ce_black_list)
        if not retWL:
            retWL = None
        if not retBL:
            retBL = None

        return req, retWL, retBL
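
For a concrete sense of the output, a white list of two CEs would yield a requirement string of roughly this shape (illustrative values only):

# EDG_ce_white_list = 'ce1.example.org,ce2.example.org' produces approximately:
# req == ' && ( (RegExp("ce1.example.org", other.GlueCEUniqueId)) '
#        '|| (RegExp("ce2.example.org", other.GlueCEUniqueId)) ) '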
Example #36
def __impersonator(sc, text, channel, userpic, username, thread_ts=""):
    try:
        res = sc.api_call('chat.postMessage',
                          username=username,
                          icon_url=userpic,
                          as_user='******',
                          channel=channel,
                          text=text,
                          thread_ts=thread_ts)
        if res['ok'] != "True":
            logger("warn", "Error: __impersonator API Error: %s" % res['error'])
        logger('info', res)
    except KeyError as e:
        logger("ignore", str(e) + " error in __impersonator")
    except Exception as e:
        logger("warn", "Unknown error in __impersonator: " + str(e))
Example #38
    def configure(self,cfg_params):
        self.cfg_params = cfg_params
        self.deep_debug= self.cfg_params.get("USER.deep_debug",'0')
        server_check =  self.cfg_params.get("CRAB.server_name",None)
        if self.deep_debug == '1' and server_check != None :
            msg =  'You are asking for deep_debug, but it cannot work when using the server.\n'
            msg += '\t The functionality will have no effect.'
            common.logger.info(msg) 
        self.schedulerName =  self.cfg_params.get("CRAB.scheduler",'') # this should match with the bosslite requirements
        self.rb_param_file=''

        self.wrapper = cfg_params.get('CRAB.jobtype').upper()+'.sh'


        ## Add here the map for other Schedulers (LSF/CAF/CondorG)
        SchedMap = {'glite':     schedulerGlite(),
                    'condor':   'SchedulerCondor',\
                    'rcondor':  'SchedulerRcondor',\
                    'condor_g': 'SchedulerCondorG',\
                    'glidein':  'SchedulerGlidein',\
                    'lsf':      'SchedulerLsf',\
                    'caf':      'SchedulerLsf',\
                    'sge':      'SchedulerSge',\
                    'arc':      'SchedulerARC',\
                    'pbs':      'SchedulerPbs',\
                    'pbsv2':     'SchedulerPbsv2',\
                    'pbsv2withsrm':'SchedulerPbsv2withsrm',\
                    'pbsv2withlstore':'SchedulerPbsv2withlstore',\
                    }

        self.schedulerConfig = common.scheduler.realSchedParams(cfg_params)
        self.schedulerConfig['name'] =  SchedMap[(self.schedulerName).lower()]
        self.schedulerConfig['timeout'] = self.setBliteTimeout()
        self.schedulerConfig['skipProxyCheck'] = True 
        self.schedulerConfig['logger'] = common.logger()

        self.session = None
        return
Example #39
    def checkProxy(self, minTime=10):
        """
        Function to check the Globus proxy.
        """
        if (self.proxyValid): return

        ### Just return if asked to do so
        if (self.dontCheckProxy==1):
            self.proxyValid=1
            return
        CredAPI_config =  { 'credential':'Proxy',\
                            'myProxySvr': self.proxyServer, \
                            'logger': common.logger() \
                          }   
        from ProdCommon.Credential.CredentialAPI import CredentialAPI 
        CredAPI = CredentialAPI(CredAPI_config)

        if not CredAPI.checkCredential(Time=int(minTime)) or \
           not CredAPI.checkAttribute(group=self.group, role=self.role):
            try:
                CredAPI.ManualRenewCredential(group=self.group, role=self.role) 
            except Exception, ex:
                raise CrabException(str(ex))   
Example #40
    def configure(self, cfg_params):
        self.cfg_params = cfg_params
        self.deep_debug = self.cfg_params.get("USER.deep_debug", "0")
        server_check = self.cfg_params.get("CRAB.server_name", None)
        if self.deep_debug == "1" and server_check != None:
            msg = "You are asking the deep_debug, but it cannot works using the server.\n"
            msg += "\t The functionality will not have effect."
            common.logger.info(msg)
        self.schedulerName = self.cfg_params.get(
            "CRAB.scheduler", ""
        )  # this should match with the bosslite requirements
        self.rb_param_file = ""

        self.wrapper = cfg_params.get("CRAB.jobtype").upper() + ".sh"

        ## Add here the map for other Schedulers (LSF/CAF/CondorG)
        SchedMap = {
            "glite": schedulerGlite(),
            "condor": "SchedulerCondor",
            "condor_g": "SchedulerCondorG",
            "glidein": "SchedulerGlidein",
            "remoteglidein": "SchedulerRemoteglidein",
            "lsf": "SchedulerLsf",
            "caf": "SchedulerLsf",
            "sge": "SchedulerSge",
            "arc": "SchedulerARC",
            "pbs": "SchedulerPbs",
        }

        self.schedulerConfig = common.scheduler.realSchedParams(cfg_params)
        self.schedulerConfig["name"] = SchedMap[(self.schedulerName).lower()]
        self.schedulerConfig["timeout"] = self.setBliteTimeout()
        self.schedulerConfig["skipProxyCheck"] = True
        self.schedulerConfig["logger"] = common.logger()

        self.session = None
        return
Example #41
 def ce_list(self):
     ceParser = CEBlackWhiteListParser(self.EDG_ce_white_list,
                                       self.EDG_ce_black_list, common.logger())
     wl = ','.join(ceParser.whiteList()) or None
     bl = ','.join(ceParser.blackList()) or None
     return '', wl, bl
Example #42
    def configure(self, cfgParams):
        """
        Configure the scheduler with the config settings from the user
        """
        # FIXME: Get rid of try/except and use get() instead

        if not os.environ.has_key('EDG_WL_LOCATION'):
            # This is an ugly hack needed for SchedulerGrid.configure() to
            # work!
            os.environ['EDG_WL_LOCATION'] = ''

        if not os.environ.has_key('X509_USER_PROXY'):
            # Set X509_USER_PROXY to the default location.  We'll do this
            # because in functions called by Scheduler.checkProxy()
            # voms-proxy-info will be called with '-file $X509_USER_PROXY',
            # so if X509_USER_PROXY isn't set, it won't work.
            os.environ['X509_USER_PROXY'] = '/tmp/x509up_u' + str(os.getuid())

        SchedulerGrid.configure(self, cfgParams)
        if cfgParams.get('CRAB.server_name',None) or cfgParams.get('CRAB.use_server',None):
            pass
        else:
            self.checkCondorSetup()

        # init BlackWhiteListParser
        ceWhiteList = cfgParams.get('GRID.ce_white_list',[])
        ceBlackList = cfgParams.get('GRID.ce_black_list',[])
        self.ceBlackWhiteListParser = \
            CEBlackWhiteListParser(ceWhiteList, ceBlackList, common.logger())

        try:
            self.GLOBUS_RSL = cfgParams['CONDORG.globus_rsl']
        except KeyError:
            self.GLOBUS_RSL = ''

        # Provide an override for the batchsystem that condor_g specifies
        # as a grid resource. This is to handle the case where the site
        # supports several batchsystems but the BDII only allows a site
        # to publish one.
        try:
            self.batchsystem = cfgParams['CONDORG.batchsystem']
            msg = self.msgPre + 'batchsystem override specified in your crab.cfg'
            common.logger.debug(msg)
        except KeyError:
            self.batchsystem = ''

        self.datasetPath = ''

        try:
            tmp =  cfgParams['CMSSW.datasetpath']
            if tmp.lower() == 'none':
                self.datasetPath = None
                self.selectNoInput = 1
            else:
                self.datasetPath = tmp
                self.selectNoInput = 0
        except KeyError:
            msg = "Error: datasetpath not defined "
            raise CrabException(msg)

        return
from StringIO import StringIO 
from lib import transmissionrpc
import sys, base64, os

torrent_files = sys.argv[1:]
client = get_client()

download_dir = client.session.download_dir

def upload_file(file_path):
    output = StringIO()
    base64.encode(file(file_path,"rb"), output)
    result = client.add(output.getvalue())
    logger.log(TorrentAddedEvent(file_path))

remove_files = True
for file_path in torrent_files:
    logger(UploadingTorrentEvent(file_path))
    try:
        upload_file(file_path)
        if remove_files: os.remove(file_path)
    except transmissionrpc.TransmissionError, e:
        if '"duplicate torrent"' in e.message:
            if remove_files: os.remove(file_path)
            logger.log(DuplicatedTorrent(file_path))
        else:
            logger.log(Error("Error uploading file", e))
    except Exception, e:
        logger.log(Error("Error uploading file", e))
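
get_client is not defined in this snippet; with the bundled transmissionrpc library it would typically be a thin wrapper like the following (an assumption):

def get_client():
    # connect to a local Transmission daemon on the default RPC port
    return transmissionrpc.Client('localhost', port=9091)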
    
  bname = profile.bestname()
  if not bname.isnumeric():
    confirmed_names.append(bname)

confirmed_names = list(set(confirmed_names))

wf = open(namesfile,'w')

for name in confirmed_names:
  wf.write('{}\n'.format(name))
wf.close()

logger.info('Wrote {} names to {}'.format(len(confirmed_names), namesfile))

#Set up to download search results.
logger('Initialising expansion DB.')
exp_db = prefix+'expand-db.csv'
exprawdir = prefix+'expand-raw'
expprofdir = prefix+'expand-profiles'
expstore = common.profilestore.ProfileStore(exp_db, logger)

searchers = []

if fbconn:
  searchers.append(facebook.search.FacebookSearch(fbconn, expstore, logger))
if gpconn:
  searchers.append(gplus.search.GPlusSearch(gpconn, expstore, logger))
if twconn:
  searchers.append(twitter.search.TwitterSearch(twconn, expstore, logger))
if liconn:
  searchers.append(linkedin.search.LinkedInSearch(expstore, logger))
transmission_home_dir = client.session.download_dir

if not transmission_home_dir.endswith("/"):
    transmission_home_dir += "/"
count = 0
for torrent in finished_torrents_with_new_files:
    files = [f["name"] for f in client.get_files(torrent.id)[torrent.id].values()
             if f["completed"] == f["size"]]
    f = file("files_to_download.txt","ab")
    for name in files:
        full_remote_path = transmission_home_dir+name
        local_dir = name.rsplit("/",1)[0]
        f.write('!mkdir -p "%s"\n' % local_dir)
        f.write('get -c "%s" -o "%s"\n' % (full_remote_path, name))
    f.close()
    
    f = file("already_downloaded.txt","a")
    f.write(torrent.name+"\n")
    f.close()

    logger(MarkToDownload(torrent.name))
    count += 1

if count == 0:
    logger(NoNewTorrents())

f = file("files_to_download.txt","ab")
f.write('!rm files_to_download.txt\n')
f.close()

Example #46
'''
Example of Usage
'''

import common
from ccrawler import CCrawler, Request
from selector import HtmlSelector

import logging
logger = common.logger(name=__name__, filename='ccrawler.log', level=logging.DEBUG)

class DummySpider:
    start_urls = ['http://www.blueidea.com/photo/gallery/']
    #start_urls = ['http://disclosure.szse.cn/m/drgg000023.htm', 'http://disclosure.szse.cn/m/drgg000024.htm']
    #start_urls = ['http://www.baidu.com', 'http://www.google.com', 'http://www.google.hk']
    workers = 100
    timeout = 8

    def parse(self, response):
        hxs = HtmlSelector(response)
        '''
        Usage of re
        '''
        '''
        itemlist = hxs.re('<td class=\'td10\'> .*?<\/td>')
        for item in itemlist:
            title = item.re('<a[^>]*[^>]*>(.*)[^<]*<\/a>')
            print title
        '''
Example #47
from common import level1, level2, level3


TXT = 'dataset/test/lfpw_test_249_bbox.txt'

if __name__ == '__main__':
    assert(len(sys.argv) == 2)
    level = int(sys.argv[1])
    if level == 0:
        P = partial(level1, FOnly=True)
    elif level == 1:
        P = level1
    elif level == 2:
        P = level2
    else:
        P = level3

    OUTPUT = 'dataset/test/out_{0}'.format(level)
    createDir(OUTPUT)
    data = getDataFromTxt(TXT, with_landmark=False)
    for imgPath, bbox in data:
        img = cv2.imread(imgPath)
        assert(img is not None)
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        logger("process %s" % imgPath)

        landmark = P(imgGray, bbox)
        landmark = bbox.reprojectLandmark(landmark)
        drawLandmark(img, bbox, landmark)
        cv2.imwrite(os.path.join(OUTPUT, os.path.basename(imgPath)), img)
            myproxyserver = myproxyserver.strip()
            if not myproxyserver:  # strip() never returns None; guard against an empty file instead
                raise CrabException("myproxy_server.conf retrieved but empty")
        except Exception, e:
            common.logger.info("Problem setting myproxy server endpoint: using myproxy.cern.ch")
            common.logger.debug(e)
            myproxyserver = 'myproxy.cern.ch'

        configAPI = {'credential' : self.credentialType, \
                     'myProxySvr' : myproxyserver,\
                     'serverDN'   : self.server_dn,\
                     'shareDir'   : common.work_space.shareDir() ,\
                     'userName'   : getUserName(),\
                     'serverName' : self.server_name, \
                     'proxyPath'  : self.proxy_path, \
                     'logger'     : common.logger() \
                     }

        try:
            CredAPI =  CredentialAPI( configAPI )
        except Exception, err :
            common.logger.debug("Configuring Credential API: " +str(traceback.format_exc()))
            raise CrabException("ERROR: Unable to configure Credential Client API  %s\n"%str(err))


        if  self.credentialType == 'Proxy':
            # Proxy delegation through MyProxy, 4 days lifetime minimum
            if not CredAPI.checkMyProxy(Time=4, checkRetrieverRenewer=True) :
                common.logger.info("Please renew MyProxy delegated proxy:\n")
                try:
                    CredAPI.credObj.serverDN = self.server_dn
Example #49
    def __init__( self, cfg_params,  args ):

        self.cfg_params = cfg_params
        self.args=args

        self.lumisPerJob = -1
        self.totalNLumis = 0
        self.theNumberOfJobs = 0
        self.limitNJobs = False
        self.limitTotalLumis = False
        self.limitJobLumis = False

        #self.maxEvents
        # init BlackWhiteListParser
        self.seWhiteList = cfg_params.get('GRID.se_white_list',[])
        if type(self.seWhiteList) == type("string"):
            self.seWhiteList = self.seWhiteList.split(',')
        seBlackList = cfg_params.get('GRID.se_black_list',[])
        if type(seBlackList) == type("string"):
            seBlackList = seBlackList.split(',')
        if common.scheduler.name().upper() == 'REMOTEGLIDEIN' :
            # use central black list
            removeBList = cfg_params.get("GRID.remove_default_blacklist", 0 )
            blackAnaOps = None
            if int(removeBList) == 0:
                blacklist = Downloader("http://cmsdoc.cern.ch/cms/LCG/crab/config/")
                result = blacklist.config("site_black_list.conf").strip().split(',')
                if result != None:
                    blackAnaOps = result
                    common.logger.debug("Enforced black list: %s "%blackAnaOps)
                else:
                    common.logger.info("WARNING: Skipping default black list!")
                if int(removeBList) == 0 and blackAnaOps: 
                    seBlackList += blackAnaOps

        self.blackWhiteListParser = SEBlackWhiteListParser(self.seWhiteList, seBlackList, common.logger())

        if seBlackList:
            common.logger.info("SE black list applied to data location: %s" % seBlackList)
        if self.seWhiteList:
            common.logger.info("SE white list applied to data location: %s" % self.seWhiteList)
        # apply BW list
        blockSites=args['blockSites']
        common.logger.debug("List of blocks and used locations (SE):")
        for block,dlsDest in blockSites.iteritems():
            noBsites=self.blackWhiteListParser.checkBlackList(dlsDest)
            sites=self.blackWhiteListParser.checkWhiteList(noBsites)
            if sites: blockSites[block] = sites
            common.logger.debug("%s : %s" % (block,sites))
        args['blockSites']=blockSites
            
        ## check whether a non-default file was requested to store/read analyzed fileBlocks
        defaultName = common.work_space.shareDir() + 'AnalyzedBlocks.txt'
        self.fileBlocks_FileName = os.path.abspath(self.cfg_params.get('CMSSW.fileblocks_file', defaultName))
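To make the black/white-list pass above concrete, here is a simplified, self-contained sketch of the filtering it performs on block locations. The real matching lives in WMCore's SEBlackWhiteListParser and is more elaborate (it supports partial hostname matches, for instance); this version only illustrates the shape of the computation.

def filter_block_sites(blockSites, seWhiteList, seBlackList):
    # drop blacklisted SEs, then restrict to the white list when one is set;
    # keep the original locations when filtering would empty a block, matching
    # the "if sites: blockSites[block] = sites" guard above
    filtered = {}
    for block, sites in blockSites.items():
        kept = [s for s in sites if s not in seBlackList]
        if seWhiteList:
            kept = [s for s in kept if s in seWhiteList]
        filtered[block] = kept or sites
    return filtered

# example: one block hosted at two SEs, one of them blacklisted
print(filter_block_sites({'/A/B#1': ['se1.cern.ch', 'se2.fnal.gov']},
                         [], ['se2.fnal.gov']))
# {'/A/B#1': ['se1.cern.ch']}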
Beispiel #50
    def BuildJobList(self,type=0):
        # total jobs
        nj_list = []
        self.complete_List = common._db.nJobs('list')
        if type == 1:
            self.nj_list = []
            if self.chosenJobsList: self.nj_list = self.chosenJobsList
            return
        # build job list
        from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
        self.blackWhiteListParser = SEBlackWhiteListParser(self.seWhiteList, self.seBlackList, common.logger())
        common.logger.debug('nsjobs '+str(self.nsjobs))
        # get the first not already submitted
        common.logger.debug('Total jobs '+str(len(self.complete_List)))

        jobSetForSubmission = 0
        jobSkippedInSubmission = []
        tmp_jList = self.complete_List
        if self.chosenJobsList is not None:
            tmp_jList = self.chosenJobsList
        for job in common._db.getTask(tmp_jList).jobs:
            cleanedBlackWhiteList = self.blackWhiteListParser.cleanForBlackWhiteList(job['dlsDestination'])
            if (cleanedBlackWhiteList != '') or (self.datasetPath is None):
                #if ( job.runningJob['status'] in ['C','RC'] and job.runningJob['statusScheduler'] in ['Created',None]):
                if ( job.runningJob['state'] in ['Created']):
                    jobSetForSubmission +=1
                    nj_list.append(job['id'])
                else:
                    continue
            else :
                jobSkippedInSubmission.append( job['id'] )
            if self.nsjobs > 0 and self.nsjobs == jobSetForSubmission:
                break
            pass
        if self.nsjobs > jobSetForSubmission:
            common.logger.info('asking to submit '+str(self.nsjobs)+' jobs, but only '+\
                                  str(jobSetForSubmission)+' left: submitting those')
        if len(jobSkippedInSubmission) > 0:
            mess = ",".join(str(jobId) for jobId in jobSkippedInSubmission)
            common.logger.info("Jobs:  " + mess + "\n\tskipped because no sites are hosting this data\n")
            self.submissionError()
            pass
        # submit N from last submitted job
        common.logger.debug('nj_list '+str(nj_list))
        self.nj_list = nj_list
        if self.limitJobs and len(self.nj_list) > 500:
            ###### FEDE FOR BUG 85243 ############## 
            msg = "The CRAB client will not submit task with more than 500 jobs.\n"
            msg += "      Use the server mode or submit your jobs in smaller groups"
            raise CrabException(msg)
            ########################################
        return
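Stripped of the CRAB plumbing, the selection rule in BuildJobList reduces to: walk the jobs in task order, record those whose data is hosted nowhere as skipped, collect those still in 'Created' state, and stop once nsjobs have been gathered. A hedged sketch with plain dicts standing in for the job objects:

def select_jobs(jobs, nsjobs):
    # jobs: list of {'id': ..., 'sites': [...], 'state': ...} dicts (illustrative)
    selected, skipped = [], []
    for job in jobs:
        if not job['sites']:
            skipped.append(job['id'])      # no SE hosts this job's data
        elif job['state'] == 'Created':
            selected.append(job['id'])     # not yet submitted
        if 0 < nsjobs == len(selected):
            break                          # requested number reached
    return selected, skipped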
Beispiel #51
def generate_hdf5(ftxt, output, fname, argument=False):
    # argument=True enables data augmentation (flips and small rotations)

    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []
    EN_imgs = []
    EN_landmarks = []
    NM_imgs = []
    NM_landmarks = []

    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)
        # F
        f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
        f_face = img[f_bbox.top:f_bbox.bottom+1,f_bbox.left:f_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > -1:  # rand() > -1 is always true, so every sample is flipped
            ### flip
            face_flipped, landmark_flipped = flip(f_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (39, 39))
            F_imgs.append(face_flipped.reshape((1, 39, 39)))
            F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), 5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation (opposite direction)
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), -5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))

        f_face = cv2.resize(f_face, (39, 39))
        en_face = f_face[:31, :]  # top rows: eye-and-nose region
        nm_face = f_face[8:, :]   # bottom rows: nose-and-mouth region

        f_face = f_face.reshape((1, 39, 39))
        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

        # EN
        # en_bbox = bbox.subBBox(-0.05, 1.05, -0.04, 0.84)
        # en_face = img[en_bbox.top:en_bbox.bottom+1,en_bbox.left:en_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(en_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape((1, 31, 39))
            landmark_flipped = landmark_flipped[:3, :].reshape((6))
            EN_imgs.append(face_flipped)
            EN_landmarks.append(landmark_flipped)

        en_face = cv2.resize(en_face, (31, 39)).reshape((1, 31, 39))
        en_landmark = landmarkGt[:3, :].reshape((6))
        EN_imgs.append(en_face)
        EN_landmarks.append(en_landmark)

        # NM
        # nm_bbox = bbox.subBBox(-0.05, 1.05, 0.18, 1.05)
        # nm_face = img[nm_bbox.top:nm_bbox.bottom+1,nm_bbox.left:nm_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(nm_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape((1, 31, 39))
            landmark_flipped = landmark_flipped[2:, :].reshape((6))
            NM_imgs.append(face_flipped)
            NM_landmarks.append(landmark_flipped)

        nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 31, 39))
        nm_landmark = landmarkGt[2:, :].reshape((6))
        NM_imgs.append(nm_face)
        NM_landmarks.append(nm_landmark)

    #imgs, landmarks = process_images(ftxt, output)

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    EN_imgs, EN_landmarks = np.asarray(EN_imgs), np.asarray(EN_landmarks)
    NM_imgs, NM_landmarks = np.asarray(NM_imgs),np.asarray(NM_landmarks)

    F_imgs = processImage(F_imgs)
    shuffle_in_unison_scary(F_imgs, F_landmarks)
    EN_imgs = processImage(EN_imgs)
    shuffle_in_unison_scary(EN_imgs, EN_landmarks)
    NM_imgs = processImage(NM_imgs)
    shuffle_in_unison_scary(NM_imgs, NM_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)

    # eye and nose
    base = join(OUTPUT, '1_EN')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = EN_imgs.astype(np.float32)
        h5['landmark'] = EN_landmarks.astype(np.float32)

    # nose and mouth
    base = join(OUTPUT, '1_NM')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = NM_imgs.astype(np.float32)
        h5['landmark'] = NM_landmarks.astype(np.float32)
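To sanity-check one of the generated files, the two datasets can be read back with h5py; the path below is illustrative, since the actual location depends on OUTPUT and fname.

import h5py

with h5py.File('train/1_F/train.h5', 'r') as h5:  # example path
    imgs = h5['data'][:]           # shape (N, 1, 39, 39), float32
    landmarks = h5['landmark'][:]  # shape (N, 10), five (x, y) pairs
print(imgs.shape, landmarks.shape)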
Beispiel #52
    def __init__( self, cfg_params,  args ):
        self.cfg_params = cfg_params
        self.args=args

        self.lumisPerJob = -1
        self.totalNLumis = 0
        self.theNumberOfJobs = 0
        self.limitNJobs = False
        self.limitTotalLumis = False
        self.limitJobLumis = False

        #self.maxEvents
        # init BlackWhiteListParser
        self.seWhiteList = cfg_params.get('GRID.se_white_list',[])
        seBlackList = cfg_params.get('GRID.se_black_list',[])
        self.blackWhiteListParser = SEBlackWhiteListParser(self.seWhiteList, seBlackList, common.logger())

        ## check whether a non-default file was requested to store/read analyzed fileBlocks
        defaultName = common.work_space.shareDir() + 'AnalyzedBlocks.txt'
        self.fileBlocks_FileName = os.path.abspath(self.cfg_params.get('CMSSW.fileblocks_file', defaultName))
        
        self.global_data_service = int( self.cfg_params.get('CMSSW.global_data_service', 0) )
        self.global_data_rewrite = int( self.cfg_params.get('CMSSW.global_data_rewrite', 1) )
Beispiel #53
    def configure(self, cfg_params):
        self._boss.configure(cfg_params)
        self.CRAB_useServer = cfg_params.get('CRAB.use_server',0)
        self.CRAB_serverName = cfg_params.get('CRAB.server_name',None)
        seWhiteList = cfg_params.get('GRID.se_white_list',[])
        seBlackList = cfg_params.get('GRID.se_black_list',[])
        self.dontCheckMyProxy=int(cfg_params.get("GRID.dont_check_myproxy",0))
        self.EDG_requirements = cfg_params.get('GRID.requirements',None)
        self.EDG_addJdlParam = cfg_params.get('GRID.additional_jdl_parameters',None)
        if self.EDG_addJdlParam:
            self.EDG_addJdlParam = self.EDG_addJdlParam.split(';')

        self.pset = cfg_params.get('CMSSW.pset',None)
        self.blackWhiteListParser = SEBlackWhiteListParser(seWhiteList, seBlackList, common.logger())

        self.return_data = int(cfg_params.get('USER.return_data',0))
        self.copy_data = int(cfg_params.get('USER.copy_data',0))
        self.publish_data = cfg_params.get("USER.publish_data",0)
        self.local_stage = int(cfg_params.get('USER.local_stage_out',0))
        self.check_RemoteDir =  int(cfg_params.get('USER.check_user_remote_dir',1))

        if int(self.copy_data) == 1:
            self.SE = cfg_params.get('USER.storage_element',None)
            if not self.SE:
                msg = "Error. The [USER] section does not have 'storage_element'"
                common.logger.info(msg)
                raise CrabException(msg)

        if ( int(self.return_data) == 0 and int(self.copy_data) == 0 ):
            msg = 'Error: return_data = 0 and copy_data = 0 ==> your exe output will be lost\n'
            msg = msg + 'Please modify the return_data and copy_data values in your crab.cfg file\n'
            raise CrabException(msg)

        if ( int(self.return_data) == 1 and int(self.copy_data) == 1 ):
            msg = 'Error: return_data and copy_data cannot be set both to 1\n'
            msg = msg + 'Please modify return_data or copy_data value in your crab.cfg file\n'
            raise CrabException(msg)

        if ( int(self.copy_data) == 0 and int(self.local_stage) == 1 ):
            msg = 'Error: copy_data = 0 and local_stage_out = 1.\n'
            msg += 'To enable local stage out the copy_data value has to be = 1\n'
            msg = msg + 'Please modify copy_data value in your crab.cfg file\n'
            raise CrabException(msg)

        if ( int(self.copy_data) == 0 and int(self.publish_data) == 1 ):
            msg = 'Error: publish_data = 1 must be used with copy_data = 1\n'
            msg = msg + 'Please modify copy_data value in your crab.cfg file\n'
            common.logger.info(msg)
            raise CrabException(msg)

        if ( int(self.publish_data) == 1 and self._name == 'lsf'):
            msg = 'Error: data publication is not allowed with the lsf scheduler; it is only supported with the grid scheduler or caf\n'
            common.logger.info(msg)
            raise CrabException(msg)

        self.debug_wrapper = int(cfg_params.get('USER.debug_wrapper',0))
        self.debugWrap=''
        if self.debug_wrapper==1: self.debugWrap='--debug'
        self.loc_stage_out = ''
        if ( int(self.local_stage) == 1 ):
            self.debugWrap='--debug'
            self.loc_stage_out='--local_stage'

        # Time padding for minimal job duration.
        self.minimal_job_duration = 10

        return
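The stage-out checks above encode four mutually constraining rules: at least one of return_data and copy_data must be enabled, they cannot both be enabled, and local_stage_out and publish_data each require copy_data. Condensed into a standalone sketch (illustrative only, not part of CRAB):

def check_stageout(return_data, copy_data, local_stage, publish_data):
    # re-statement of the consistency rules enforced in configure()
    if return_data == 0 and copy_data == 0:
        raise ValueError("exe output would be lost: enable return_data or copy_data")
    if return_data == 1 and copy_data == 1:
        raise ValueError("return_data and copy_data cannot both be 1")
    if local_stage == 1 and copy_data == 0:
        raise ValueError("local_stage_out = 1 requires copy_data = 1")
    if publish_data == 1 and copy_data == 0:
        raise ValueError("publish_data = 1 requires copy_data = 1")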