Example #1
def check(process_output, judge_output, split_on='lines', **kwargs):
    split_pattern = {
        'lines': b'[\r\n]',
        'whitespace': rb'[\s]',
    }.get(split_on)

    if not split_pattern:
        raise InternalError('invalid `split_on` mode')

    process_lines = list(
        filter(None, resplit(split_pattern, utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(split_pattern, utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    if split_on == 'lines':
        process_lines = list(map(six.binary_type.split, process_lines))
        judge_lines = list(map(six.binary_type.split, judge_lines))

    process_lines.sort()
    judge_lines.sort()

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line != judge_line:
            return False

    return True
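The helper names above come from a DMOJ-style judge: resplit is re.split, utf8bytes coerces str to UTF-8 bytes, six.binary_type is bytes on Python 3, and InternalError is the judge's own exception type. A minimal sketch of stand-ins (assumptions, not the judge's real code) that lets the checker run on its own:

# Hypothetical stand-ins for the judge utilities assumed by the checker above.
from re import split as resplit
import six  # six.binary_type is bytes on Python 3

class InternalError(Exception):
    pass

def utf8bytes(s):
    # accept str or bytes; always return UTF-8-encoded bytes
    return s.encode('utf-8') if isinstance(s, str) else s

# With those defined, the checker accepts any ordering of tokens or lines:
#   check(b'3 1 2', b'1 2 3', split_on='whitespace')  -> True
#   check(b'1 2\n3', b'3\n1 2', split_on='lines')     -> True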
Example #2
def check(process_output, judge_output, precision, **kwargs):
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    epsilon = 10**-int(precision)

    try:
        for process_line, judge_line in zip(process_lines, judge_lines):
            process_tokens = process_line.split()
            judge_tokens = judge_line.split()

            if len(process_tokens) != len(judge_tokens):
                return False

            for process_token, judge_token in zip(process_tokens,
                                                  judge_tokens):
                try:
                    judge_float = float(judge_token)
                except:
                    if process_token != judge_token:
                        return False
                else:
                    process_float = float(process_token)
                    # since process_float can be nan, this is NOT equivalent to (process_float - judge_float) > epsilon
                    # the code below will always reject nan, even if judge_float is nan
                    if not abs(process_float - judge_float) <= epsilon:
                        return False
    except:
        return False
    return True
Example #3
def check(process_output: bytes,
          judge_output: bytes,
          point_value: float,
          feedback: bool = True,
          match: Callable[[bytes, bytes],
                          bool] = lambda p, j: p.strip() == j.strip(),
          **kwargs) -> Union[CheckerResult, bool]:
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) > len(judge_lines):
        return False

    if not judge_lines:
        return True

    if isinstance(match, str):
        match = eval(match)

    cases = [verdict[0]] * len(judge_lines)
    count = 0

    for i, (process_line,
            judge_line) in enumerate(zip(process_lines, judge_lines)):
        if match(process_line, judge_line):
            cases[i] = verdict[1]
            count += 1

    return CheckerResult(count == len(judge_lines),
                         point_value * (1.0 * count / len(judge_lines)),
                         ''.join(cases) if feedback else "")
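CheckerResult and verdict are not shown in this excerpt; presumably CheckerResult(passed, points, feedback) is the judge's result type and verdict is a two-element sequence marking wrong and correct lines. A hedged sketch of stand-ins (reusing the utf8bytes/resplit stubs from after Example #1) that makes the per-line partial scoring visible; the typing imports are also needed for the annotations to evaluate:

from collections import namedtuple
from typing import Callable, List, Union  # used by the annotations in Examples #3 and #4

# Hypothetical stand-ins; the real judge supplies richer versions.
CheckerResult = namedtuple('CheckerResult', 'passed points feedback')
verdict = 'xv'  # assumed: verdict[0] marks a wrong line, verdict[1] a correct one

# Two of the three judge lines match, so 2/3 of point_value is awarded:
#   check(b'1\n2\n9', b'1\n2\n3', point_value=3.0)
#   -> passed=False, points = 3.0 * (2/3) ≈ 2.0, feedback='vvx'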
Example #4
def check(process_output: bytes,
          judge_output: bytes,
          point_value: float = 1,
          point_distribution: List[int] = [1],
          filler_lines_required: bool = True,
          **kwargs) -> Union[CheckerResult, bool]:
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(judge_lines) != len(point_distribution):
        raise InternalError(
            'point distribution length must equal judge output length')

    if sum(point_distribution) == 0:
        raise InternalError('sum of point distribution must be positive')

    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))

    if filler_lines_required and len(process_lines) != len(judge_lines):
        return False

    points = 0
    for process_line, judge_line, line_points in zip(process_lines,
                                                     judge_lines,
                                                     point_distribution):
        if process_line == judge_line:
            points += line_points

    return CheckerResult(points > 0,
                         point_value * (points / sum(point_distribution)))
Example #5
def check(process_output, judge_output, split_on='lines', **kwargs):
    split_pattern = {
        'lines': b'[\r\n]',
        'whitespace': rb'[\s]',
    }.get(split_on)

    if not split_pattern:
        raise InternalError('invalid `split_on` mode')

    process_lines = list(filter(None, resplit(split_pattern, utf8bytes(process_output))))
    judge_lines = list(filter(None, resplit(split_pattern, utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    if split_on == 'lines':
        process_lines = list(map(six.binary_type.split, process_lines))
        judge_lines = list(map(six.binary_type.split, judge_lines))

    process_lines.sort()
    judge_lines.sort()

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line != judge_line:
            return False

    return True
Example #6
def check(process_output: bytes,
          judge_output: bytes,
          point_value: float,
          feedback: bool = True,
          **kwargs) -> Union[CheckerResult, bool]:
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) > len(judge_lines):
        return False

    if not judge_lines:
        return True

    cases = [verdict[0]] * len(judge_lines)
    count = 0

    for i, (process_line,
            judge_line) in enumerate(zip(process_lines, judge_lines)):
        if process_line.strip() == judge_line.strip():
            cases[i] = verdict[1]
            count += 1

    return CheckerResult(count == len(judge_lines),
                         point_value * count / len(judge_lines),
                         extended_feedback='Case Feedback:\n' +
                         ''.join(cases) if feedback else '')
Example #7
def check(process_output,
          judge_output,
          precision=6,
          error_mode='default',
          **kwargs):
    # Discount empty lines
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    verify_float = {
        'absolute': verify_absolute,
        'relative': verify_relative,
        'default': verify_default,
    }.get(error_mode)

    if not verify_float:
        raise InternalError('invalid `error_mode` value')

    epsilon = 10**-int(precision)

    try:
        for process_line, judge_line in zip(process_lines, judge_lines):
            process_tokens = process_line.split()
            judge_tokens = judge_line.split()

            if len(process_tokens) != len(judge_tokens):
                return False

            for process_token, judge_token in zip(process_tokens,
                                                  judge_tokens):
                # Allow mixed tokens, for lines like "abc 0.68 def 0.70"
                try:
                    judge_float = float(judge_token)
                except:
                    # If it's not a float the token must match exactly
                    if process_token != judge_token:
                        return False
                else:
                    process_float = float(process_token)

                    if not verify_float(process_float, judge_float, epsilon):
                        return False
    except:
        return False
    return True
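verify_absolute, verify_relative and verify_default are not part of this excerpt. Plausible reconstructions, consistent with the logic Example #23 inlines for the default mode (a sketch, not the judge's verbatim code):

def verify_absolute(process_float, judge_float, epsilon):
    # |p - j| <= eps; comparisons with nan are false, so nan is rejected
    return abs(process_float - judge_float) <= epsilon

def verify_relative(process_float, judge_float, epsilon):
    # error scaled by the magnitude of the expected value
    return abs(process_float - judge_float) <= epsilon * abs(judge_float)

def verify_default(process_float, judge_float, epsilon):
    # accept if either the absolute or the relative error is within eps
    return abs(process_float - judge_float) <= epsilon or \
        (abs(judge_float) >= epsilon and
         abs(1.0 - process_float / judge_float) <= epsilon)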
Example #8
def picRead(pics):
    temp = []
    tempDict = dict()
    if isinstance(pics, list):
        for eachPic in pics:
            tempDict = dict()
            tempDict['pic'] = imreadCH(eachPic)
            tempDict['obj'] = resplit(r'[\\ /]', eachPic)[-1]
            temp.append(tempDict)
        return temp
    else:
        tempDict['pic'] = imreadCH(pics)
        tempDict['obj'] = resplit(r'[\\ /]', pics)[-1]
        return tempDict
Example #9
def check(process_output, judge_output, **kwargs):
    process_lines = resplit(b'[\r\n]', utf8bytes(process_output))
    judge_lines = resplit(b'[\r\n]', utf8bytes(judge_output))

    if 'filter_new_line' in kwargs:
        process_lines = list(filter(None, process_lines))
        judge_lines = list(filter(None, judge_lines))

    if len(process_lines) != len(judge_lines):
        return False

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line.rstrip() != judge_line.rstrip():
            return False

    return True
Example #10
def href_resolve(root, path, href):
    protocol_split = resplit("(\\:\\/\\/|\\:)", href, maxsplit=1)
    if len(protocol_split) > 1:
        return href
    if href.startswith("/"):
        return root+href
    return root+"/"+path+"/"+href
Example #11
def _qual_subbuttons(task_id, qual, msg):
    buttons = button_build.ButtonMaker()
    task_info = listener_dict[task_id]
    formats_dict = task_info[6]
    qual_fps_ext = resplit(r'p|-', qual, maxsplit=2)
    height = qual_fps_ext[0]
    fps = qual_fps_ext[1]
    ext = qual_fps_ext[2]
    tbrs = []
    for tbr in formats_dict[qual]:
        tbrs.append(tbr)
    tbrs.sort(reverse=True)
    for index, br in enumerate(tbrs):
        if index == 0:
            tbr = f">{br}"
        else:
            sbr = index - 1
            tbr = f"<{tbrs[sbr]}"
        if fps != '':
            video_format = f"bv*[height={height}][fps={fps}][ext={ext}][tbr{tbr}]"
        else:
            video_format = f"bv*[height={height}][ext={ext}][tbr{tbr}]"
        size = formats_dict[qual][br]
        buttonName = f"{br}K ({get_readable_file_size(size)})"
        buttons.sbutton(str(buttonName), f"qu {task_id} {video_format}")
    buttons.sbutton("Back", f"qu {task_id} back")
    buttons.sbutton("Cancel", f"qu {task_id} cancel")
    SUBBUTTONS = InlineKeyboardMarkup(buttons.build_menu(2))
    editMessage(f"Choose Video Bitrate for <b>{qual}</b>:", msg, SUBBUTTONS)
Example #12
 def configure(self):
     """Reload the configuration."""
     self.configuration['cfgfile'] = self.cfgname
     try:
         with open(self.cfgname) as f:
             for l in f:
                 line = l.strip()
                 try:
                     if line[0] == '#': continue
                     k, v = resplit(' |\t', line, 1)
                     self.configuration[k] = v.strip()
                 except IndexError:
                     continue  # empty line
         try:
             self.logger.setLogfile(self.configuration['log_filename'])
         except KeyError:
             self.logger.setLogfile('./pcm.log')
         self.logger.log(
             'Configuration reloaded for ' + self.configuration['name'] +
             '.', 1)
         return self.configuration
     except Exception as e:
         self.logger.log(' *** Configuration error! − ' + str(e) + ' ***',
                         4)
         return False
Example #13
def matchImg(imgsrc,
             imgobj,
             confidencevalue=0.8,
             targetSize=(1440, 810)):  # imgsrc = source image, imgobj = image to look for
    'Finds a single target image inside the source image; if several matches exist, one is returned arbitrarily. Returns a dict.'
    try:
        if isinstance(imgsrc, str):
            imsrc = imreadCH(imgsrc)
        else:
            imsrc = imgsrc
    except RuntimeError:
        return None
    #imobj = imread(imgobj)
    if isinstance(imgobj, str):
        imobj = imreadCH(imgobj)
    else:
        imobj = imgobj['pic']  # in this case the argument is guaranteed to be a dict
    if targetSize != (0, 0):
        imsrc = resize(imsrc, targetSize)

    match_result = find_template(imsrc, imobj, confidencevalue)
    #match_result = None
    if match_result is not None:
        if isinstance(imgobj, str):
            match_result['obj'] = resplit(r'[\\ /]', imgobj)[-1]
        else:
            match_result['obj'] = imgobj['obj']

    #delImg(imgsrc)
    return match_result
Example #14
def check(process_output, judge_output, **kwargs):
    process_lines = resplit(b'[\r\n]', utf8bytes(process_output))
    judge_lines = resplit(b'[\r\n]', utf8bytes(judge_output))

    if kwargs.get('filter_new_line'):
        process_lines = list(filter(None, process_lines))
        judge_lines = list(filter(None, judge_lines))

    if len(process_lines) != len(judge_lines):
        return False

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line.rstrip() != judge_line.rstrip():
            return False

    return True
Example #15
def wellBoreDataLineAnalysis(line):
    line = line.replace("\t"," ")
    #lineList = line.split(" ")
    lineList = resplit("=| ",line)
    lineList = [a for a in lineList if a != ""]
    #print (lineList)
    if ("Real" in lineList) and ("!" in lineList):
        pythonString = lineList[0]+" = "+lineList[2].split("!")[0]
        #print ("dbg wellbore int",pythonString)
        #print ("dbg wellbore lineList[0]",lineList[0])
        #print ("dbg wellbore lineList[2]",lineList[2].split("!")[0])
        #raw_input()
        return pythonString,lineList[0],lineList[2].split("!")[0], "Real",lineList[4].replace("\n","")
    elif "Real" in lineList:
        pythonString = lineList[0]+" = "+lineList[2]
        #print ("dbg wellbore int",pythonString)
        #print ("dbg wellbore lineList[0]",lineList[0])
        #print ("dbg wellbore lineList[2]",lineList[2].split("!")[0])
        return pythonString,lineList[0],lineList[2].split("!")[0], "Real","! Int. coef"
    elif "Int" in lineList and ("!" in lineList):
        #print(" line list int: ",lineList)
        pythonString = lineList[0]+" = "+lineList[2].split("!")[0]
        return pythonString,lineList[0],lineList[2].split("!")[0], "Int","".join(lineList[4:]).replace("\n","")
    elif "Int" in lineList:
        #print(" line list int: ",lineList)
        pythonString = lineList[0]+" = "+lineList[2].split("!")[0]
        return pythonString,lineList[0],lineList[2].split("!")[0], "Int","! Int. coef"
    elif "Logical" in lineList:
        return lineList[0], lineList[1], lineList[2]
    elif "Material" in lineList:
        return lineList[lineList.index('Material')+1].replace("\n","")
    elif "Variable" in lineList:
        pythonString = lineList[0]+" = "+lineList[2].split("!")[0]
        return pythonString,lineList[0],lineList[2].split("!")[0], "Variable",lineList[4].replace("\n","")
Example #16
 def _ids(self, box='INBOX', search='ALL', charset=None, byUid=False):
     """"""
     status, resp = self.cnx.select(box)
     if status == self.KO :
         self.createBox(box)
         self.cnx.select(box)
     status, resp = self.cnx.search(charset, '(%s)' % search)
     return resplit(' ',Io.str(resp[self.K_HEAD]))
Example #17
 def most_recent_configsets(self):
     """Issue HTTP Get Request to SolrCloud API to retrieve Recent ConfigSets."""
     configs = defaultdict(set)
     for configset in self.get_configsets():
         split = resplit(r'-(\d+)', configset)
         if len(split) > 1:
             config, version, *_ = split
             configs[config].add(int(version))
     return [f"{configset}-{max(versions)}" for (configset, versions) in configs.items()]
Example #18
def check(process_output, judge_output, **kwargs):
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    process_lines = list(map(six.binary_type.split, process_lines))
    judge_lines = list(map(six.binary_type.split, judge_lines))
    process_lines.sort()
    judge_lines.sort()

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line != judge_line:
            return False

    return True
Example #19
def check(process_output, judge_output, precision=6, error_mode='default', **kwargs):
    # Discount empty lines
    process_lines = list(filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    verify_float = {
        'absolute': verify_absolute,
        'relative': verify_relative,
        'default': verify_default,
    }.get(error_mode)

    if not verify_float:
        raise InternalError('invalid `error_mode` value')

    epsilon = 10 ** -int(precision)

    try:
        for process_line, judge_line in zip(process_lines, judge_lines):
            process_tokens = process_line.split()
            judge_tokens = judge_line.split()

            if len(process_tokens) != len(judge_tokens):
                return False

            for process_token, judge_token in zip(process_tokens, judge_tokens):
                # Allow mixed tokens, for lines like "abc 0.68 def 0.70"
                try:
                    judge_float = float(judge_token)
                except:
                    # If it's not a float the token must match exactly
                    if process_token != judge_token:
                        return False
                else:
                    process_float = float(process_token)

                    if not verify_float(process_float, judge_float, epsilon):
                        return False
    except:
        return False
    return True
Example #20
def check(process_output: bytes, judge_output: bytes, **kwargs) -> Union[CheckerResult, bool]:
    process_lines = resplit(b'[\r\n]', utf8bytes(process_output))
    judge_lines = resplit(b'[\r\n]', utf8bytes(judge_output))

    # the user will produce a single line of output: the name of their file
    filename = process_lines[0].decode('utf-8')

    # expected file, decoded to utf-8 characters
    enjin = [x.decode('utf-8') for x in list(filter(None, judge_lines))]
    strexpected = '\n'.join(map(str, enjin))
    print(strexpected)

    # grab the file name and open the file
    myfile = open("/outputfiles/" + filename, "rb").read()
    file_lines = resplit(b'[\r\n]', utf8bytes(myfile))

    # user's file, decoded to utf-8 characters
    enjin = [x.decode('utf-8') for x in list(filter(None, file_lines))]
    data_to_read = '\n'.join(map(str, enjin))
    print(data_to_read)

    # comparison
    print(file_lines)
    print(judge_lines)

    # the file can be deleted now so it stops taking up memory (no risk of a 3GB file, since that would give MLE)
    os.remove("/outputfiles/" + filename)

    if len(file_lines) != len(judge_lines):
        #print("ep")

        return CheckerResult(False, 0,
                             "The files do not have the same lines",
                             strexpected + "\u2719" + data_to_read)

    for process_line, judge_line in zip(file_lines, judge_lines):
        if process_line.rstrip() != judge_line.rstrip():
            #print("aaa",process_line.rstrip,judge_line.rstrip())
            return CheckerResult(False, 0, "The files are not equal",
                                 strexpected + "\u2719" + data_to_read)

    return True
Example #21
 def status(self, box='INBOX'):
     """"""
     status, resp = self.cnx.status(box, '(MESSAGES RECENT UIDNEXT UIDVALIDITY UNSEEN)')
     if status == self.OK :
         data = research(self.REG_SATUS, Io.str(resp[self.K_HEAD]))
         l    = resplit(' ',data.group(2))
         dic  = {'BOX' : box}
         for i in range(len(l)):
             if i%2 == 0 : dic[l[i]] = int(l[i+1])
     else : dic = {}
     return dic
Example #22
def get_voter_id(href):
    _id = ""
    join = False
    for part in href.split("/"):
        part = resplit("(\\?|\\#)", part)[0]
        if not join:
            if part == "biografien":
                join = True
            continue
        _id += part+"/"
    return _id[:len(_id)-1]
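Given the function above (resplit is re.split), each path segment is stripped of any query string or fragment, and everything after the 'biografien' segment is kept. The URL below is a hypothetical example of the Bundestag-style paths this appears to target:

from re import split as resplit  # needed by get_voter_id above

get_voter_id('/abgeordnete/biografien/m/mustermann.html?cid=1')
# -> 'm/mustermann.html'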
Example #23
def check(process_output, judge_output, precision, **kwargs):
    # Discount empty lines
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    epsilon = 10**-int(precision)

    try:
        for process_line, judge_line in zip(process_lines, judge_lines):
            process_tokens = process_line.split()
            judge_tokens = judge_line.split()

            if len(process_tokens) != len(judge_tokens):
                return False

            for process_token, judge_token in zip(process_tokens,
                                                  judge_tokens):
                # Allow mixed tokens, for lines like "abc 0.68 def 0.70"
                try:
                    judge_float = float(judge_token)
                except:
                    # If it's not a float the token must match exactly
                    if process_token != judge_token:
                        return False
                else:
                    process_float = float(process_token)
                    # process_float can be nan
                    # in this case, we reject nan as a possible answer, even if judge_float is nan
                    if not abs(process_float - judge_float) <= epsilon and \
                            (not abs(judge_float) >= epsilon or not abs(1.0 - process_float / judge_float) <= epsilon):
                        return False
    except:
        return False
    return True
Example #24
def check(process_output, judge_output, precision, **kwargs):
    process_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(
        filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    epsilon = 10**-int(precision)

    try:
        for process_line, judge_line in zip(process_lines, judge_lines):
            process_tokens = process_line.split()
            judge_tokens = judge_line.split()

            if len(process_tokens) != len(judge_tokens):
                return False

            for process_token, judge_token in zip(process_tokens,
                                                  judge_tokens):
                try:
                    judge_float = float(judge_token)
                except:
                    if process_token != judge_token:
                        return False
                else:
                    process_float = float(process_token)
                    p1 = min(judge_float * (1 - epsilon),
                             judge_float * (1 + epsilon))
                    p2 = max(judge_float * (1 - epsilon),
                             judge_float * (1 + epsilon))
                    # since process_float can be nan, this is NOT equivalent to (process_float < p1 or process_float > p2)
                    if not (p1 <= process_float <= p2):
                        return False
    except:
        return False
    return True
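With precision=2 the accepted window is judge_float * [1 - 0.01, 1 + 0.01]; the min/max pair keeps the bounds ordered when the judge value is negative, and the chained comparison rejects nan because every comparison with nan is false. A worked instance:

epsilon = 10 ** -2
judge_float = -2.0
p1 = min(judge_float * (1 - epsilon), judge_float * (1 + epsilon))  # -2.02
p2 = max(judge_float * (1 - epsilon), judge_float * (1 + epsilon))  # -1.98
print(p1 <= -1.99 <= p2)         # True: within 1% relative error
print(p1 <= float('nan') <= p2)  # False: nan never compares true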
Example #25
def get_wiki_snippet(res):
    if res['query']['searchinfo']['totalhits'] > 0:
        # get title
        title = res['query']['search'][0]['title']
        # make parser object
        parser = htmlParser()
        # feed snippet to htmlparser
        parser.feed(res['query']['search'][0]['snippet'])
        # get cleaned snippet and split by sentence
        text = resplit(
            r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', parser.get_data())
        return "Results for {}: \n{}".format(title, text[0])
    else:
        return "No results found."
Example #26
def select_format(update, context):
    query = update.callback_query
    user_id = query.from_user.id
    data = query.data
    msg = query.message
    data = data.split(" ")
    task_id = int(data[1])
    try:
        task_info = listener_dict[task_id]
    except:
        return editMessage("This is old task", msg)
    uid = task_info[1]
    if user_id != uid:
        return query.answer(text="Don't waste your time!", show_alert=True)
    elif data[2] == "dict":
        query.answer()
        qual = data[3]
        return _qual_subbuttons(task_id, qual, msg)
    elif data[2] == "back":
        query.answer()
        return editMessage('Choose Video Quality:', msg, task_info[4])
    elif data[2] == "audio":
        query.answer()
        if len(data) == 4:
            playlist = True
        else:
            playlist = False
        return _audio_subbuttons(task_id, msg, playlist)
    elif data[2] != "cancel":
        query.answer()
        listener = task_info[0]
        link = task_info[2]
        name = task_info[3]
        args = task_info[5]
        qual = data[2]
        # Temp solution so callback data does not exceed Telegram's button byte limit.
        if qual.startswith('bv*['):
            height = resplit(r'\[|\]', qual, maxsplit=2)[1]
            qual = qual + f"+ba/b[{height}]"
        if len(data) == 4:
            playlist = True
        else:
            playlist = False
        ydl = YoutubeDLHelper(listener)
        Thread(target=ydl.add_download,
               args=(link, f'{DOWNLOAD_DIR}{task_id}', name, qual, playlist,
                     args)).start()
    del listener_dict[task_id]
    query.message.delete()
Example #27
    def getTaskList(self, taskName):
        task = self.run('tasklist')
        taskList = task.split('\n')
        taskAdb = []
        for eachTask in taskList:
            if taskName in eachTask:
                taskAdb.append(eachTask)
        pidList = []
        if taskAdb != []:
            for eachAdb in taskAdb:
                pid = resplit(r'\s+', eachAdb)[1]
                pidList.append(pid)

        return pidList
Example #28
def check(process_output, judge_output, point_value, feedback=True,
          match=lambda p, j: p.strip() == j.strip(), **kwargs):
    process_lines = list(filter(None, resplit(b'[\r\n]', utf8bytes(process_output))))
    judge_lines = list(filter(None, resplit(b'[\r\n]', utf8bytes(judge_output))))

    if len(process_lines) > len(judge_lines):
        return False

    if not judge_lines:
        return True

    if isinstance(match, six.string_types):
        match = eval(match)

    cases = [verdict[0]] * len(judge_lines)
    count = 0

    for i, (process_line, judge_line) in enumerate(zip(process_lines, judge_lines)):
        if match(process_line, judge_line):
            cases[i] = verdict[1]
            count += 1

    return CheckerResult(count == len(judge_lines), point_value * (1.0 * count / len(judge_lines)),
                         ''.join(cases) if feedback else "")
Example #29
def parselines(lines, tol=TOL, projective=False, as_set=False):
    """    
    Keyword arguments:
    lines -- iterable of strings, first entry the number of points;
             the rest, "%s %s" % real, imag
    tol   -- optional float, smallest allowable nonzero value
    """
    from re import split as resplit
    from naglib.core.misc import dps

    lines = striplines(lines)
    points = []

    numpoints = int(lines[0])
    if numpoints == 0:
        return points
    lines = lines[1:]
    length = len(lines)
    numvar = length // numpoints

    for i in range(0, length, numvar):
        point = lines[i:i + numvar]
        point = [resplit(r'\s+', p) for p in point]
        newpoint = []
        for p in point:
            real, imag = p
            if imag[-1] == ';':
                imag = imag[:-1]

            real = Float(real, dps(real))
            imag = Float(imag, dps(imag))

            if abs(real) < tol:
                real = 0
            if abs(imag) < tol:
                imag = 0

            newpoint.append(real + I * imag)
        if projective:
            points.append(ProjectivePoint(newpoint))
        else:
            points.append(AffinePoint(newpoint))

    if as_set:
        points = list(set(points))

    return points
Example #30
def shell(dps):
    with open(session.LOG_FILENAME) as file:
        for entry in file:
            entry = entry.rstrip()
            cmd = resplit(r'[^\\],', entry)[5]
            if cmd != "" and cmd != "What":  # remove CSV line head
                dps.prompt_session.history.append_string(cmd.rstrip())
    try:
        last_string = dps.prompt_session.prompt(
            auto_suggest=AutoSuggestFromHistory())
        dps_cmd.hook(last_string, dpsrc, session, prompt_ui, dps)
        dps.update_prompt()
    except KeyboardInterrupt:
        #exit_gracefully()
        pass
    except EOFError:
        exit_gracefully()
Example #31
    def post(self, request, *args, **kwargs):
        form = self.form_class(data=request.POST)

        if not form.is_valid():
            return render(request, self.template_name, {'form': form})

        tag_titles = resplit(r'[, ]', form.cleaned_data['tags'].lower())
        new_quest = form.save(commit=False)
        new_quest.author_id = request.user.id
        new_quest.save()
        for title in tag_titles:
            try:
                tag = Tag.objects.get(title=title)
            except ObjectDoesNotExist:
                tag = Tag.objects.create(title=title)
            new_quest.tags.add(tag)
        return redirect('/question/id' + str(new_quest.pk))
Example #32
def sanitizer_opts(env_data):
    """Parse the values defined in given *SAN_OPTIONS environment variable.
    For example "ASAN_OPTIONS=debug=false:log_path='/test/file.log'"
    would return {"debug": "false", "log_path": "'/test/file.log'"}

    Args:
        env_data (str): *SAN_OPTIONS environment variable to parse.

    Returns:
        dict: Sanitized values from environment.
    """
    opts = dict()
    for opt in resplit(r":(?![\\|/])", env_data):
        if not opt:
            continue
        key, val = opt.split("=")
        opts[key] = val
    return opts
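Given the function above (resplit is re.split), the negative lookahead keeps a ':' that introduces a path separator, as in Windows paths, from being treated as an option separator. The first call is the docstring's own example; the second uses a hypothetical Windows-path value:

from re import split as resplit  # needed by sanitizer_opts above

print(sanitizer_opts("debug=false:log_path='/test/file.log'"))
# {'debug': 'false', 'log_path': "'/test/file.log'"}
print(sanitizer_opts(r"log_path=C:\log:debug=true"))
# {'log_path': 'C:\\log', 'debug': 'true'}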
Example #33
def get_domains(topology, signalp=[]):
    domains = []
    for sp in signalp:
        if sp != "OTHER" and sp != '':
            domains.append({'c': 0, 'class': sp, 'shape': 'circle'})
    if len(topology) > 1:
        tms = resplit('[io]', str(topology).strip())
        for t in tms:
            if not t:
                continue
            start, end = t.split('-')
            domains.append({
                'start': start,
                'end': end,
                'class': 'helix',
                'shape': 'rect'
            })
    return domains
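A sketch of the expected shape, with invented inputs: a SignalP-style class for signalp and a topology string whose membrane-side markers 'i'/'o' delimit the transmembrane spans:

from re import split as resplit  # needed by get_domains above

print(get_domains('i12-34o56-78i', signalp=['SP(Sec/SPI)']))
# [{'c': 0, 'class': 'SP(Sec/SPI)', 'shape': 'circle'},
#  {'start': '12', 'end': '34', 'class': 'helix', 'shape': 'rect'},
#  {'start': '56', 'end': '78', 'class': 'helix', 'shape': 'rect'}]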
Example #34
def main():
    # Read the command line options
    parser = ArgumentParser(
        description='Highly configurable prompt decoration for ZSH and Bash.')
    parser.add_argument('-v',
                        '--version',
                        help='show version and exit',
                        action='store_true')
    parser.add_argument('-r',
                        '--right',
                        help='build right hand side prompt',
                        action='store_true')
    parser.add_argument('RC',
                        nargs='?',
                        help='return code of the executed command')
    args = parser.parse_args()

    if args.version:
        print("PBT v%s" % VERSION)
        exit(0)

    if version_info < (3, 0):
        import sys
        reload(sys)
        sys.setdefaultencoding('utf8')

    if args.right:
        cars_str = getenv('PBT_RCARS', "Time")
    else:
        cars_str = getenv('PBT_CARS', "Status, Os, Hostname, Dir, Git, Sign")
    cars_names = resplit(r'\s*,\s*', cars_str)
    cars = []

    for car in cars_names:
        try:
            cars.append(
                getattr(import_module('pbt.cars.%s' % car), '%sCar' % car)())
        except Exception:
            stderr.write("ERROR: Cannot import module %sCar.\n" % car)

            if getenv('PBT_DEBUG', False) in BOOL_TRUE:
                print_exc(file=stderr)

    print_train(cars, args.right)
Example #35
    def reconcile_samples(self, samples, samples_file, allow_all=False):

        if samples is None:
            samples = []

        if hasattr(samples_file, "read"):
            keep_samples = []
            for line in samples_file:
                if line.startswith("#"):
                    continue
                else:
                    iid = line.strip().split().pop(0)
                    if iid not in self.samples:
                        self.log.warning(
                            "Sample '{}' in populations file but not in VCF; skipping it."
                            .format(iid))
                        continue
                    keep_samples.append(iid)
            self.log.info("Read {} samples from file <{}>".format(
                len(keep_samples), samples_file.name))
        elif len(samples):
            keep_samples = []
            for iid in samples:
                keep_samples.extend(resplit("[,;|]", iid))
            self.log.info("Read {} samples from command line".format(
                len(keep_samples)))
        else:
            if not allow_all:
                self.log.error("Must specify at least one focal sample.")
                raise ValueError
            else:
                self.log.info("No samples specified, so keeping all of them.")
                keep_samples = self.samples

        keep_samples = set(self.samples) & set(keep_samples)
        keep_samples = list(keep_samples)
        nsamples = len(keep_samples)
        self.log.info(
            "Retained {} distinct samples that were verified in VCF header.".
            format(nsamples))

        self._working_samples = keep_samples
        return keep_samples
Example #36
def parse_variable(user_input):
    user_var = resplit(' *= *', user_input)

    # check the variable name for validity (ASCII letters only)
    for char in user_var[0]:
        if char not in ascii_letters:
            print('Invalid identifier')
            return

    # reject anything that is not exactly one '=' assignment
    if len(user_var) != 2:
        print('Invalid assignment1')
        return

    # check if new var value is existing var
    if user_var[1] in variables:
        variables[user_var[0]] = variables[user_var[1]]
    else:
        try:
            variables[user_var[0]] = int(user_var[1])
        except ValueError:
            print('Invalid assignment2')
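Assumed surrounding context for this calculator-style snippet: a module-level variables dict and ascii_letters from the string module. With those in place:

from re import split as resplit
from string import ascii_letters

variables = {}

parse_variable('n = 5')   # variables == {'n': 5}
parse_variable('m = n')   # copies an existing variable: variables['m'] == 5
parse_variable('2x = 1')  # prints 'Invalid identifier'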
Example #37
def wellBoreDataLineAnalysis(line):
    line = line.replace("\t", " ")
    #lineList = line.split(" ")
    lineList = resplit("=| ", line)
    lineList = [a for a in lineList if a != ""]
    #print (lineList)
    if ("Real" in lineList) and ("!" in lineList):
        pythonString = lineList[0] + " = " + lineList[2].split("!")[0]
        #print ("dbg wellbore int",pythonString)
        #print ("dbg wellbore lineList[0]",lineList[0])
        #print ("dbg wellbore lineList[2]",lineList[2].split("!")[0])
        #raw_input()
        return pythonString, lineList[0], lineList[2].split(
            "!")[0], "Real", lineList[4].replace("\n", "")
    elif "Real" in lineList:
        pythonString = lineList[0] + " = " + lineList[2]
        #print ("dbg wellbore int",pythonString)
        #print ("dbg wellbore lineList[0]",lineList[0])
        #print ("dbg wellbore lineList[2]",lineList[2].split("!")[0])
        return pythonString, lineList[0], lineList[2].split(
            "!")[0], "Real", "! Int. coef"
    elif "Int" in lineList and ("!" in lineList):
        #print(" line list int: ",lineList)
        pythonString = lineList[0] + " = " + lineList[2].split("!")[0]
        return pythonString, lineList[0], lineList[2].split(
            "!")[0], "Int", "".join(lineList[4:]).replace("\n", "")
    elif "Int" in lineList:
        #print(" line list int: ",lineList)
        pythonString = lineList[0] + " = " + lineList[2].split("!")[0]
        return pythonString, lineList[0], lineList[2].split(
            "!")[0], "Int", "! Int. coef"
    elif "Logical" in lineList:
        return lineList[0], lineList[1], lineList[2]
    elif "Material" in lineList:
        return lineList[lineList.index('Material') + 1].replace("\n", "")
    elif "Variable" in lineList:
        pythonString = lineList[0] + " = " + lineList[2].split("!")[0]
        return pythonString, lineList[0], lineList[2].split(
            "!")[0], "Variable", lineList[4].replace("\n", "")
Example #38
	def prepare_query_frags(self):
		"""
		Construit self.api_strs et self.api_toks
		   (permettent d'utiliser les infos structurées
		    de l'elt XML dans une query API type Lucene)

		api_strs --- re-dispatch d'un dict de str[] provenant du XML
		             vers un dict de str[] rangé par champs API
		             (permet les comparaisons strictes avec un hit API)

		api_toks --- même structure mais avec les chaînes de cara
		             tokenisées par re.split('\W').
		             et filtrées sur longueur >= 4 sauf exceptions (volume, pages)
		             (permet des requêtes efficaces dans l'API)

		On utilise record() pour mettre à jour les dict en filtrant les [] et les [""]
		"""

		# for the tokenized form, count how many tokens we keep
		total_kept = 0

		# dictionary {k => [strs]} of collected expressions
		# (arranged here by xpath via the earlier subvalues())
		for bibxpath in self.tei_subvals:

			# todo: clarify why native records sometimes have text directly under monogr/imprint
			# debug
			#~ if bibxpath == "monogr/imprint":
				#~ warn ("DEBUG: info directly under imprint? %s" % str(self.tei_subvals))

			# translating TEI keys => API keys
			# --------------------------------
			# e.g. monogr/author/surname => host.author.name
			champ = self.xpath_to_api_field(bibxpath)


			# a single source node can carry several strings
			# (notably author names), hence this small inner loop
			for whole_str in self.tei_subvals[bibxpath]:

				# some values are ignored (e.g. first-name initials)
				if champ == '__IGNORE__':
					continue

				# store each string under this new key
				else:
					# 1 - in intact form
					#     ------------------
					self.api_strs = self.record(self.api_strs, champ, whole_str)


					# 2 - in tokenized form
					#     --------------------
					# from re import split as resplit
					#     we use [^\w?*] instead of \W because we do not
					#     want to split on Lucene wildcards
					#     cf. text_to_query_fragment in bib_subvalues, which
					#         is allowed to add '?' and '*'
					for tok in resplit(r'[^\w?*]', whole_str):
						# warn("TOK %s" % tok)

						# fields that are allowed to be short
						if champ in ['host.volume', 'host.issue','host.pages.first','host.pages.last']:

							# if the API expects numeric values ------------>8----
							# ----------------------------------------------------
							# we must intercept the rare non-numeric cases
							# e.g. volume = "12 B" or issue = "suppl 1"
							if search(r'[^0-9]', tok):

								# the longest numeric part
								grep_partie_num = search(r'([0-9]+)', tok)
								if grep_partie_num is not None:
									tok = grep_partie_num.groups()[0]
								else:
									# if there is nothing numeric, skip this token
									continue
							# ----------------------------------------------------
							# ---------------------------------------------->8----

							# record it in any case, even if the token is short
							self.api_toks = self.record(self.api_toks, champ, tok)
							total_kept += 1

						# fields allowed to have shorter tokens
						elif champ in ['host.title', 'author.name'] and (len(tok) >= 2 or tok in ['j','J',']']):
							self.api_toks = self.record(self.api_toks, champ, tok)
							total_kept += 1

						# other fields: only sufficiently long tokens
						elif len(tok) >= 4:
							self.api_toks = self.record(self.api_toks, champ, tok)
							total_kept += 1


		# at the end of each bib, check that we actually kept some tokens
		if total_kept == 0:
			msg = "WARNING: filtering of short tokens removed everything (original values: '%s')" % self.tei_subvals
			warn(msg)
			self.log.append(msg)
Example #39
def read(file_name, mode, key_col, val_cols, split_re, has_header=False,
         header_dict={}, ignore_chars=[], map_fs={}, reduce_fs={},
         filter_f=None, state={}, update_state=None):

  lineno = 0
  headers = header_dict
  key_val_dict = {}
  current_state = state.copy()

  with open(file_name, mode) as file:

    for line in file:
      if line[0] in ignore_chars: continue
      a = resplit(split_re, line[:-1])

      if has_header:
        if lineno == 0:
          if headers == {}:
            headers[key_col] = a[key_col]
            for val_col in val_cols:
              headers[val_col] = a[val_col]
          lineno += 1
          continue

      if update_state is not None: update_state(current_state, a)

      if filter_f is not None and not filter_f(current_state, a):
        lineno += 1
        continue

      if key_col == None:
        key = check_and_apply_2(map_fs, 'key', current_state, lineno)
      else:
        key = check_and_apply_2(map_fs, 'key', current_state, a[key_col])

      for val_col in val_cols:

        val_name = headers[val_col] if val_col in headers else val_col
        if type(val_col) is str:
          if val_col in current_state:
            val = check_and_apply_2(map_fs, val_col, current_state, current_state[val_col])
          else:
            print('Error: ' + val_col + ' not in states', file=sys.stderr)
            sys.exit()
        else:
          val = check_and_apply_2(map_fs, val_col, current_state, a[val_col])

        if key in key_val_dict:
          if val_name in key_val_dict[key]:
            current_val = key_val_dict[key][val_name]
            reduced_val = check_and_apply_2(reduce_fs, val_col, current_val, val)
            key_val_dict[key][val_name] = reduced_val

          else:
            key_val_dict[key][val_name] = val
        else:
          key_val_dict[key] = {}
          key_val_dict[key][val_name] = val

      lineno += 1

  return (key_val_dict, current_state)
Example #40
def convertPcToPdc(deffile, particlesName, startframe, endframe, pdcIncrements,
                   outputDirectory, questionasked):
    '''Converts Houdini's Point Cache file to Maya's Point Disc Cach file'''
    print "time(): %f " %  time()
    print 'ReadFile:'
    dataType = {'Integer': 0, 'Integer Array': 1, 'Double': 2,
                    'Double Array': 3, 'Vector': 4, 'Vector Array': 5}
    readFile = open(str(deffile), 'r', buffering=1024*10)
    fileContent = readFile.read()
    content = resplit(r'\[|\]| |', fileContent)
    readFile.close()
    count = 0
    del fileContent
    vartest = 0
    vartest1 = 0
    vartesta = 0
    vartestb = 0
    vartestc = 0
    vartestd = 0
    noIds = 0
    noCoords = 0
    for count, item in enumerate(content):
        if search('\"P\"', item):
           index1 = count
           vartest = 1
        if search('\"id\"', item):
           index2 = count
           vartest1 = 1
        if vartest == 1:
            if vartesta == 0:
                if search('$^', item):
                   if count > index1:
                       index3 = count
                       vartesta = 1
            if vartesta == 1:           
                if vartestb == 0: 
                    if search('\"', item):
                       if count > index3:
                           index4 = count
                           vartestb = 1
            if vartest1 == 1:           
                if vartestc == 0:
                    if search('$^', item):
                       if count > index2:
                           index5 = count
                           vartestc = 1   
                if vartestc == 1:
                    if vartestd == 0:
                        if search('\"', item):
                           if count > index5:
                               index6 = count
                               vartestd = 1
    if vartest == 1:
       coords = content[(index3 + 1):index4]
    if vartest == 0:
       raise RuntimeError('No Coords found in file, is this a .pc.classic or .bgeo.classic file?')
    print "time(): %f " %  time()
    print 'Grouping:'
    tempString = ','.join(str(n) for n in coords)
    del coords
    running = True
    while running:    
        if tempString[-1].isdigit() == False:
            tempString = tempString[:-1]
        else:
            running = False
    tempString += ',,,'
    print "time(): %f " %  time()
    print 'Coords: '
    s = ''.join(tempString.split())
    del tempString
    split = s.split(",")
    outputs = [' '.join(split[6*i:6*i+3]) for i in range(len(split)//6)]
    outputs = ' '.join(outputs)
    outputs = outputs.split(' ')
    coords = tuple(float(f) for f in outputs)
    del outputs
    if vartest1 == 0:
        noIds = 1
        numberofids = len(coords)/3 
        ids = range(numberofids)
        ids = [float(x) for x in ids]
        ids = tuple(ids)
    if vartest1 == 1:
        ids = content[index5:index6]
        print "time(): %f " %  time()
        print 'Ids: '
        tempString2 = ','.join(str(n) for n in ids)
        running = True
        while running:    
            if tempString2[-1].isdigit() == False:
                tempString2 = tempString2[:-1]
            else:
                running = False
        tempString2 = tempString2[1:]
        ids = tempString2.split(',')
        del tempString2
        ids = [float(x.strip()) for x in ids]
        ids = tuple(ids)
    print "time(): %f " %  time()
    print 'Set Attributes:'
    scaleFactor = 1    
    particlecount = (len(coords))
    count = 0
    num = range(len(coords))
    attributes = ['position','particleId']
    fileType = 'PDC ' 
    formatVersion = 1 
    byteOrder = 1 
    offset = -1
    extra1 = 0 
    extra2 = 0 
    particlesTotal = len(coords)/3
    print "time(): %f " %  time()
    print 'Set Header Values and records2:'
    attributesTotal = 2
    headerValues = (fileType, formatVersion, byteOrder, extra1,
                    extra2, particlesTotal, attributesTotal)
    recordsValues2 = (len(attributes[1 + offset]), attributes[1 + offset],
                      dataType['Vector Array'])
    recordsValues2 += coords
    scaleFactor = 1
    print "time(): %f " %  time()
    print 'Set Records 3:'
    recordsValues3 = (len(attributes[2 + offset]), attributes[2 + offset],
                      dataType['Double Array'])
    recordsValues3 += ids
    recordsForm = ' i{0}si{1}d i{2}si{3}d'.format(str(len(attributes[1 + offset])),
                                                      str(3* particlesTotal),
                                                      str(len(attributes[2 + offset])),
                                                      str(len(ids)))
    headerForm = '>4sii2iii'
    form = Struct(headerForm + recordsForm)
    allValues = headerValues + recordsValues2 + recordsValues3
    del headerValues
    del recordsValues2
    del recordsValues3
    del headerForm
    del recordsForm
    packedData = form.pack(*allValues)
    del allValues
    del form
    print "time(): %f " %  time()
    print 'Writing File: \n'
    fileName = particlesName + '.' + str(pdcIncrements) + ".pdc"    
    outputPDCfile = open(outputDirectory + '\\Data\\' + fileName, 'wb')
    outputPDCfile.write(packedData)
    outputPDCfile.close()
    del packedData
    print 'done'
    print "time(): %f " %  time()
    if noIds == 1:
        warn(('No Id Values were included\n'
                       'Id values were assigned based on order of particles\n'
                      'This may cause unexpected results, to fix add a id attribute in houdini'),
                      DeprecationWarning)
Example #41
def read_eclipse_file(filename):

    def parse_line(line):
        words = line.split()
        starttime = words[0][4:] + ':' + words[0][:3] + ':' + words[1]
        stoptime = words[2][4:] + ':' + words[2][:3] + ':' + words[3]

        returndict = {'Start Time': starttime,
                      'Stop Time': stoptime,
                      'Duration': words[4],
                      'Current Condition': words[5],
                      'Obstruction': words[6],
                      'durationsec': npdouble(words[4]),
                      'startsec': DateTime(starttime).secs,
                      'stopsec': DateTime(stoptime).secs}

        if len(words) == 9:
            returndict.update({'Entry Timer': words[7],
                               'timersec': npdouble(words[7]),
                               'Type': words[8]})

        return returndict

    with open(filename, 'rb') as fid:
        datalines = fid.readlines()

    # The first line includes the year and day the file was generated
    #
    # Note: This entry may be manually created and could be a source of error
    # if read incorrectly.
    words = datalines.pop(0).split()
    #eclipse = {'epoch':dict(zip(('year','day'), (words[-2][:4],words[-2][5:])))}
    eclipse = {'epoch': {'year': words[2]}}
    eclipse['epoch'].update({'dom': words[0]})
    eclipse['epoch'].update({'month': words[1]})
    eclipse['epoch'].update({'time': words[3]})

    hosc = DateTime(words[2] + words[1] + words[0] + ' at ' + words[3]).date
    eclipse['epoch'].update({'doy': hosc[5:8]})

    # Remove spacing lines
    line = datalines.pop(0)
    while len(line.strip()) < 50:
        line = datalines.pop(0)

    headers = resplit("\s{2,5}", line.strip())

    # Truncate the Start Time, Stop Time and Duration header names
    headers[0] = headers[0][:10]
    headers[1] = headers[1][:9]
    headers[2] = headers[2][:8]

    # Remove the dashed lines separating the header from the eclipse data entries
    line = datalines.pop(0)

    # This is the eclipse number; it is used to index all eclipses in the
    # file. It has no other significance.
    n = -1
    eclipse.update({'eclipse_nums': []})

    while len(datalines) > 0:
        line = datalines.pop(0).strip()

        # All eclipse entries start with at least 7 "words"
        if len(line.split()) == 7:

            # increment the eclipse number and create a placeholder dict
            n = n + 1
            eclipse['eclipse_nums'].append(n)
            eclipse.update({n: {}})

            # Add the entrance penumbra data, there will always be an entrance
            # penumbra
            eclipsedata = parse_line(line)
            eclipse[n].update({'entrancepenumbra': eclipsedata})

            # If this is a full eclipse, then there will also be umbra and
            # exit penumbra phases.
            if len(datalines) > 0:
                if 'Umbra' in datalines[0]:

                    line = datalines.pop(0)
                    eclipsedata = parse_line(line)
                    eclipse[n].update({'umbra': eclipsedata})

                    line = datalines.pop(0)
                    eclipsedata = parse_line(line)
                    eclipse[n].update({'exitpenumbra': eclipsedata})

    return eclipse
Example #42
#Parse dataset input names
if options.dsinputs=='' or options.TR==0:
	dep_check()
	print "*+ Need at least dataset inputs and TE. Try meica.py -h"
	sys.exit()
if os.path.abspath(os.path.curdir).__contains__('meica.'):
	print "*+ You are inside a ME-ICA directory! Please leave this directory and rerun."
	sys.exit()

#Parse shorthand input file specification and TEs
tes=split(options.tes,',')
outprefix=options.prefix
if '[' in options.dsinputs:
	shorthand_dsin = True
	dsinputs=dsprefix(options.dsinputs)
	prefix=resplit(r'[\[\],]',dsinputs)[0]
	datasets=resplit(r'[\[\],]',dsinputs)[1:-1]
	trailing=resplit(r'[\]+]',dsinputs)[-1]
	isf= dssuffix(options.dsinputs)
	setname=prefix+''.join(datasets)+trailing+options.label
else:
	#Parse longhand input file specification
	shorthand_dsin = False
	datasets_in = options.dsinputs.split(',')
	datasets = [str(vv+1) for vv in range(len(tes))]
	prefix = dsprefix(datasets_in[0])
	isf = dssuffix(datasets_in[0])
	if '.nii' in isf: isf='.nii'
	trailing=''
	setname=prefix+options.label