Code example #1
 def saveTweetsToImg(self,tweets,tasktype:str):
     """Save a screenshot of each captured tweet element as a numbered PNG."""
     driver = self.driver
     path = os.path.join('transtweet','res_tweets',tasktype)
     check_path(path)
     count_id = 0
     for tweet in tweets:
         if 'elem' in tweet:
             count_id += 1
             #tweet['base64'] = tweet['elem'].screenshot_as_base64
             self.savePngToFile(tweet['relem'].screenshot_as_png,str(count_id),path=path)
     driver.execute_script('window.scrollTo(0,0)')  # scroll back to the top of the page
     return 'success!'
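
These transtweet snippets rely on helpers from the project's helper module that are not included in the listing: check_path() prepares an output directory and savePngToFile() writes the raw screenshot bytes. A minimal sketch of what they might do, assuming (as example #8 below suggests) that output lands under a cache/ root; the names and behaviour here are guesses, not the original code:

# Hypothetical sketch only -- not the original helper.py.
import os

def check_path(path, root='cache'):
    # Make sure the cache sub-directory exists before anything is written to it.
    full = os.path.join(root, path)
    os.makedirs(full, exist_ok=True)
    return full

def savePngToFile(png_bytes, name, path):
    # Write the raw PNG bytes returned by screenshot_as_png.
    with open(os.path.join('cache', path, name + '.png'), 'wb') as f:
        f.write(png_bytes)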
Code example #2
    def get_software(self):
        """
        Download the OCP 4.3 software bits from the base URLs
        specified in the class __init__.
        """

        logging.info('downloading OCP 4.3 software bits into {}'.format(self.software_dir))
        for url_key in self.ocp_urls.keys():
            url = self.ocp_urls[url_key]
            dest_name = url.split('/')[-1]
            dest_path = self.software_dir + '/' + dest_name
            dest_path_exist = check_path(dest_path, isfile=True)
            url_check = ''
            if dest_path_exist:
                logging.info('file {} already exists in {}'.format(dest_name, self.software_dir))
                self.inventory_dict['csah']['vars'][url_key] = dest_name
            else:
                url_check = validate_url(url)
                if url_check == '':
                    logging.error('file {} in {} is not available'.format(dest_name, url_key))
                    self.inventory_dict['csah']['vars'][url_key] = ''

            if url_check != '' and url_check.code == 200:
                logging.info('downloading {}'.format(dest_name))
                urlretrieve('{}'.format(url),'{}/{}'.format(self.software_dir, dest_name))
                self.inventory_dict['csah']['vars'][url_key] = dest_name
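
get_software() depends on two helpers that are not shown in this listing: check_path(path, isfile=True), which reports whether the file already exists, and validate_url(url), which returns '' on failure or an HTTP response object exposing a .code attribute on success. Rough stand-ins under those assumptions (not the project's real implementations):

# Hypothetical stand-ins for the helpers assumed by get_software().
import os
from urllib.error import HTTPError, URLError
from urllib.request import urlopen

def check_path(path, isfile=False, isdir=False):
    # Report whether the given file or directory already exists.
    if isfile:
        return os.path.isfile(path)
    if isdir:
        return os.path.isdir(path)
    return os.path.exists(path)

def validate_url(url):
    # Return the open HTTP response (exposing .code) or '' when unreachable.
    try:
        return urlopen(url, timeout=30)
    except (HTTPError, URLError):
        return ''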
Code example #3
    def set_nodes_inventory(self):
        nodes_inventory_check = check_path(self.nodes_inventory, isfile=True)

        if nodes_inventory_check:
            with open(r'{}'.format(self.nodes_inventory)) as nodes_inv:
                self.nodes_inv = yaml.load(nodes_inv, Loader=yaml.FullLoader)
        else:
            logging.error('incorrect nodes inventory specified: {}'.format(
                self.nodes_inventory))
            sys.exit()
Code example #4
    def set_nodes_inventory(self):
        """ 
        read inventory file specifying bootstrap, control and compute nodes info

        """
        nodes_inventory_check = check_path(self.nodes_inventory, isfile=True)

        if nodes_inventory_check:
            with open(r'{}'.format(self.nodes_inventory)) as nodes_inv:
                self.nodes_inv = yaml.safe_load(nodes_inv)
        else:
            logging.error('incorrect nodes inventory specified: {}'.format(self.nodes_inventory))
            sys.exit()
Code example #5
    def get_software_download_dir(self):
        """ 
        get software download directory to download OCP software bits
  
        """
        default = '/home/ansible/files'
        self.software_dir = '/home/ansible/files'
        self.software_dir = set_values(self.software_dir, default)
        dest_path_exist = check_path(self.software_dir, isdir=True)
        if dest_path_exist:
            logging.info('directory {} already exists'.format(self.software_dir))
        else:
            logging.info('Creating directory {}'.format(self.software_dir))
            create_dir(self.software_dir)

        self.inventory_dict['all']['vars']['software_src'] = self.software_dir
Code example #6
    def get_software_download_dir(self):
        """ 
        get software download directory to download OCP 4.3 software bits
  
        """
        self.clear_screen()
        default = '/home/ansible/files'
        self.software_dir = input('provide complete path of directory to download OCP 4.3 software bits\n'
                                  'default [/home/ansible/files]: ')
        self.software_dir = set_values(self.software_dir, default)
        dest_path_exist = check_path(self.software_dir, isdir=True)
        if dest_path_exist:
            logging.info('directory {} already exists'.format(self.software_dir))
        else:
            logging.info('Creating directory {}'.format(self.software_dir))
            create_dir(self.software_dir)

        self.inventory_dict['csah']['vars']['software_src'] = self.software_dir
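
Both variants of get_software_download_dir() call set_values() and create_dir(), whose sources are not shown. From the call sites, set_values() appears to substitute the default when the user enters nothing, and create_dir() creates the directory; a minimal sketch under those assumptions (the real helpers may take extra arguments):

# Hypothetical sketches of set_values() and create_dir() as used above.
import os

def set_values(user_input, default):
    # Fall back to the default when the supplied value is empty or whitespace.
    return user_input.strip() if user_input and user_input.strip() else default

def create_dir(path):
    # Create the directory (and any parents) if it does not exist yet.
    os.makedirs(path, exist_ok=True)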
Code example #7
    def get_software(self):
        """
        Download the OCP software bits from the base URLs
        specified in the class __init__.
        """

        logging.info('downloading OCP {} software bits into {}'.format(self.version, self.software_dir))
        urlretrieve('{}/sha256sum.txt'.format(self.ocp_client_base_url),'{}/client.txt'.format(self.software_dir))
        urlretrieve('{}/sha256sum.txt'.format(self.ocp_rhcos_base_url),'{}/rhcos.txt'.format(self.software_dir))
        for url_key in self.ocp_urls.keys():
            url = self.ocp_urls[url_key]
            dest_name = url.split('/')[-1]
            dest_path = self.software_dir + '/' + dest_name
            dest_path_exist = check_path(dest_path, isfile=True)
            url_check = ''
            # reset per file so one valid checksum does not suppress later downloads
            shasum = False
            if dest_path_exist:
                logging.info('file {} already exists in {}'.format(dest_name, self.software_dir))
                shasum = validate_file(self.software_dir, dest_name, self.ocp_rhcos_base_url)
                self.inventory_dict['all']['vars'][url_key] = dest_name

            # validate the URL (once) whenever the file is missing or failed its checksum
            if not shasum:
                url_check = validate_url(url)
                if url_check == '':
                    logging.error('file {} in {} is not available'.format(dest_name, url_key))
                    self.inventory_dict['all']['vars'][url_key] = ''

            if url_check != '' and url_check.code == 200 and not shasum:
                logging.info('downloading {}'.format(dest_name))
                urlretrieve('{}'.format(url),'{}/{}'.format(self.software_dir, dest_name))
                self.inventory_dict['all']['vars'][url_key] = dest_name
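
This later variant adds a checksum step through validate_file(), which is also not shown. The sketch below assumes it parses the sha256sum listings saved as client.txt and rhcos.txt at the top of the method and compares them with the local file's SHA-256 digest; the exact signature and file layout are assumptions:

# Hypothetical sketch of validate_file(); assumes "<sha256>  <filename>" lines
# in the checksum files downloaded by get_software().
import hashlib
import os

def validate_file(software_dir, dest_name, base_url):
    published = {}
    for sums in ('rhcos.txt', 'client.txt'):
        sums_path = os.path.join(software_dir, sums)
        if not os.path.isfile(sums_path):
            continue
        with open(sums_path) as f:
            for line in f:
                parts = line.split()
                if len(parts) == 2:
                    published[parts[1]] = parts[0]
    if dest_name not in published:
        return False
    sha = hashlib.sha256()
    with open(os.path.join(software_dir, dest_name), 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha.update(chunk)
    return sha.hexdigest() == published[dest_name]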
Code example #8
 def web_screenshot(self,tasktype:str):
     driver = self.driver
     path = os.path.join('transtweet','web_screenshot')
     check_path(path)
     save_filename = os.path.join('cache',path,tasktype+'.png')
     driver.get_screenshot_as_file(save_filename)
Code example #9
 def saveTweetsToJson(self,tweets,tasktype:str):
     path = os.path.join('transtweet','res_tweets_json')
     check_path(path)
     filename = tasktype+'.json'
     ttweets = self.dealTweets(tweets)
     data_save(filename,ttweets,path)
Code example #10
# -*- coding: UTF-8 -*-
import os
import traceback
import time
import json
import random

from helper import check_path,TokenBucket
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from helper import getlogger,data_save,data_read
logger = getlogger(__name__)
check_path(os.path.join('transtweet','error'))
check_path(os.path.join('transtweet','unknownimg'))
check_path(os.path.join('transtweet','tweetimg'))
check_path(os.path.join('transtweet','tweetsimg'))
check_path(os.path.join('transtweet','transimg'))

def randUserAgent():
    UAs = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2919.83 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686 on x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2820.59 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
    ]
    return random.choice(UAs)
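
The user-agent helper is presumably fed into the Chrome options when the driver is built elsewhere in this module; a short usage sketch (not taken from the original file):

# Usage sketch only: wiring randUserAgent() into a headless Chrome instance.
options = Options()
options.add_argument('--headless')
options.add_argument('user-agent=' + randUserAgent())
driver = webdriver.Chrome(options=options)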
Code example #11
File: run_pipeline.py Project: andershbf/PRISM
def predict_stability(args):
    logging.info("Starting pipeline")
    os.chdir(os.getcwd())
    logging.info(f'Current working directory: {os.getcwd()}')

    # Obtain, redirect and adapt user arguments
    chain_id = args.CHAIN
    ddgfile = check_path(args.DDG_FLAG_FILE)
    mode = args.MODE
    mutation_input = check_path(args.MUTATION_INPUT)
    outpath = check_path(args.OUTPUT_FILE)
    overwrite_path = args.OVERWRITE_PATH
    relaxfile = check_path(args.RELAX_FLAG_FILE)
    structure_list = check_path(args.STRUC_FILE)
    uniprot_accesion = check_path(args.UNIPROT_ID)
    run_struc = args.RUN_STRUC
    ligand = args.LIGAND
    mp_span = args.MP_SPAN_INPUT
    verbose = args.VERBOSE
    partition=args.SLURM_PARTITION

    if run_struc is None:
        run_struc = chain_id
    # System name
    name = os.path.splitext(os.path.basename(structure_list))[0]

    # Initiate folder structure
    folder = folder2(outpath, overwrite_path, is_mp=args.IS_MP)
    logger = make_log(folder,verbose)

    # Store input files
    input_dict = storeinputs.storeinputfuc(name, args, folder)

    if mode == "proceed" or mode == "relax" or mode == "ddg_calculation":
        mutation_input = "proceed"
        logger.info(f'No preparation, proceeding to execution')

    # Preprocessing
    if mode == 'create' or mode == 'fullrun':
        logger.info(f'Preparation started')
        # Get input files
        prep_struc = create_copy(
            input_dict['STRUC_FILE'], folder.prepare_input, name='input.pdb')

        # Defining structure parameters

        # Create structure instance
        logger.info(f'Creating structure instance')
        structure_instance = structure(chain_id,name,folder,prep_struc,run_struc,logger,uniprot_accesion=uniprot_accesion,)
        run_name = 'input'

        # adjust mp structure if MP_ALIGN_MODE is selected
        if args.IS_MP == True and args.MP_ALIGN_MODE != 'False':
            logger.info(f'Align the structure along the membrane using {args.MP_ALIGN_MODE}')
            if args.MP_ALIGN_MODE == 'OPM':
                if args.MP_ALIGN_REF != '':
                    run_name = 'input_mp_aligned'
                    structure_instance.path = os.path.join(
                        folder.prepare_mp_superpose, f'{run_name}.pdb')
                    try:
                        mp_prepare.mp_superpose_opm(
                            args.MP_ALIGN_REF, prep_struc, structure_instance.path, target_chain=structure_instance.chain_id, write_opm=True)
                    except:
                        mp_prepare.mp_TMalign_opm(
                            args.MP_ALIGN_REF, prep_struc, structure_instance.path, target_chain=structure_instance.chain_id, write_opm=True)                        
                elif args.UNIPROT_ID != '':
                    logger.error('Uniprot-ID to ref pdb not implemented yet')
                    sys.exit()
                else:
                    logger.error(
                        'No reference or Uniprot-ID provided. Automatic extraction via sequence not yet implemented.')
                    sys.exit()
            else:
                logger.error(
                    'Other modes (PDBTM, TMDET, MemProtMD) not yet implemented.')
                sys.exit()

        structure_dic = get_structure_parameters(
            folder.prepare_checking, prep_struc)

        # Cleaning pdb and making fasta based on pdb or uniprot-id if provided
        logger.info(f'Prepare the pdb and extract fasta file')
        structure_instance.path_to_cleaned_pdb, struc_dic_cleaned = structure_instance.clean_up_and_isolate()
        structure_instance.fasta_seq = pdb_to_fasta_seq(
            structure_instance.path_to_cleaned_pdb)
        if uniprot_accesion != "":
            structure_instance.uniprot_seq = read_fasta(
                uniprot_accesion)
            structure_instance.muscle_align_to_uniprot(structure_instance.uniprot_seq)
        else:
            structure_instance.muscle_align_to_uniprot(structure_instance.fasta_seq)

        # Get span file for mp from cleaned file if not provided
        if args.IS_MP == True:
            if input_dict['MP_SPAN_INPUT'] == None:
                logger.info(f'Calculate span file with option {args.MP_CALC_SPAN_MODE}')
                if args.MP_CALC_SPAN_MODE == 'DSSP':
                    structure_instance.span = mp_prepare.mp_span_from_pdb_dssp(
                        structure_instance.path_to_cleaned_pdb, folder.prepare_mp_span, thickness=args.MP_THICKNESS, SLURM=False)
                elif args.MP_CALC_SPAN_MODE == 'octopus':
                    structure_instance.span = mp_prepare.mp_span_from_pdb_octopus(
                        structure_instance.path_to_cleaned_pdb, folder.prepare_mp_span, thickness=args.MP_THICKNESS, SLURM=False)
                elif args.MP_CALC_SPAN_MODE == 'False':
                    logger.warn(
                        'No span file provided and no calculation method selected.')
                else:
                    logger.error(
                        'Other modes (struc, bcl, Boctopus) not yet implemented.')
                    sys.exit()
            elif input_dict['MP_SPAN_INPUT'] != None:
                structure_instance.span = create_copy(
                    input_dict['MP_SPAN_INPUT'], folder.prepare_mp_span, name='input.span')

        # Making mutfiles and checks
        print(f'Convert prism file if present: {input_dict["PRISM_INPUT"]}')
        if input_dict['PRISM_INPUT'] == None:
            new_mut_input = input_dict['MUTATION_INPUT']
        #    mut_dic = get_mut_dict(input_dict['MUTATION_INPUT'])
        else:
            new_mut_input = os.path.join(folder.prepare_input, 'input_mutfile')
            mut_dic = prism_to_mut(input_dict['PRISM_INPUT'], new_mut_input)

        logger.info(f'Generate mutfiles.')
        print(input_dict['MUTATION_INPUT'])
        
        check2 = structure_instance.make_mutfiles(
            new_mut_input)
        check1 = compare_mutfile(structure_instance.fasta_seq,
                                 folder.prepare_mutfiles, folder.prepare_checking, new_mut_input)
        check3, errors = pdbxmut(folder.prepare_mutfiles, struc_dic_cleaned)
        check2 = False  # NOTE: overrides the make_mutfiles() result above, disabling that check

        if check1 == True or check2 == True or check3 == True:
            print("check1:", check1, "check2:", check2, "check3:", check3)
            logger.error(
                "ERROR: STOPPING SCRIPT DUE TO RESIDUE MISMATCH BETWEEN MUTFILE AND PDB SEQUENCE")
            sys.exit()

        # Create hard link to mutfile directory and to output structure
        prepare_output_struc = create_copy(
            structure_instance.path_to_cleaned_pdb, folder.prepare_output, name='output.pdb')
        if args.IS_MP == True:
            prepare_output_span_dir = create_copy(folder.prepare_mp_span, f'{folder.prepare_output}', name='spanfiles', directory=True)
        else:
            prepare_output_ddg_mutfile_dir = create_copy(
                folder.prepare_mutfiles, folder.prepare_output, name='mutfiles', directory=True)

        # Copy files for relax & run
        relax_input_struc = create_copy(
            prepare_output_struc, folder.relax_input, name='input.pdb')

        # Generate sbatch files
        logger.info(f'Generate sbatch files')
        if args.IS_MP == True:

            # copy MP relax input files
            logger.info('Copy MP relax input files')
            relax_input_xml = create_copy(
                input_dict['RELAX_XML_INPUT'], folder.relax_input, name='relax.xml')
            relax_input_span_dir = create_copy(
                prepare_output_span_dir, folder.relax_input, name='spanfiles', directory=True)

            # Parse sbatch relax file
            logger.info('Create MP relax sbatch files.')
            path_to_relax_sbatch = mp_prepare.rosetta_relax_mp(
                folder, SLURM=True, num_struc=3, sys_name=name, partition=partition)

            # Parse sbatch relax parser
            path_to_parse_relax_results_sbatch = structure_instance.parse_relax_sbatch(
                folder, sys_name=f'{name}_relax', sc_name='relax_scores', partition=args.SLURM_PARTITION)

            # Parse sbatch ddg file
            ddg_input_ddgfile = create_copy(
                input_dict['DDG_FLAG_FILE'], folder.ddG_input, name='ddg_flagfile')
            ddg_input_span_dir = create_copy(
                prepare_output_span_dir, folder.ddG_input, name='spanfiles', directory=True)

            if args.MP_PH == -1:
                is_pH = 0
                pH_value = 7
            else:
                is_pH = 1
                pH_value = args.MP_PH
            path_to_ddg_calc_sbatch = mp_ddG.rosetta_ddg_mp_pyrosetta(
                folder, mut_dic, SLURM=True, sys_name=name, partition=args.SLURM_PARTITION,
                repack_radius=args.BENCH_MP_REPACK, lipids=args.MP_LIPIDS,
                temperature=args.MP_TEMPERATURE, repeats=args.BENCH_MP_REPEAT,
                is_pH=is_pH, pH_value=pH_value)
            # Parse sbatch ddg parser
            path_to_parse_ddg_sbatch = mp_ddG.write_parse_rosetta_ddg_mp_pyrosetta_sbatch(
                folder, uniprot=args.UNIPROT_ID, sys_name=name, output_name='ddG.out', partition=partition)
        else:
            # Parse sbatch relax file
            relax_input_relaxfile = create_copy(
                input_dict['RELAX_FLAG_FILE'], folder.relax_input, name='relax_flagfile')
            path_to_relax_sbatch = structure_instance.rosetta_sbatch_relax(
                folder, relaxfile=relax_input_relaxfile, sys_name=name,  partition=partition)
            # Parse sbatch relax parser
            path_to_parse_relax_results_sbatch = structure_instance.parse_relax_sbatch(
                folder, partition=args.SLURM_PARTITION)

            # Parse sbatch ddg file
            ddg_input_ddgfile = create_copy(
                input_dict['DDG_FLAG_FILE'], folder.ddG_input, name='ddg_flagfile')
            ddg_input_mutfile_dir = create_copy(
                prepare_output_ddg_mutfile_dir, folder.ddG_input, name='mutfiles', directory=True)
            path_to_ddg_calc_sbatch = structure_instance.write_rosetta_cartesian_ddg_sbatch(
                folder, ddg_input_mutfile_dir, ddgfile=ddg_input_ddgfile, sys_name=name,  partition=partition)
            # Parse sbatch ddg parser
            path_to_parse_ddg_sbatch = structure_instance.write_parse_cartesian_ddg_sbatch(
                folder, structure_instance.fasta_seq, structure_instance.chain_id, sys_name=name, partition=args.SLURM_PARTITION)

    # Execution
    # Single SLURM execution
    if mode == 'relax':
        parse_relax_process_id = run_modes.relaxation(folder)
        relax_output_strucfile = find_copy(
            folder.relax_run, '.pdb', folder.relax_output, 'output.pdb')


# if SLURM == False:
#    path_to_scorefile = os.path.join(structure_instance.path_to_run_folder + '/relax_scores.sc')
#    relax_pdb_out = relax_parse_results.parse_relax_results(path_to_scorefile, path_to_run_folder)
# else:
#    path_to_parse_relax_results_sbatch = structure_instance.parse_relax_sbatch(os.path.join(structure_instance.path_to_run_folder + '/relax_scores.sc'), structure_instance.path_to_run_folder)
#    relax_pdb_out = parse_relax_process_id = run_modes.relaxation(structure_instance.path_to_run_folder)
# logger.info(f"Relaxed structure for ddG calculations: {relax_pdb_out}")

    if mode == 'ddg_calculation':
        run_modes.ddg_calculation(folder)
#        ddg_output_score = find_copy(
#            folder.ddG_run, '.sc', folder.ddG_output, 'output.sc')

    if mode == 'analysis':
        calc_all(folder, sys_name=name)
        plot_all(folder, sys_name=name)

    # Full SLURM execution
    if mode == 'proceed' or mode == 'fullrun':
        # Start relax calculation
        parse_relax_process_id = run_modes.relaxation(folder)
        # relax_output_strucfile = find_copy(
        # folder.relax_run, '.pdb', folder.relax_output, 'output.pdb')
        # Start ddG calculation
        # ddg_input_struc = create_copy(
        # os.path.join(folder.relax_output, 'output.pdb'), folder.ddG_input,
        # name='input.pdb')
        run_modes.ddg_calculation(folder, parse_relax_process_id)
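
In the PRISM pipeline above, check_path() has yet another role: it wraps each path-like command-line argument and the result is used as a file path later on. A plausible reading, stated purely as an assumption, is that it normalises non-empty arguments to absolute paths and passes empty values through:

# Hypothetical sketch of check_path() as called in predict_stability(); the
# real PRISM helper may additionally verify that the file exists.
import os

def check_path(path):
    if not path:
        return path
    return os.path.abspath(path)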
Code example #12
            return (False,'推送对象不存在!')
        if tweet_user_id not in self.__push_list[message_type][pushTo]['pushunits']:
            return (False,'推送单元不存在!')
        if key == 'nick' or key == 'des':
            self.__push_list[message_type][pushTo]['pushunits'][tweet_user_id][key] = value
        else:
            self.__push_list[message_type][pushTo]['pushunits'][tweet_user_id]['config'][key] = value
        return (True,'属性已更新')
# String template
class tweetToStrTemplate(string.Template):
    delimiter = '$'
    idpattern = '[a-z]+_[a-z_]+'
# Abbreviated tweet IDs (cache of 1000 entries)
mintweetID = TempMemory(def_puth_method + '_' + 'mintweetID.json',limit = 1000,autosave = True,autoload = True)
# Tweet cache (150 entries per monitored account)
check_path(os.path.join('templist','twitterApi'))
tweetsmemory = {}
# Twitter user cache (1000 entries)
userinfolist = TempMemory(def_puth_method + '_' + 'userinfolist.json',limit = 1000,autosave = False,autoload = False)
class tweetEventDeal:
    # Detect profile info updates
    def check_userinfo(self, userinfo, isnotable = False, trigger = True):
        global userinfolist
        """
            Compare against cached data
            to detect changes to a monitored user's profile:
            user ID       screen_name
            nickname      name
            description   description
            avatar        profile_image_url
        """
Code example #13
from html.parser import HTMLParser
from os import path
import os
import requests
import xmltodict
import threading
import traceback
import time
import queue
# Import configuration
import config
# Logging
from helper import check_path, data_read, data_save, getlogger, TempMemory
logger = getlogger(__name__)

check_path(os.path.join('templist', 'RSShub', 'twitter'))

silent_start = config.RSShub_silent_start  # Silent start (do not push updates on startup)
base_url = config.RSShub_base
proxy = config.RSShub_proxy
headers = {'User-Agent': 'CQpy'}
proxies = {"http": proxy, "https": proxy}
tmemorys = {}

tweetuserlist_filename = 'RSShub_tweetuserlist.json'
tweetuserlist = {}
res = data_read(tweetuserlist_filename)
if res[0]:
    tweetuserlist = res[2]
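
The polling logic that uses these module-level settings is not part of the excerpt. A minimal sketch of how a feed fetch could look with the imports above, where the RSSHub route and the RSS field access are assumptions:

# Hypothetical polling helper built on the module-level settings above.
def fetch_user_feed(screen_name):
    url = '{}/twitter/user/{}'.format(base_url, screen_name)
    resp = requests.get(url, headers=headers, proxies=proxies, timeout=30)
    resp.raise_for_status()
    feed = xmltodict.parse(resp.text)
    items = feed['rss']['channel'].get('item', [])
    # A channel with a single entry comes back as a dict rather than a list.
    return items if isinstance(items, list) else [items]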

Code example #14
File: music.py Project: OkayuDeveloper/OkayuTweetBot
from nonebot import on_command, CommandSession,permission as perm
import asyncio
import traceback
from helper import getlogger,msgSendToBot,CQsessionToStr,check_path
from module.roll import match_roll
import config
from os import path
logger = getlogger(__name__)
__plugin_name__ = '音乐'
__plugin_usage__ = r"""
音乐!!!!!
"""
check_path('music')
music_path = path.join(config.music_path,'music','')

# Preprocessing
def headdeal(session: CommandSession):
    if session.event['message_type'] == "group" and session.event.sub_type != 'normal':
        return False
    return True

#1376882360 希望之花 163
#442589034 RAGE OF DUST 卡其脱离态 163
@on_command('希望之花',only_to_me = False)
async def xwzh(session: CommandSession):
    if not headdeal(session):
        return
    await session.send("[CQ:music,type=163,id=1376882360]")
@on_command('卡其脱离态',only_to_me = False)
async def kqtlt(session: CommandSession):
    if not headdeal(session):