Example #1
 def get_global_conf(self):
     conf_result = {}
     if os.path.isfile('../conf/global/master'):
         pass
     if os.path.isfile('../conf/global/slave'):
         pass
     conf_result['master']="coffee"
     conf_result['slave']="test01"
     return conf_result
Example #2
def test_rename(builddir, runner):
    # prepare needed files
    with local.cwd(builddir):
        sh.touch('originalfile')

    builder = BuildFile(build_dir=builddir, runner=runner)

    ###### First build ##########
    builder.main(command_line=['-D', 'build'])

    expected_json = {
        ".deps_version": 2,
        "mv originalfile testfile": {
            "originalfile": "input-d41d8cd98f00b204e9800998ecf8427e",
            "testfile": "output-d41d8cd98f00b204e9800998ecf8427e"
        }
    }

    # assertions
    with local.cwd(builddir):
        assert_json_equality('.deps', expected_json)
        assert os.path.isfile('testfile')
        sys.exit.assert_called_once_with(0)

        # update original file to check the rebuild
        (sh.echo["newline"] > "originalfile")()

    ###### Second build ##########
    builder.main(command_line=['-D', 'build'])

    expected_json = {
        ".deps_version": 2,
        "mv originalfile testfile": {
            "originalfile": "input-321060ae067e2a25091be3372719e053",
            "testfile": "output-321060ae067e2a25091be3372719e053"
        }
    }

    with local.cwd(builddir):
        assert_json_equality('.deps', expected_json)
        assert "newline" in sh.cat('testfile')

    ###### Cleaning ##########
    builder.main(command_line=['-D', 'clean'])

    with local.cwd(builddir):
        assert not os.path.isfile('testfile')
        assert os.path.isfile('originalfile')
Example #3
def parse_NCBI_nodes_tab_file(folder):
    """this is a function to open nodes.dmp from the NCBI taxonomy
    database and find the parent child relationship....returns a
    dictionary for later use.
    """
    # open file - read.
    # nodes.dmp - this file is separated by \t|\t
    # empty dictionary to add to parent and child (keys,vals) to
    tax_dictionary = {}
    # nodes.dmp files goes: child, parent, etc
    # merged.dmp file goes: old, new
    # In both cases, can take key as column 0 and value as column 1
    for filename in ["nodes.dmp", "merged.dmp"]:
        if not os.path.isfile(os.path.join(folder, filename)):
            print("Could not find %s. Please check this." % filename)
            os._exit(0)
        with open(os.path.join(folder, filename)) as handle:
            for line in handle:
                tax_info = line.replace("\n", "\t").split("\t|\t")
                # first element
                parent = tax_info[1]
                # second element
                child = tax_info[0]
                # add these to the dictionary {parent:child}
                tax_dictionary[child] = parent
    # print(tax_dictionary)
    return tax_dictionary
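A minimal usage sketch for the function above (the folder name and taxid are illustrative): in nodes.dmp the root taxid "1" is listed as its own parent, which is what terminates the walk up the lineage.

tax = parse_NCBI_nodes_tab_file("taxonomy")
lineage = ["9606"]
while lineage[-1] != tax[lineage[-1]]:  # the root is its own parent
    lineage.append(tax[lineage[-1]])
print(lineage)  # child-to-root chain of taxids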
Example #4
 def add_folder(self, path):
     """
     添加path路径中所有的账号,返回一个字典
     规则:如果出现文件夹,则单独记录到一个类别中,一层文件夹下的所有账号密码都记录在
     同一个类别
     """
     def search_sub_folder(dic,path):        
         files = os.listdir(path)
         for file in files:
             if os.isfile(file):
                 f = open(path.os.sep+file, r)
                 content = f.read()
                 time = time.strftime('%Y/%m/%d %H:%M',time.localtime(time.time()))
                 dic[file] = [content, time]
             else:
                 search_folder(dic,path+os.sep+file)
 
     data = {}
     #1.扫描到path中所有文件/文件夹的名称,添加到列表
     files = os.listdir(path)
     #2.逐个读取文件,文件夹,按照规则添加到数据文件中
     for file in files:
         if os.isfile(file):
             f = open(path+os.sep+file,r)
             content = f.read()
             time = time.strftime('%Y/%m/%d %H:%M',time.localtime(time.time()))
             data[file] = [content, time]
         else:
             #假如是文件夹
             data[file] = {}
             search_sub_folder(data[file],path+os.sep+fil)
     return data
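A hypothetical call to add_folder with an illustrative layout (the AccountStore class name and the file names are assumptions, not from the original):

# accounts/github.txt     -> data['github.txt'] == [content, mtime]
# accounts/work/mail.txt  -> data['work']['mail.txt'] == [content, mtime]
store = AccountStore()
data = store.add_folder('accounts')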
Example #5
def test_xla_profiler_prog_capture(tmpdir):

    port = xu.get_free_tcp_ports()[0]
    training_started = Event()

    def train_worker():
        model = BoringModel()
        trainer = Trainer(default_root_dir=tmpdir,
                          max_epochs=4,
                          profiler="xla",
                          accelerator="tpu",
                          devices=8)

        trainer.fit(model)

    p = Process(target=train_worker, daemon=True)
    p.start()
    training_started.wait(120)

    logdir = str(tmpdir)
    xp.trace(f"localhost:{port}",
             logdir,
             duration_ms=2000,
             num_tracing_attempts=5,
             delay_ms=1000)

    p.terminate()

    # os.path.isfile cannot expand wildcards; glob the expected trace files (assumes `import glob`)
    assert glob.glob(
        os.path.join(logdir, "plugins", "profile", "*", "*.xplane.pb"))
Example #6
def load(path):
    if not os.path.exists(path):
        print("Load path doesn't exist!")
        return []
    # list.sort() sorts in place and returns None; use sorted() instead
    files = sorted(f for f in os.listdir(path)
                   if os.path.isfile(os.path.join(path, f)))
    return files
Example #7
 def register_model(self, model_name):
     if not self.trained:
         if os.path.isfile(self.this_job_path + '/torchmodel.pth'):
             self.trained = True
     # add model information to database(file db or web db)
     if self.trained:
         # create model folder
         model_root_path = self.workspace_path + '/model'
         model_path = model_root_path + '/' + model_name
         createDirectory(model_root_path)
         createDirectory(model_path)
         if self.net_name != "":  # `is not ""` tests identity, not equality
             # copy network file to model path
             # $WORKSPACE/nets{net_name} -> $WORKSPACE/model/{model_name}/torchmodel.py
             org_net_path = self.workspace_path + '/net/' + self.net_name + '.py'
             net_path = model_path + '/torchmodel.py'
             shutil.copy(org_net_path, net_path)
         else:
             self.extract_network()
             org_net_path = self.this_job_path + '/net.py'
             net_path = model_path + '/torchmodel.py'
             shutil.copy(org_net_path, net_path)
         # copy model file to model path
         # $JOB_PATH/torchmodel.pth -> $WORKSPACE/model/{model_name}/torchmodel.pth
         org_modelfile_path = self.this_job_path + '/torchmodel.pth'
         modelfile_path = model_path + '/torchmodel.pth'
         shutil.copy(org_modelfile_path, modelfile_path)
Example #8
def write_ls(ls,file):
    if os.path.isfile(file):
        os.remove(file)
    with open(file, 'w') as fp:
        for l in ls:
            fp.write(l)
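A short usage sketch for write_ls (the file name is illustrative); note that the caller supplies the trailing newlines:

write_ls(["first\n", "second\n"], "out.txt")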
Example #9
    def select_diverse_ligands(self, target):
        """


        :return:
        """
        # download ligands
        # convert to fps
        # cluster on ligands
        # update data to only include selected members
        tmp = tempfile.mkdtemp()
        ligands = self.data['ligands']
        for ligand in ligands:
            self._write(ligand, tmp)

        files = [
            os.path.join(tmp, f) for f in os.listdir(tmp)
            if os.path.isfile(os.path.join(tmp, f))
        ]
        ligands = {
            os.path.basename(f).split(".")[0]: x
            for f in files for x in Chem.ForwardSDMolSupplier(f)
            if x is not None
        }

        for n, l in ligands.items():
            l.SetProp("_Name", n)

        cluster_dict = self._cluster_ligands(ligands=ligands, t=target)
        reps = [l[0] for l in cluster_dict.values() if len(l) != 0]

        print(reps)
Example #10
    def download_equity_M1(self, tasks, startYr=2012, endYr=2015):
        """

		"""

        try:
            # map equity tickers to security IDs.
            if self._mapTickersToSecIDs:
                maps = self._mapTickersToSecIDs
            else:
                assert os.path.isfile("./names/secID.json")
                jsonFile = open("./names/secID.json", "r")  # `dName` was undefined
                allSecIds = json.loads(jsonFile.read())
                jsonFile.close()
                allTickers = [s.split(".")[0] for s in allSecIds]
                maps = dict(zip(allTickers, allSecIds))
                self._mapTickersToSecIDs = maps
            tasks_ = [maps[task] for task in tasks]

            db = self._dbs["EQU_M1"]["self"]
            self._api.get_equity_M1_interMonth(db, id=1, startYr=startYr, endYr=endYr, tasks=tasks_)
        except AssertionError:
            msg = "[MONGOD]: Cannot map tickers to secIDs; " + "secID.json does not exist."
            raise VNPAST_DatabaseError(msg)
        except Exception as e:
            msg = "[MONGOD]: Unable to download data; " + str(e)
            raise VNPAST_DatabaseError(msg)
Example #11
 def save(self, mode, *path):
     path = os.path.join(*path)  # join the components, not the tuple itself
     if os.path.isfile(path):
         with open(path, mode) as f:
             return pickle.Unpickler(f).load()
     else:
         return False
Example #12
    def download_equity_M1(self, tasks, startYr=2012, endYr=2015):
        """

		"""

        try:
            # map equity tickers to security IDs.
            if self._mapTickersToSecIDs:
                maps = self._mapTickersToSecIDs
            else:
                assert os.path.isfile('./names/secID.json')
                jsonFile = open('./names/secID.json', 'r')  # `dName` was undefined
                allSecIds = json.loads(jsonFile.read())
                jsonFile.close()
                allTickers = [s.split('.')[0] for s in allSecIds]
                maps = dict(zip(allTickers, allSecIds))
                self._mapTickersToSecIDs = maps
            tasks_ = [maps[task] for task in tasks]

            db = self._dbs['EQU_M1']['self']
            self._api.get_equity_M1_interMonth(db,
                                               id=1,
                                               startYr=startYr,
                                               endYr=endYr,
                                               tasks=tasks_)
        except AssertionError:
            msg = '[MONGOD]: Cannot map tickers to secIDs; ' + \
               'secID.json does not exist.'
            raise VNPAST_DatabaseError(msg)
        except Exception as e:
            msg = '[MONGOD]: Unable to download data; ' + str(e)
            raise VNPAST_DatabaseError(msg)
Example #13
def prune(path, **options):
    config = get_configuration(options)

    # TODO: exclude `latest.ext`!
    files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
    matches = [f for f in files if f.endswith(extension)]  # `extension` is assumed to come from the configuration

    if '--max-snapshots' in options:
        if int(options['--max-snapshots']) < len(matches):
            too_much = len(matches) - int(options['--max-snapshots'])
            for i in range(too_much):
                os.remove(os.path.join(path, matches[i]))  # remove matching snapshots, not arbitrary files

    if '--max-size' in options:
        size = [os.path.getsize(os.path.join(path, f)) for f in matches]
        if sum(size) > int(options['--max-size']):
            too_much = sum(size) - int(options['--max-size'])  # bytes over the limit
            will_remove = []
            while sum(will_remove) < too_much:
                will_remove.append(size.pop(0))

            to_remove = matches[:len(will_remove)]
            for f in to_remove:  # `too_old` belongs to the --max-days branch below
                os.remove(os.path.join(path, f))

    if '--max-days' in options:
        is_too_old = functools.partial(is_old, days=int(options['--max-days']))
        too_old = [f for f in files if is_too_old(to_date(f))]
        # joining with `path` avoids depending on the current working directory
        for f in too_old:
            os.remove(os.path.join(path, f))

    if options['--save-configuration']:
        del options['--save-configuration']
        dump_args(config, open('.versioned', 'w'))
Example #14
 def info_file(self):
     # create the folder
     path = mother_path + '/' + self.name
     print('saving to ' + path)
     if not os.path.isdir(path):
         # if the path does not exist, create it
         print("Anime not downloaded yet, creating folder\n" + path)
         os.mkdir(path)
     else:
         print("It seems you already downloaded this anime")
         os.system('pause')
     name_path = path + '/info.md'
     # write the file at path
     if os.path.isfile(name_path):
         print('info.md already exists')
         return None
     with open(name_path, 'wb') as f:
         f.write(self.name.encode('utf-8') + b'\n')
         f.write(self.state.encode('utf-8') + b'\n')
         f.write(self.sinopsis.encode('utf-8') + b'\n')
         f.write(b'Episodios:\n')
         caps = len(self.capitulos)
         i = caps - 1
         while i > 2:
             f.write(self.capitulos[i].encode('utf-8') + b'\n')
             i -= 1
     return None
Example #15
def disable_internet():
    if os.path.isfile(internet_tag_file):
        os.remove(internet_tag_file)

    total_subnet = ",".join([net_config["HqCidr"],net_config["VpcCidr"]])
    cmd = internet_cmd % ('-D', total_subnet)
    return exeute_shell(cmd)
Example #16
def parse_NCBI_nodes_tab_file(folder):
    """this is a function to open nodes.dmp from the NCBI taxonomy
    database and find the parent child relationship....returns a
    dictionary for later use.
    """
    # open file - read.
    # nodes.dmp - this file is separated by \t|\t
    # empty dictionary to add to parent and child (keys,vals) to
    tax_dictionary = {}
    # nodes.dmp files goes: child, parent, etc
    # merged.dmp file goes: old, new
    # In both cases, can take key as column 0 and value as column 1
    for filename in ["nodes.dmp", "merged.dmp"]:
        if not os.path.isfile(os.path.join(folder, filename)):
            print("Could not find %s. Please check this." % filename)
            os._exit(0)
        with open(os.path.join(folder, filename)) as handle:
            for line in handle:
                tax_info = line.replace("\n", "\t").split("\t|\t")
                # first element
                parent = tax_info[1]
                # second element
                child = tax_info[0]
                # add these to the dictionary {parent:child}
                tax_dictionary[child] = parent
    # print(tax_dictionary)
    return tax_dictionary
Example #17
def get_target_annotations(pset, annot_dir):
    """
    Annotate the 'TARGET' in the 'drug' slot of a PSet object using a mapping
    from the UniProt identifier mapping tool API.

    :param pset:
    :param annot_dir:
    :return:
    """
    # Read in drug target annotations and gene annotations
    drug_targets = pd.read_csv(
        os.path.join(annot_dir, 'drugbank_drug_targets_all.csv'))
    rnaseq_df = pset.get("molecularProfiles").get(
        "Kallisto_0.46.1.rnaseq").get("elementMetadata")

    # Map genes to drugbank drug ids
    genes_to_drugs = pd.merge(
        drug_targets.loc[:, ['Name', 'Gene Name', 'Drug IDs']],
        rnaseq_df.loc[:, ['gene_name', 'gene_id']],
        left_on='Gene Name',
        right_on='gene_name')

    # Annotate the genes

    # Expand list columns into rows and annotate drugs
    genes_to_drugs['Drug IDs'] = [
        str.split(ids, '; ') for ids in genes_to_drugs['Drug IDs'].values
    ]
    genes_to_drugs = genes_to_drugs.explode('Drug IDs')

    # Write to disk if necessary.
    file_path = os.path.join(annot_dir, 'drugbank_drug_to_gene_mappings.csv')
    if not os.path.isfile(file_path):
        genes_to_drugs.to_csv(file_path, index=False)  # pandas has no pd.write_csv
Example #18
 def save_additional_directories(self, dirs):
     for d in dirs:
         echo ">>> Saving " + d + "..."
         if os.isfile(d) or os.isdir(d):
             os.chdir("/")
             archive_path = self.backup_dir + d.replace("/", "_") + ".tar.bz2"
             bz2( tar(d, "c"), "-9", _out=archive_path )
Example #19
	def save(self, mode, *path):
		path = os.path.join(*path)
		if os.path.isfile(path):
			with open(path, mode) as f:
				return pickle.Unpickler(f).load()
		else:
			return False
Example #20
def run(site):
    #Create tracking file for which videos have been analyzed
    trackFile = path+"processed.txt"
    print("TrackFile: ", trackFile)
    if not os.path.isfile(trackFile):
        f = open(trackFile,'a+')
        f.close()

    # Go through every video in S3 bucket 
    # "untrunc" the video
    # Reupload new video to S3
    for s3_file in s3.Bucket(VIDEO_BUCKET_NAME).objects.filter(Prefix='videos/'+site):
        substring = s3_file.key.split("/")
        filename  = substring[-1]
        
        if filename.endswith(".mp4"):
            #Skip video fixing if already done
            if s3_file.key in open(trackFile).read():  # `doneFile` was undefined
                print("\n\nAlready read:"+ s3_file.key)
                continue
            else:
                print("\n\nAnalyzing "+ s3_file.key +" video...")

                #Split key name
                vidName  = filename.split('.')[0]
                time     = substring[-2]
                date     = substring[-3]
                facility = substring[-4]
                print("Video name: " + vidName)
                print("Facility: "   + facility)

                #Download video from S3
                print("Downloading video from S3...")
                videoFullPath = vidPath+filename
                s3.Bucket(VIDEO_BUCKET_NAME).download_file(s3_file.key, videoFullPath)
                print("Finished downloading video")

                #Create new directory for fixed video
                newVidPath = vidPath+facility+'/'+date+'/'+time+'/'
                if not os.path.exists(newVidPath):
                    os.makedirs(newVidPath)

                #Fix Video
                untruncVideo(filename, newVidPath)

                #Sync video to S3
                if os.path.isfile(newVidPath+filename):
                    source = newVidPath+filename  # `videoName` was undefined
                    destination = 's3://'+str(VIDEO_BUCKET_NAME)+'/*/full_videos_fixed/'+facility+'/'+date+'/'+time+'/'
                    uploadToS3("sync", source, destination)  

                #Remove video
                print("Done analyzing, removing current video file...")
                os.remove(videoFullPath)

                #Add video to doneFile
                f = open(trackFile,'a')
                f.write(str(s3_file.key)+'\n')
                f.close()
                print("Moving onto next video...\n")
Example #21
	def run(self):
		iter = mcl_input_iterator(self.inf)
		for mcl_input_block in iter:
			out_block = mcl_input_block.readline()
			mclinf = open(self.mclinput_fname, 'w')
			mclinf.write(mcl_input_block.read())
			mclinf.close()
			parameter = self.parameter.split()
			wl = ['mcl', self.mclinput_fname, '-o',self.mcloutput_fname] + parameter
			try:
				os.spawnvp(os.P_WAIT, 'mcl', wl)
			except:
				sys.stderr.write('MCL running error.\n')
				os.remove(self.mclinput_fname)
				if os.path.isfile(self.mcloutput_fname):
					os.remove(self.mcloutput_fname)
				sys.exit(1)
			out_block += '(parameter %s )\n'%self.parameter
			mcloutf = open(self.mcloutput_fname, 'r')
			out_block += mcloutf.read()
			mcloutf.close()
			self.outf.write(out_block)
			
		os.remove(self.mclinput_fname)
		os.remove(self.mcloutput_fname)
Example #22
def rinex_renamer(input_rinex_path,output_directory,stat_out_name='',remove=False):

    if stat_out_name == '':
        stat_out_name = os.path.basename(input_rinex_path)[0:4]

    stat_out_name = stat_out_name.lower()

    inp_rinex_obj=open(input_rinex_path,'r+')
    out_dir = output_directory # redundant name, kept to avoid renaming the variable everywhere

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    os.chdir(out_dir)

    first_epoch , last_epoch = rinex_start_end(input_rinex_path)
    rnx_interval_ext = rinex_session_id(first_epoch,last_epoch) + '.'
    rinex_out_name = stat_out_name + first_epoch.strftime('%j') + rnx_interval_ext + first_epoch.strftime('%y') + 'o'

    print(rinex_out_name)

    output_rinex_path = os.path.join(out_dir,rinex_out_name)

    if input_rinex_path != output_rinex_path:
        print("INFO : copy of ", input_rinex_path , ' to ' , output_rinex_path)
        shutil.copy(input_rinex_path,output_rinex_path)

        if remove and os.path.isfile(output_rinex_path):
            print("INFO : removing " , input_rinex_path)
            os.remove(input_rinex_path)
    else:
        print("INFO : " , input_rinex_path)
        print("and", output_rinex_path ,"are the same file")
        print("nothing's done ...")

    return output_rinex_path
Example #23
def enable_internet():
    # create a file to indicate the state of internet connection
    if not os.path.isfile(internet_tag_file):
        open(internet_tag_file, "a").close()

    total_subnet = ",".join([net_config["HqCidr"],net_config["VpcCidr"]])
    cmd = internet_cmd % ('-A', total_subnet)
    return exeute_shell(cmd)
Example #24
 def build_model_from_dirents(self, dirent):
     for subdir in os.listdir(dirent):
         if os.path.isdir(subdir):
             self.build_feature(subdir)
             for filename in os.listdir(subdir):
                 if os.path.isfile(os.path.join(subdir, filename)) and re.match(r".*\.sql$", filename):
                     self.build_scenario(filename)
     return Model(self.features)
Example #25
def calcula_tamanho_pasta(pasta):
    tamanhoTotal = 0
    ficheiros = os.listdir(pasta)
    for ficheiro in ficheiros:
        if os.path.isfile(join(pasta, ficheiro)):
            tamanhoTotal += os.path.getsize(join(pasta, ficheiro)) / 1024

    return tamanhoTotal
Example #26
def find_in_path(basename):
	path_ary = os.getenv("PATH").split(':')  # "PATH" must be a string, not a bare name
	for p in path_ary:
		path = "%s/%s" % (p, basename)
		if os.access(path, os.X_OK) and os.path.isfile(path):
			return path

	return False
Example #27
def find_in_path(basename):
    path_ary = os.getenv("PATH").split(':')
    for p in path_ary:
        path = "%s/%s" % (p, basename)
        if os.access(path, os.X_OK) and os.path.isfile(path):
            return path

    return False
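A quick usage sketch (the binary name is illustrative): find_in_path returns the executable's full path on success and False otherwise.

gzip_path = find_in_path("gzip")
if gzip_path:
    print("found gzip at", gzip_path)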
Example #28
 def build_model_from_dirents(self, dirent):
     for subdir in os.listdir(dirent):
         if os.path.isdir(subdir):
             self.build_feature(subdir)
             for filename in os.listdir(subdir):
                 if os.path.isfile(os.path.join(subdir, filename)) and re.match(r".*\.sql$", filename):
                     self.build_scenario(filename)
     return Model(self.features)
Example #29
def countFiles(path,num=0):
    for content in listdir(path):
        if isfile(join(path,content)):  # listdir returns bare names; join with path
            num+=1
    for content in listdir(path):
        if isdir(join(path,content)):
            num+=countFiles(join(path,content))
    return num
Example #30
 def setValue(self,  value):
     """
     Sets value and filename for this object.
     """
     if os.path.isfile(value): # allow overwriting files only with existing files
         self.varValue = value
         self.filename = os.path.basename(value)
     else:
         raise IncorrectValue(value + " in PathToFile.setValue()")
Example #31
def convert_to_npy(args):
    if not isinstance(args, tuple):
        key = "data"
        npz_file = args
    else:
        npz_file, key = args
    if not os.path.isfile(npz_file[:-3] + "npy"):
        a = np.load(npz_file)[key]
        np.save(npz_file[:-3] + "npy", a)
Example #32
def precondition_check():
	"""f() -> None

	Disallow running the program if the C version generated files are not there.
	"""
	for filename in [WITH_PASS_EPAK, NO_PASS_EPAK]:
		if not os.path.isfile(filename):
			print("Missing %r" % filename)
			sys.exit(1)
Example #33
def dirSize(path,size=0):
    from os import listdir, stat
    from os.path import isfile, isdir, join  # os itself has no isfile/isdir/join
    for content in listdir(path):
        if isfile(join(path,content)):
            size+=stat(join(path,content)).st_size
    for content in listdir(path):
        if isdir(join(path,content)):
            size+=dirSize(join(path,content))
    return size
Example #34
    def __init__(self, partName, unixEpochTime, configurationRef, partStatus):
        self.partName = partName
        self.unixEpochTime = unixEpochTime
        self.fileName = self.partName + MARK_DOWN_EXTENSION  # `=` here overwrote partName; `+` was intended
        self.configurationRef = configurationRef
        self.partStatus = partStatus

        if os.path.isfile(self.fileName):  # `filename` was undefined
            f = open(self.fileName, 'r')   # open(path, mode); the arguments were swapped
            line = f.readline()
Example #35
def stream(data, song_name):
    song = ("%s.mp3", song_name)
    if os.isfile(song):
        mixer.music.load(song)
        mixer.music.play()
    else:
        with open(song, 'wb') as file:
            file.write(data)
        mixer.music.load(song)
        mixer.music.play()
Example #36
def default_options(action):
    """Enable or Disable All Available options"""
    for k in option_dict:
        file = os.path.join(config_dir, k)
        if action == 'enable':
            with open(file, 'w'):
                pass  # just create an empty flag file
        else:
            if os.path.isfile(file):
                os.remove(file)
Example #37
def main():
    if sys.argv[1] == "-c":
        print "List of missing files/dirs:"
        backup_list = open("/home/tj/.backup-list")
        for line in backup_list:
            path = os.path.join("/home/tj", line)
            if os.isdir(path) or os.isfile(path):
                print line
        print "End of List"
        sys.exit(0)
Example #38
	def from_dir(self, dir):
		list = SourceList()
		for ele in os.listdir(dir):	
			if ele.startswith('.'):
				continue

			if os.path.isfile(os.path.join(dir, ele)):
				file = ele
			else:
				file = ele+'/'+ele
Example #39
 def __init__(self, directory=""):
     self.directory = directory
     if len(self.directory) > 0:
         self.ls = [
             f for f in listdir(self.directory)
             if isfile(join(self.directory, f)) and f.endswith(".csv")
         ]
     print(
         "Note : It's better to make use of pandas.\n\n\tpd.read_csv(\"actors.csv\").to_dict(orient=\"row\")\n"
     )
Example #40
 def search_sub_folder(dic,path):
     files = os.listdir(path)
     for file in files:
         full = os.path.join(path, file)
         if os.path.isfile(full):
             with open(full, 'r') as f:  # `path.os.sep` and the bare `r` were typos
                 content = f.read()
             mtime = time.strftime('%Y/%m/%d %H:%M', time.localtime(time.time()))  # don't shadow the time module
             dic[file] = [content, mtime]
         else:
             search_sub_folder(dic, full)  # `search_folder` was undefined
Example #41
 def get_bucket_owner(self, bucket):
     """ returns the bucket owner """
     path = os.path.join(self.id_to_filename(bucket), '_owner')
     if not os.path.isfile(path):
         return "nobody"
     with open(path, "r") as f:  # the arguments were swapped: open(path, mode)
         owner = f.read()
     owner = owner.rstrip("\n")
     return owner
Example #42
    def scan(self, payload, **kwargs):
        """
        Scan a payload using TRiD

        :param bytes payload: Payload to be scanned
        :param **kwargs kwargs: Additional parameters (unused)

        :returns: Results from scan
        :rtype: dict or None

        """

        results = []

        path = self.stoq.write(path=self.stoq.temp_dir,
                               payload=payload,
                               binary=True)

        if not os.path.isfile(self.bin):
            self.stoq.log.error("TrID does not exist at {}!".format(self.bin))
            return None

        # Build our command and then execute it
        cmd = [self.bin, "-d:{}".format(self.defs), path]
        trid_results = check_output(cmd).splitlines()

        # If there are results, iterate over them and build our blob
        if len(trid_results) > 0:
            if trid_results[7].startswith("Warning".encode()):
                start_pos = 10
            else:
                start_pos = 7

            for line in trid_results[start_pos:]:
                line = line.decode().split()
                if len(line) > 1:
                    r = {}
                    r['likely'] = line[0]
                    r['extension'] = line[1]
                    r['type'] = ' '.join(line[2:])
                    results.append(r)

        # Time to cleanup if we wrote a temp file to disk
        try:
            if os.path.isfile(path):
                os.remove(path)
        except:
            pass

        super().scan()

        if results:
            return results
        else:
            return None
Example #43
def _check_cram_fasta_input(urls_dict, ref_fasta):
    """
    Ensure reference FASTA file is provided if 
    """

    ftypes = [vals['ftype'] for vals in urls_dict.values()]
    if 'cram' in ftypes:
        if not os.path.isfile(ref_fasta):
            err = 'INPUT ERROR: input .tsv contains one or more CRAM files ' + \
                  'but --ref-fasta not specified'
            exit(err)
Example #44
    def scan(self, payload, **kwargs):
        """
        Scan a payload using XORSearch

        :param bytes payload: Payload to be scanned
        :param **kwargs kwargs: Additional parameters (unused)

        :returns: Results from scan
        :rtype: dict or None

        """

        results = []

        path = self.stoq.write(path=self.stoq.temp_dir,
                               payload=payload,
                               binary=True)

        if not os.path.isfile(self.bin):
            self.log.error("XORSearch does not exist at {}!".format(self.bin))
            return None

        # Build our command and then execute it
        cmd = [self.bin, '-f', self.terms, path]
        process_results = check_output(cmd).splitlines()

        # If there are results, iterate over them and build our blob
        if len(process_results) > 0:
            for line in process_results:
                line = line.decode()
                result = line.split()
                hit = line.split(': ')
                r = {}
                # We are going to skip over hits that are not xor'd
                if result[2] != '00':
                    r['key'] = result[2]
                    r['pos'] = result[4].replace(':', '')
                    r['str'] = hit[1]
                    results.append(r)

        # Time to cleanup if we wrote a temp file to disk
        try:
            if os.path.isfile(path):
                os.remove(path)
        except:
            pass

        super().scan()

        # Return our results
        if results:
            return results
        else:
            return None
Example #45
    def scan(self, payload, **kwargs):
        """
        Scan a payload using XORSearch

        :param bytes payload: Payload to be scanned
        :param **kwargs kwargs: Additional parameters (unused)

        :returns: Results from scan
        :rtype: dict or None

        """

        results = []

        path = self.stoq.write(path=self.stoq.temp_dir,
                               payload=payload,
                               binary=True)

        if not os.path.isfile(self.bin):
            self.stoq.log.error("XORSearch does not exist at {}!".format(self.bin))
            return None

        # Build our command and then execute it
        cmd = [self.bin, '-f', self.terms, path]
        process_results = check_output(cmd).splitlines()

        # If there are results, iterate over them and build our blob
        if len(process_results) > 0:
            for line in process_results:
                line = line.decode()
                result = line.split()
                hit = line.split(': ')
                r = {}
                # We are going to skip over hits that are not xor'd
                if result[2] != '00':
                    r['key'] = result[2]
                    r['pos'] = result[4].replace(':', '')
                    r['str'] = hit[1]
                    results.append(r)

        # Time to cleanup if we wrote a temp file to disk
        try:
            if os.path.isfile(path):
                os.remove(path)
        except:
            pass

        super().scan()

        # Return our results
        if results:
            return results
        else:
            return None
Example #46
def enqueue(params, email, qdir=QUEUEDIR):
    """
    This is a function to enqueue messages
    """
    # Make the directory to queue messages if doesn't exist
    if not os.path.exists(qdir):
        os.makedirs(qdir)

    # Construct the base of the file
    timestamp = time.strftime("%Y-%m-%d-%H.%M.%S", time.gmtime())
    base = qdir + "/" + timestamp
    i=0
    while (os.path.isfile(base + ".mail") or os.path.isfile(base + ".msmtp")):
        i += 1
        base = qdir + "/" + timestamp + "-" + str(i)  # i is an int and must be converted

    # os.open returns a raw fd, which is not a context manager; wrap it with os.fdopen
    flags = os.O_WRONLY | os.O_CREAT
    with os.fdopen(os.open(base + ".mail", flags, 0o600), "w") as fem, \
         os.fdopen(os.open(base + ".msmtp", flags, 0o600), "w") as fms:
        # write into the files; print(x, f) would print to stdout
        print(params, file=fms)
        print(email, file=fem)  # `message` was undefined; the parameter is `email`
Example #47
def runqueue(qdir=QUEUEDIR, maxwait=120, lockfile=LOCKFILE):
    # Wait for a lock that another instance has set
    wait = 0
    for i in range(maxwait):
        if os.path.isfile(lockfile):
            time.sleep(1)
        else:
            break

    if os.path.isfile(lockfile):
        print("Cannot use the queuedir, because another instance is already using it")
        print("Remove the lockfile if that's not the case")

    # Check for empty queuedir
    if len(os.listdir(qdir)) == 0:
        print("No mails in the queuedir")

    # Lock the directory
    touch(lockfile)

    # Process all mails
    for mailfile in glob.glob(qdir + "/*.mail"):
        msmtpfile = os.path.splitext(mailfile)[0] + ".msmtp"
        print("Sending")

        with open(msmtpfile) as f:  # os.open returns a bare fd; the builtin open is wanted here
            msmtp_opts = f.read()


        # `<` is shell redirection, so the command must run through a shell; assuming
        # subprocess.call was intended (`Call` is undefined). Exit code 0 means success.
        if 0 == subprocess.call("msmtp %s < %s" % (msmtp_opts, mailfile), shell=True):
            os.remove(msmtpfile)
            os.remove(mailfile)
            print("Sent")
        else:
            print("msmtp could not process the message")
    

    # Unlock the directory
    os.remove(lockfile)  # os has no rm()

    return 0
Example #48
 def __init__(self, m_init, Ms, d_mxHxm=0.1, name=None):
     if isinstance(m_init, str):
         if os.path.isfile(m_init):
             self.m_init = m_init
         else:
             raise JoommfError("Magnetisation file not found")
     else:
         self.m_init = m_init
     # Want to throw a warning here if neither
     self.Ms = Ms
     self.d_mxHxm = d_mxHxm
     self.name = name
Example #49
 def __init__(self, m_init, Ms, d_mxHxm=0.1, name=None):
     if isinstance(m_init, str):
         if os.path.isfile(m_init):
             self.m_init = m_init
         else:
             raise JoommfError("Magnetisation file not found")
     else:
         self.m_init = m_init
     # Want to throw a warning here if neither
     self.Ms = Ms
     self.d_mxHxm = d_mxHxm
     self.name = name
Example #50
def expandpath(path):
    """Expand (fully qualify) an arbitrary path to an existing file or directory.

    If path does not map to an existing file the pathname is returned
    unchanged.
    """
    if os.path.isdir(path):
        return expand_dir_path(path)
    elif os.path.isfile(path):
        return expand_file_path(path)
    else:
        return path
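A usage sketch (file names illustrative, and assuming the expand_dir_path/expand_file_path helpers are defined elsewhere): existing paths come back fully qualified, anything else is returned unchanged.

print(expandpath('setup.py'))  # e.g. /home/user/project/setup.py
print(expandpath('no/such'))   # no such file: returned unchanged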
Example #51
def expandpath(path):
    """Expand (fully qualify) an arbitrary path to an existing file or directory.

    If path does not map to an existing file the pathname is returned
    unchanged.
    """
    if os.path.isdir(path):
        return expand_dir_path(path)
    elif os.path.isfile(path):
        return expand_file_path(path)
    else:
        return path
Example #52
 def register_model(self, model_name):
     if not self.trained:
         if os.path.isfile(self.this_job_path + '/' + self.torchmodel_filename):
             self.trained = True
         else:
             print("Error: No model trained.")
             return
     # add model information to database(file db or web db)
     if self.trained:
         # create model folder
         model_root_path = self.workspace_path + '/model'
         model_path = model_root_path + '/' + model_name
         createDirectory(model_root_path)
         createDirectory(model_path)
         if self.net_name != "":
             # copy network file to model path
             # $WORKSPACE/nets{net_name} -> $WORKSPACE/model/{model_name}/torchmodel.py
             org_net_path = self.workspace_path + '/net/' + self.net_name + '.py'
             net_path = model_path + '/' + self.torchnet_filename
             shutil.copy(org_net_path, net_path)
         else:
             org_net_path = self.this_job_path + '/' + self.network_filename
             net_path = model_path + '/' + self.torchnet_filename
             shutil.copy(org_net_path, net_path)
         # copy model file to model path
         # $JOB_PATH/torchmodel.pth -> $WORKSPACE/model/{model_name}/torchmodel.pth
         org_modelfile_path = self.this_job_path + '/' + self.torchmodel_filename
         modelfile_path = model_path + '/' + self.torchmodel_filename
         shutil.copy(org_modelfile_path, modelfile_path)
         # copy service.json to model path
         org_service_file_path = self.this_job_path + '/service.json'
         service_file_path = model_path + '/service.json'
         if os.path.exists(org_service_file_path):
             shutil.copy(org_service_file_path, service_file_path)
         # copy result graph to model path
         if self.problem_type == "classification":
             org_graph_file_path = self.this_job_path + '/confusionMatrix.png'
             graph_file_path = model_path + '/confusionMatrix.png'
         if self.problem_type == "regression":
             org_graph_file_path = self.this_job_path + '/regressionAccuracy.png'
             graph_file_path = model_path + '/regressionAccuracy.png'
         if os.path.exists(org_graph_file_path):
             shutil.copy(org_graph_file_path, graph_file_path)
         # copy score to model path
         org_score_path = self.this_job_path + '/score'
         with open(org_score_path, "r") as score_file:
             score = score_file.readline()
             pos = score.find(':')
             score = score[pos + 2:]
         score_path = model_path + '/score'
         if os.path.exists(org_score_path):
             shutil.copy(org_score_path, score_path)
         self.create_model_metadata(model_name, model_path, score)
Example #53
def cp(src, dst, symlink = False, ignores = []):
  '''
    to copy file or directories.
    @src, string, source file or directory.
    @dst, string, destination file or directory
    @symlink, bool, whether ignore symlinks
    @ignores, list, ignore patterns, used as parameter for ignore_patterns.
  '''
  if os.path.isfile(src):
    shutil.copy(src, dst)
  else:
    # copytree's keywords are `symlinks` and `ignore`; ignore_patterns takes *patterns
    shutil.copytree(src, dst, symlinks=symlink, ignore=shutil.ignore_patterns(*ignores))
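A usage sketch for cp (paths and patterns are illustrative): a single file goes through shutil.copy, a directory through shutil.copytree with the given ignore patterns.

cp('config.yaml', 'backup/config.yaml')
cp('assets', 'backup/assets', ignores=['*.tmp', '*.log'])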
Example #54
 def integrity_check(self):
     ITERATION = 0
     FAILEDLOAD = 0
     for x in self.EXISTS_TABLE:
         if x:  # this entry is expected to exist on disk
             # does the file exist at the recorded path when it should?
             if not os.path.isfile(str(self.LOADED_PATHS[ITERATION]) + str(self.FILE_TABLE[ITERATION])):
                 print('Attempting repair...    ' + str(self.FILE_TABLE[ITERATION]))
                 ##REPAIR
                 ##END REPAIR
             else:
                 print('found .. ' + str(self.FILE_TABLE[ITERATION]))
         ITERATION += 1
     print('Load check complete:  LOADED|' + str(len(self.EXISTS_TABLE) - FAILEDLOAD) + '    Failed|' + str(FAILEDLOAD))
Example #55
def add_new_command(command, tags):
    """adds a new command, also creates a new tag or adds it
    to the specified tag"""
    command_filename = COMMANDS_DIR+command.split()[0]
    print(command_filename)
    if os.path.isfile(command_filename):
        print("Filename exists")
    else:
        with open(command_filename, 'w'):  # create an empty command file
            pass

    if tags is None:
        print(command+"-> No tag specified")
    else:
        print(command+"-> tags: "+" ".join(tags))
Example #56
	def compresscallfile(self):
		try:
			self.logger.info("Call dir %s" %self.calldir)
			maildir=os.path.join(self.responder.directory,self.responder.maildir)
			zipfile=os.path.join(maildir,self.callid+".zip")
			os.system("zip -r %s %s" %(zipfile, self.calldir))
			if os.isfile("%s" %(zipfile)):
				self.logger.info("Successfully zipped call file")
			else:
				self.logger.info("Could not compress call file")
			
				
		except:
			self.logger.error("Could not compress call file")
Example #57
 def update_media(self, base_path=getattr(settings, "MEDIA_ROOT", "")):
     """
     Back up everything under settings.MEDIA_ROOT to S3.
     Save path:
     /{{project_name}}/media/{{dir_name}}/{{fn}}
     """
     for fn in os.listdir(base_path):
         path = os.path.join(base_path, fn)
         if os.path.isdir(path):
             self.update_media(path)
         if not os.path.isfile(path):
             # symbolic links, etc.
             continue
         # TODO: save to S3
         return