Example #1
def extractFiles(indir="/Users/Pratik/Documents/Pratik/Work/practice/py-data-analysis", out="/Users/Pratik/Documents/Pratik/Work/practice/py-data-analysis/extracted"):
    os.chdir(indir)                     # change directory
    archives = glob.glob("*.gz")        # get all archive files that end in .gz
    if not os.path.exists(out):         # if folder doesn't exist make it
        os.makedirs(out)
    files = os.listdir("extracted")     # get list of all the files currently in the directory
    for archive in archives:            # loop through archives and extract files
        if archive[:-3] not in files:   # if file is already in folder don't extract (cut out exten)
            patoolib.extract_archive(archive, outdir=out)
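A minimal call, with illustrative paths (assumes os, glob, and patoolib from the third-party patool package are imported):

    extractFiles(indir="/path/to/archives", out="/path/to/archives/extracted")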
Example #2
def run_bowtie2(options=None, output_sam='temp.sam'):
    """
    Run Bowtie2 with the given options and save the SAM file.
    """

    # Using bowtie2.
    # Create the bowtie2 index if it wasn't given as input.
    #if not assembly_index:
    if not os.path.exists(os.path.abspath(options.output_dir) + '/indexes'):
        os.makedirs(os.path.abspath(options.output_dir) + '/indexes')
    fd, index_path = mkstemp(prefix='temp_',
            dir=(os.path.abspath(options.output_dir) + '/indexes/'))
    try:
        os.makedirs(os.path.dirname(index_path))
    except OSError:
        pass  # index directory already exists
    
    fasta_file = options.fasta_file

    build_bowtie2_index(os.path.abspath(index_path), os.path.abspath(fasta_file))
    assembly_index = os.path.abspath(index_path)

    unaligned_dir = os.path.abspath(options.output_dir) + '/unaligned_reads/'
    ensure_dir(unaligned_dir)
    unaligned_file = unaligned_dir + 'unaligned.reads'

    #input_sam_file = output_sam_file
    read_type = " -f "
    if options.fastq_file:
        read_type = " -q "
    
    bowtie2_args = ""
    if options.first_mates:
        bowtie2_args = "-a -x " + assembly_index + " -1 " + options.first_mates\
                + " -2 " + options.second_mates + " -p " + options.threads\
                + " --very-sensitive -a " + " --reorder --"\
                + options.orientation + " -I " + options.min_insert_size\
                + " -X " + options.max_insert_size + ' --un-conc '\
                + unaligned_file
    else:
        bowtie2_args = "-a -x " + assembly_index + read_type + " -U "\
                + options.reads_filenames + " --very-sensitive -a "\
                + " --reorder -p " + options.threads + ' --un '\
                + unaligned_file

    # Using bowtie 2.
    command = "bin/bowtie2-2.2.2/bowtie2 " + bowtie2_args + " -S " + output_sam
    
    out_cmd([command])

    with open(os.devnull, 'w') as ignore:
        args = shlex.split(command)
        bowtie_proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=ignore)
        bowtie_output, err = bowtie_proc.communicate()
    
    return unaligned_dir
Example #3
def batch(configFile, threads=1):
    configs = readconfig(configFile)
    speciesFile = configs["species"]
    outFolder = configs["outputFolder"]
    increment = 1
    baseFolder = outFolder
    while os.path.isdir(outFolder):
        outFolder = "%s(%d)" % (baseFolder, increment)
        increment += 1
    os.makedirs(outFolder)
    runParams = list(product(configs["species"], [outFolder], configs["simReps"],
                             configs["lambdas"], configs["startPops"], configs["N0"],
                             configs["microsats"], configs["alleleCount"], configs["SNPs"],
                             configs["mutationRate"], configs["lociSampling"],
                             configs["popSampling"], configs["regressConfig"]))
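    # runParams holds one tuple per combination (Cartesian product) of the configured lists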

    if len(configs["simReps"])==1 and len(configs["startPops"])==1 and len(configs["N0"])==1 and len(configs["microsats"])==1 and len(configs["alleleCount"])==1 and len(configs["SNPs"])==1 and len(configs["mutationRate"])==1:
        if threads == 1:
            neFiles = []
            simFiles = runSimulation(*runParams[0])  # single parameter set in this branch
            neDict = {}
            statsDict = {}
            for paramset in runParams:
                runFolder = nameRunFolder(*paramset)
                if not runFolder:
                    continue
                ident = createIdentifier(*paramset)
                neFile, statsFile = run(*paramset)
                neDict[ident] = neFile
                statsDict[ident] = statsFile
    else:
        if threads == 1:
            neDict = {}
            statsDict = {}
            for paramset in runParams:
                ident = createIdentifier(*paramset)
                neFile, statsFile = run(*paramset)
                neDict[ident] = neFile
                statsDict[ident] = statsFile
Example #4
	def extractFiles(self, outputdirectory=os.getcwd()):
		"""Extract all files contained within the GMP file to a folder outputdirectory.
		
		outputdirectory: a directory name or location for files. If it contains a directory separator, it is interpreted as an absolute path."""
		if os.path.split(outputdirectory)[0] == '':
			outputdirectory = os.path.join(os.getcwd(), outputdirectory)
		else:
			outputdirectory = os.path.join(outputdirectory, self.GMPFileName + "_files")
		if self.infile.closed:
			try:
				self.infile = open(self.GMPFileName, 'rb')
			except IOError:
				print self.GMPFileName + " was closed, and we couldn't reopen it. Quitting..."
				exit()
		try:
			os.makedirs(outputdirectory)
		except OSError:
			pass
		for i in xrange(self.fileCount):
			self.infile.seek(self.fileDescriptors[i]["offset"])
			filedata = self.infile.read(self.fileDescriptors[i]["rl"])
			if verbose:
				print "Writing file %(name)s (unknown descriptor: %(unknown)08x)" % self.fileDescriptors[i]
			with open(os.path.join(outputdirectory, self.fileDescriptors[i]["name"]), "wb") as oot:
				oot.write(filedata)
Example #5
def submit(request):
    name = request.POST.get("name")
    mobile = request.POST.get("mobile")
    desc = request.POST.get("desc")
    f = request.FILES.get('file')
    date = request.POST.get("date")
    path = "data/"
    if not os.path.exists(path):
        os.makedirs(path)
    ext = str(f).split(".")[-1]
    ext = ext.lower()
    path = 'data/' + str(time.time()) + "." + ext
    with open(path, 'wb+') as des:
        for chunk in f.chunks():
            des.write(chunk)
    dateline = time.time()
    u = User.objects.filter(mobile=mobile).count()
    if not u:
        user = User(name=name,mobile=mobile,desc=desc,like=0,avatar=path,dateline=date)
        user.save()
        return JsonResponse({
            "status":"success"    
        })
    else:
        return JsonResponse({
            "status":"fail",
            "reason":"您已经参加过了哦"
        })
Example #6
    def install(self, filename):
        issrc = False
        if filename.find('src') > 0:
            issrc = True

        f = zipfile.ZipFile(filename, 'r')
        for info in f.infolist():
            if info.file_size == 0:
                continue
            filepath = info.filename
            if not issrc and filepath.find('/.hg/') > 0:
                continue
            if filepath.endswith('EasyAccout.conf') or filepath.endswith('EasyAccout.db'):
                continue
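            # strip the archive's top-level folder before joining the remainder to self.home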
            pos = filepath.find('/')
            newpath = os.path.join(self.home, filepath[pos+1:].replace('/', os.sep))
            newdir = os.path.dirname(newpath)

            if not os.path.isdir(newdir):
                os.makedirs(newdir)

            with open(newpath, 'wb') as newf:
                newf.write(f.read(filepath))

            logfile.info('install:', info.filename, 'to:', newpath)
        f.close()
Example #7
def parse_directory(input_dir,output_dir):
    # walk from start_path and init the file-tree
    # TODO: check last accessed (and also its dependencies) value from index to eventually skip
    input_dir_real_path = os.path.realpath(input_dir)
    output_dir_real_path = os.path.realpath(output_dir)
    for dir_path, dir_names, file_names in os.walk(input_dir_real_path, topdown=True, followlinks=True):
        dir_path_stripped = dir_path[len(input_dir_real_path)+1:]
        dest_dir = os.path.join(output_dir_real_path, dir_path_stripped)
        os.makedirs(dest_dir, exist_ok=True)  # make sure the destination exists before writing into it
        for f in file_names:
            src = os.path.join(input_dir_real_path, dir_path_stripped, f)
            dest = os.path.join(dest_dir, f)
            ext = os.path.splitext(f)[1]
            if ext and ext[1:] in EXTENSIONS:
                file_type = ext[1:]
                output_file = open(dest, "w")
                parse_file(os.path.join(dir_path_stripped,f), file_type, threshold=1.0, output_file=output_file)
                output_file.close()
            else:  # if extension not found, just copy
                shutil.copy(src, dest)
            path_name = os.path.join(dir_path, f)[len(input_dir_real_path) + 1:]  # cut off root path
            if path_name not in index_list:
                index_list[path_name] = []

        for d in dir_names:  # create destination dirs if necessary
            os.makedirs(os.path.join(output_dir_real_path, dir_path_stripped, d), exist_ok=True)
Example #8
def generate_runtime_profile_diff(current_row, ref_row, was_change_significant,
                                  is_regression):
  """Generate an html diff of the runtime profiles.

  Generates a diff of the baseline vs. current runtime profile to $IMPALA_HOME/results
  in HTML format. The diff file is tagged with the relevant query information
  and whether it is an improvement or a regression (if applicable).
  """
  diff = difflib.HtmlDiff(wrapcolumn=90, linejunk=difflib.IS_LINE_JUNK)
  file_name_prefix = "%s-%s-%s-%s" % (current_row[QUERY_NAME_IDX],
                                      current_row[SCALE_FACTOR_IDX],
                                      current_row[FILE_FORMAT_IDX],
                                      current_row[COMPRESSION_IDX])
  if was_change_significant:
    file_name_prefix += '-regression' if is_regression else '-improvement'
  file_name = '%s.html' % file_name_prefix
  # Some compressions codecs have a `/`, which is not a valid file name character.
  file_name = file_name.replace('/', '-')
  dir_path = os.path.join(os.environ["IMPALA_HOME"], 'results')
  # If dir_path does not exist, create a directory. If it does exist
  # and is not a directory, remove the file and create a directory.
  if not os.path.exists(dir_path):
    os.makedirs(dir_path)
  elif not os.path.isdir(dir_path):
    raise RuntimeError("Unable to create $IMPALA_HOME/results, results file exists")
  file_path = os.path.join(dir_path, file_name)
  html_diff = diff.make_file(ref_row[RUNTIME_PROFILE_IDX].splitlines(1),
      current_row[RUNTIME_PROFILE_IDX].splitlines(1),
      fromdesc="Baseline Runtime Profile", todesc="Current Runtime Profile")
  with open(file_path, 'w+') as f:
    f.write(html_diff)
Example #9
    def generateTFC(self):
        """
        _generateTFC_

        Method to generate on the fly TFC. Each LFN would be mapped to local PFN 
        """ 
        workingDir = os.path.join(os.getcwd(),'prestage')
        if not os.path.exists(workingDir):
            os.makedirs(workingDir)
        
        tfcFile = IMProvDoc ('storage-mapping')

        for item in self.localFiles.keys():

           temp = item.split('/', 1)[1] if item.startswith('/') else item

           temp = os.path.split(temp)

           params = {'protocol': 'local-stage-in',
                     'path-match': '/%s/(%s)' % (temp[0], temp[1]),
                     'result': 'file:%s/$1' % workingDir}

           node = IMProvNode('lfn-to-pfn',None,**params)
           tfcFile.addNode(node)
        
        with open('%s/prestageTFC.xml' % workingDir, 'w') as handle:
            handle.write(tfcFile.makeDOMDocument().toprettyxml())

        return
Example #10
    def __init__(self):
        print('Generating blank CSDV Resources...')
        if not os.path.exists('./data'):
            os.makedirs('./data')

        self.mkCSDV('bills', 'Description\tAmount\tFrequency\tLast Payed')
        self.mkCSDV('budget', 'Description\tAllowance\tAmount to Date')
        self.mkCSDV('registry', 'Check #\tdate\ttransaction\tpayment\tdeposit\tbalance')
        self.mkCSDV('items', 'NULL')
        self.mkCSDV('recipies', 'NULL')
        print('')

        print('Reconstituting resource files...')
        if not os.path.exists('./data/resources'):
            os.mkdir('./data/resources')
        #self.genFile('stop32', 'png')
        #self.genFile('minus32', 'png')
        #self.genFile('plus32', 'png')
        print('')

        print('First Run of Application Detected')
        os.makedirs('./data/backup/data/resources', exist_ok=True)  # creates the intermediate backup dirs too
        backup = repair.repair('backup')
        open('./data/firstrun', 'w').close()  # touch the first-run marker file
Example #11
    def filesToLog(self, path):
        """ Moves non-data files (not ?_[a-z]*.6?) from model to log directory """
        logDir = "%s/log" % path
        runDir = "%s/run" % path
        try:
            os.makedirs(logDir)  # the shutil.move target below
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # Files to move listed, then path added on
        filesToMove = [
            'centers.bp',
            'coriolis.out',
            'flux.out',
            'mirror.out',
            'sample_Z.out',
            'sidecenters.bp',
            'total.dat',
            'total_ST.dat']
        filesToMove = ["%s/%s" % (runDir, f) for f in filesToMove]

        filesToSearch = ["*.o[1-9]*", "*.po[1-9]*"]
        for part in filesToSearch:
            for files in glob.glob("%s/%s" % (runDir, part)):
                filesToMove.append(files)

        # Loop over the files listed above and move to log dir
        for each in filesToMove:
            try:
                shutil.move(each, logDir)
            except (IOError, OSError, shutil.Error):
                print "Unable to move %s to %s as part of post run cleanup" % (each, logDir)
Example #12
def main():
  chk_fpath = os.path.expanduser(FLAGS.checkpoint_file)
  reader = tf.train.NewCheckpointReader(chk_fpath)
  var_to_shape_map = reader.get_variable_to_shape_map()
  output_dir = os.path.expanduser(FLAGS.output_dir)
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)
  manifest = {}
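  # manifest maps each exported variable name to its dump filename and shape;
  # it is written out as manifest.json below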
  remove_vars_compiled_re = re.compile(FLAGS.remove_variables_regex)

  var_filenames_strs = []
  for name in var_to_shape_map:
    if (FLAGS.remove_variables_regex and
        re.match(remove_vars_compiled_re, name)) or name == 'global_step':
      print('Ignoring ' + name)
      continue
    var_filename = _var_name_to_filename(name)
    manifest[name] = {'filename': var_filename, 'shape': var_to_shape_map[name]}

    print('Writing variable ' + name + '...')
    tensor = reader.get_tensor(name)
    with open(os.path.join(output_dir, var_filename), 'wb') as f:
      f.write(tensor.tobytes())

    var_filenames_strs.append("\"" + var_filename + "\"")

  manifest_fpath = os.path.join(output_dir, 'manifest.json')
  print('Writing manifest to ' + manifest_fpath)
  with open(manifest_fpath, 'w') as f:
    f.write(json.dumps(manifest, indent=2, sort_keys=True))
  print('Done!')
Example #13
def housekeeping():
    if not os.path.exists(DIR_NAME):
        os.makedirs(DIR_NAME, 0o755)  # directories need the execute bit to be traversable
    while len(os.listdir(DIR_NAME)) > ARCHIVE_SIZE:
        m = {os.stat(os.path.join(DIR_NAME, f)).st_mtime: os.path.join(DIR_NAME, f)
             for f in os.listdir(DIR_NAME)}
        os.remove(m[min(m.keys())])  # remove the oldest file first
Example #14
def check_cache(cache_folder):
    """ Checks location for ability to read/write from cache

    Args:
        cache_folder (str): location of cache folder

    Returns:
        tuple: tuple of booleans describing ability to read and write
            to/from cache

    """
    read_cache = False
    write_cache = False

    if os.path.isdir(cache_folder):
        if os.access(cache_folder, os.R_OK):
            read_cache = True

        if os.access(cache_folder, os.W_OK):
            write_cache = True
    else:
        try:
            os.makedirs(cache_folder)
        except OSError:
            pass
        else:
            read_cache = True
            write_cache = True

    return (read_cache, write_cache)
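A quick usage sketch (the folder path is illustrative):

    read_ok, write_ok = check_cache('/tmp/cache')
    if not write_ok:
        print('Caching disabled: cache folder is not writable')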
Example #15
def poll_db(conf):
    global secret
    global frontend_url
    global cache_path

    logging.basicConfig(**conf["log"])

    datastream.initclient(**conf["base"])
    frontend_url = conf["daemon"]["BASEJUMP_FRONTEND_URL"]
    secret = conf["daemon"]["BASEJUMP_KEY"]
    cache_path = conf["daemon"]["CACHE_DIR"]
    # An executable command that takes a "source path" and a "target path" to transfer a file
    transfer_cmd = conf["daemon"]["TRANSFER_CMD"]

    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    log = get_log()
    while True:
        try:
            transfers = get_transfers()

            for xfer in transfers.values():
                path = xfer["path"]
                # At some point will need to tidy this selection process up to see if a transfer is active or not
                log.info("Found transfer of %s" % path)
                if transfer(xfer):
                    log.info("Completed transfer of %s" % path)
                else:
                    log.info("Unable to transfer %s; will try later." % path)
        except Exception as e:
            log.error("Unable to retrieve transfers.")
            log.exception(e)
        log.info("Resting for a minute")
        time.sleep(60)
        log.info("Ready to take another look!")
Example #16
    def generate_css_file(self, force, name, input_file, output_file, **kwargs):
        # check that the sass input file actually exists.
        if not os.path.exists(input_file):
            raise SassConfigException('The input \'%s\' does not exist.\n' % input_file)
        output_path = os.path.dirname(output_file)
        if not os.path.exists(output_path):
            # try to create the path
            try:
                os.makedirs(output_path)
            except os.error as e:
                raise SassConfigException(str(e))

        try:
            sass_obj = SassModel.objects.get(name=name)
            was_created = False
        except SassModel.DoesNotExist:
            sass_obj = SassModel(name=name)
            was_created = True

        sass_obj.sass_path = input_file
        sass_obj.css_path = output_file

        needs_update = was_created or force or update_needed(sass_obj)
        if needs_update:
            p = subprocess.Popen(
                [self.bin, "-t", self.sass_style, "--no-cache", input_file, output_file],
                stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            if p.returncode != 0: # Process failed (nonzero exit code)
                raise SassException(stderr)
            sass_obj.save()
Example #17
  def run(self):
    patchApplyConfig = self._config['Patch_Apply']
    isContinuous = patchApplyConfig.get('continuous')
    patchLogDir = patchApplyConfig['log_dir']
    if not os.path.exists(patchLogDir):
      logger.error("%s does not exist" % patchLogDir)
      return False
    inputPatchDir = patchApplyConfig['input_patch_dir']
    mExtractConfig = self._config['M_Extract']
    mRepo = mExtractConfig['M_repo']
    mRepoBranch = mExtractConfig.get('M_repo_branch', None)
    outputDir = mExtractConfig['temp_output_dir']
    if not os.path.exists(outputDir):
      os.makedirs(outputDir)
    extractLogDir = mExtractConfig['log_dir']
    commitMsgDir = mExtractConfig['commit_msg_dir']
    if not os.path.exists(commitMsgDir):
      logger.error("%s does not exist" % commitMsgDir)
      return False
    backupConfig = self._config.get('Backup')
    startCache(self._instance, self._useSudo)
    testClient = self._createTestClient()
    with testClient:
      patchApply = PatchSequenceApply(testClient, patchLogDir)
      outPatchList = patchApply.generatePatchSequence(inputPatchDir)
      if not outPatchList:
        logger.info("No Patch needs to apply")
        return True
      if not isContinuous:
        outPatchList = [outPatchList[0]]
      for patchInfo in outPatchList:
        logger.info(patchInfo)
        result = patchApply.applyPatchSequenceByInstallName(
                                              patchInfo.installName,
                                              patchOnly=True)
        if result < 0:
          logger.error("Error installing patch %s" % patchInfo.installName)
          return False
        elif result == 0:
          logger.info("%s is already installed" % patchInfo.installName)
          continue
        commitFile = getDefaultCommitMsgFileByPatchInfo(patchInfo,
                                                        dir=commitMsgDir)
        generateCommitMsgFileByPatchInfo(patchInfo, commitFile)
        MExtractor = VistADataExtractor(mRepo, outputDir, extractLogDir,
                                        gitBranch=mRepoBranch)
        MExtractor.extractData(testClient)
        commit = MCompReposCommitter(mRepo)
        commit.commit(commitFile)

        if backupConfig:
          backupDir = backupConfig['backup_dir']
          if not os.path.exists(backupDir):
            logger.error("%s does not exist" % backupDir)
            return False
          cacheDir = backupConfig['cache_dat_dir']
          origDir = os.path.join(cacheDir, "CACHE.DAT")
          backupCacheDataByGitHash(self._instance, origDir, backupDir,
                                   mRepo, mRepoBranch, self._useSudo)
          startCache(self._instance, self._useSudo)
Example #18
    def process(self, instance):

        extractedPaths = [v for k, v in instance.data.items() if k.startswith('outputPath')]
        self.log.debug(extractedPaths)
        for path in extractedPaths:

            # sourcePath = os.path.normpath(instance.data.get('outputPath'))
            sourcePath = path
            filename, ext = os.path.splitext(sourcePath)
            self.log.debug('source filename: ' + filename)
            self.log.debug('source ext: ' + ext)
            publishFile = instance.data['publishFile']
            publishFile = os.path.splitext(publishFile)[0] + ext
            self.log.debug(publishFile)

            components = instance.data['ftrackComponents']

            components[str(ext)[1:]] = {'path': publishFile}

            self.log.debug('components: {}'.format(str(components)))

            if not os.path.exists(os.path.dirname(publishFile)):
                os.makedirs(os.path.dirname(publishFile))

            self.log.info('Copying model from location: {}'.format(sourcePath))
            self.log.info('Copying model to location: {}'.format(publishFile))
            shutil.copy(sourcePath, publishFile)

            instance.data['publishFile'] = publishFile
Example #19
def build_and_run(android_sdk=None):
	# first we need to find the desktop SDK for tibuild.py
	if platform.system() == 'Darwin':
		base_sdk = '/Library/Application Support/Titanium/sdk/osx'
		platform_name = 'osx'
	elif platform.system() == 'Windows':
		if platform.release() == 'XP':
			base_sdk = 'C:\\Documents and Settings\\All Users\\Application Data\\Titanium\\sdk\\win32'
		else:
			base_sdk = 'C:\\ProgramData\\Titanium\\sdk\\win32'
		platform_name = 'win32'
	elif platform.system() == 'Linux':
		base_sdk = os.path.expanduser("~/.titanium/sdk/linux")
		platform_name = 'linux'
	
	if not os.path.exists(base_sdk):
		error_no_desktop_sdk()
	
	versions = os.listdir(base_sdk)
	if len(versions) == 0:
		error_no_desktop_sdk()
	
	# use the latest version in the system
	versions.sort(cmp_versions)
	use_version = versions[0]
	
	desktop_sdk = os.path.join(base_sdk, use_version)
	tibuild = os.path.join(desktop_sdk, 'tibuild.py')
	drillbit_build_dir = os.path.join(mobile_dir, 'build', 'drillbit')
	mobile_dist_dir = os.path.join(mobile_dir, 'dist')
	
	sys.path.append(mobile_dist_dir)
	sys.path.append(os.path.join(mobile_dir, 'build'))
	import titanium_version
	
	# extract the mobilesdk zip so we can use it for testing
	mobilesdk_dir = os.path.join(mobile_dist_dir, 'mobilesdk', platform_name, titanium_version.version)
	mobilesdk_zip = zipfile.ZipFile(os.path.join(mobile_dist_dir, 'mobilesdk-%s-%s.zip' % (titanium_version.version, platform_name)))
	mobilesdk_zip.extractall(mobile_dist_dir)
	mobilesdk_zip.close()
	
	if not os.path.exists(drillbit_build_dir):
		os.makedirs(drillbit_build_dir)
	
	sys.path.append(desktop_sdk)
	import env
	
	# use the desktop SDK API to stage and run drillbit (along with its custom modules)
	environment = env.PackagingEnvironment(platform_name, False)
	app = environment.create_app(drillbit_dir)
	stage_dir = os.path.join(drillbit_build_dir, app.name)
	app.stage(stage_dir, bundle=False)
	app.install()
	
	app_modules_dir = os.path.join(app.get_contents_dir(), 'modules')
	if os.path.exists(app_modules_dir):
		shutil.rmtree(app_modules_dir)
	
	shutil.copytree(os.path.join(drillbit_dir, 'modules'), app_modules_dir)
	app.env.run([app.executable_path, '--debug', '--mobile-sdk=' + mobilesdk_dir, '--android-sdk=' + android_sdk])
Example #20
 def mount(self, fs, passwd=''):
     if not os.path.isdir(os.path.join('/media', fs.name)):
         os.makedirs(os.path.join('/media', fs.name))
     if fs.fstype in ['crypt', 'vdisk', 'loop']:
         dev = losetup.find_unused_loop_device()
         dev.mount(fs.img)
         if fs.fstype == 'crypt':
             s = shell_cs('echo "%s" | cryptsetup luksOpen %s %s'%(passwd,dev.device,fs.name), stderr=True)
             if s[0] != 0:
                 dev.unmount()
                 raise Exception('Failed to decrypt %s: %s'%(fs.name, s[1]))
             s = shell_cs('mount /dev/mapper/%s %s'%(fs.name, os.path.join('/media', fs.name)), stderr=True)
             if s[0] != 0:
                 shell('cryptsetup luksClose %s'%fs.name)
                 dev.unmount()
                 raise Exception('Failed to mount %s: %s'%(fs.name, s[1]))
         else:
             s = shell_cs('mount %s %s'%(dev.device, os.path.join('/media', fs.name)), stderr=True)
             if s[0] != 0:
                 dev.unmount()
                 raise Exception('Failed to mount %s: %s'%(fs.name, s[1]))
         apis.poicontrol(self.app).add(fs.name, 'vdisk', 
             fs.mount, 'filesystems', False)
     else:
         s = shell_cs('mount %s %s'%(fs.dev, os.path.join('/media', fs.name)), stderr=True)
         if s[0] != 0:
             raise Exception('Failed to mount %s: %s'%(fs.name, s[1]))
         apis.poicontrol(self.app).add(fs.name, 'disk', 
             fs.mount, 'filesystems', False)
Example #21
def create(args):
  cwd = os.getcwd()
  if args.path != ".":
    if not os.path.exists(args.path):
      os.makedirs(args.path)
    cwd = args.path
  project = os.path.join(cwd, args.name)
  os.mkdir(project)
  
  orgString = ""
  for i in args.organization.split('.'):
    orgString = os.path.join(orgString, i)

  main = os.path.join('src', os.path.join('main', 
    os.path.join('scala', orgString)))
  test = os.path.join('src', os.path.join('test', 
    os.path.join('scala', orgString)))

  for folder in ['project', 'build', 'lib', main, test]:
    try:
      os.makedirs(os.path.join(project, folder))
    except:
      pdb.set_trace()
  writeSbtFile(args)
  writePluginsFile(args)
Example #22
def MakeTargetDirs(config):
    lanes = config.get("Options", "lanes")
    if lanes != "":
        lanes = "_lanes{}".format(lanes)

    assert(config["Paths"]["runID"] != None)
    os.mkdirs("%s/%s%s" % (config["Paths"]["outputDir"], config["Options"]["runID"], lanes))
Example #23
 def ensureDirectory(self):
     # Python function definitions support two special parameter forms: * and **.
     # * collects any number of positional arguments, accessed as a tuple;
     # ** collects any number of keyword arguments, accessed as a dict.
     # See http://blog.sina.com.cn/s/blog_7dc317590101cbkr.html
     path = os.path.join(*self.directory)
     if not os.path.isdir(path):
         os.makedirs(path)
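A small illustration of the * unpacking above (the tuple value is hypothetical):

    directory = ('data', 'cache', 'v1')
    os.path.join(*directory)   # -> 'data/cache/v1' on POSIX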
Example #24
    def run(self):
        """ High-level function to execute a compose. """
        if not os.path.isdir(self.cache_path):
            os.makedirs(self.cache_path)

        if not os.path.isdir(self.target):
            raise SystemExit('target "%s" is not a directory. Please configure an existing target directory for this compose.' % self.target)

        # Run the steps for each distro.
        for distro in self.builds.keys():
            # (We assume that all keys in self.builds also exist in
            # self.comps.)
            if distro not in self.comps.keys():
                log.error('Loading builds for "%s", and the comps configuration is '
                          'missing a "%s" key. Please add a comps XML file for this '
                          'distro.' % (distro, distro))
                exit(1)
            self.run_distro(distro)

        # Copy any extra files to the root of the compose.
        for extra_file in self.extra_files:
            # For "glob" type extra files, glob the compose's output_dir, and
            # copy the results to the root.
            if 'glob' in extra_file:
                glob_path = os.path.join(self.output_dir, extra_file['glob'])
                for f in glob.glob(glob_path):
                    copy(f, self.output_dir)
            # For "file" type extra files, copy the file from the user's cwd.
            if 'file' in extra_file:
                copy(extra_file['file'], self.output_dir)
Example #25
def BRAINSCutCreateVector( configurationFilename,
                           probabilityMapDict,
                           normalization,
                           outputXmlFilename,
                           outputVectorFilename):
    print( "BRAINSCutCreateVector" )
    import os
    import sys
    for roi in probabilityMapDict.keys():
        if not os.path.exists( probabilityMapDict[ roi ]  ):
            print( """ ERROR   
                   {fn}  does not exist.
                   """.format( fn=probabilityMapDict[roi]) )
            sys.exit()

    vectorDir = os.path.dirname( os.path.abspath( outputVectorFilename ))
    if not os.path.exists( vectorDir ):
        os.makedirs( vectorDir )
    generateProbabilityMap = False
    createVectors = True
    trainModel = False
    applyModel = False
    dummyMethodParameter = {}
    dummyOutputDirDict = {}
    dummyModelFilename = "na"
    probabilityMapGaussianSigma=1
    from ConfigurationParser import BRAINSCutCMDFromConfigFile
    returnList= BRAINSCutCMDFromConfigFile( configurationFilename,
                                outputXmlFilename, 
                                probabilityMapDict,
                                probabilityMapGaussianSigma, 
                                outputVectorFilename,
                                dummyModelFilename, 
                                generateProbabilityMap,
                                createVectors,
                                normalization,
                                trainModel,
                                applyModel,
                                dummyOutputDirDict, 
                                dummyMethodParameter)
    outputVectorFilename = returnList[ 'inputVectorFilename' ]    
    outputVectorHdrFilename = outputVectorFilename + ".hdr"
    outputXmlFilename = os.path.abspath( outputXmlFilename )
    outputNormalization = normalization
    print("""Output of BRAINSCutCreateVector
          outputVectorFilename = {ovf}
          outputVectorHdrFilename = {ovh}
          outputNormalization = {on}
          outputXmlFilename = {oxf}
          """.format( ovf=outputVectorFilename, 
                         ovh=outputVectorHdrFilename,
                         on=outputNormalization,
                         oxf=outputXmlFilename ))
    return outputVectorFilename, outputVectorHdrFilename, outputNormalization, outputXmlFilename
Example #26
 def json_dump(self, obj, filename):
     '''Dump ``obj`` to the ``filename`` in cache as JSON.'''
     target_file = os.path.join(self.root_dir, filename)
     target_dir = os.path.dirname(target_file)
     if not os.path.exists(target_dir):
         os.makedirs(target_dir)
     with open(target_file, 'w') as f:
         json.dump(obj, f)
Example #27
    def save(self):
        save_dir = os.path.join(self.save_dir, self.dataset, self.model_name)

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        torch.save(self.G.state_dict(), os.path.join(save_dir, '_G.bin'))
        torch.save(self.D.state_dict(), os.path.join(save_dir, '_D.bin'))
Example #28
def save_original(img, save_dir=None, name="temp"):
    img = np.transpose(img, [1, 2, 0])
    im = Image.fromarray(np.asarray(img).astype('uint8'), 'RGB')
    # create the per-category dir the image is saved into (not just the top level)
    out_dir = os.path.join('../data/Originals', save_dir or '')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    im.save(os.path.join(out_dir, str(name) + '.jpeg'))
    im.close()
Example #29
def save_fig(fig_id, tight_layout=True):
    images_dir = os.path.join(PROJECT_ROOT_DIR, 'images')
    if not os.path.exists(images_dir):
        os.makedirs(images_dir)
    path = os.path.join(images_dir, fig_id + '.png')
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
Example #30
File: ner.py Project: skrydg/ml
    def save_model(self, path):
        path = Path(path)
        if not os.path.exists(path.parent):
            os.makedirs(path.parent)

        self.nlp.meta["name"] = path.name
        self.nlp.to_disk(path)
        print("Saved model to", path)
Example #31
def mkdir(bn):
    "create dir in filesystem"
    path = bn.getPath()
    path= "%s/%s" % (save_path,path)
    try:
        os.mkdirs(path)
    except:
        pass
Example #33
 def create(cls, path=None, create_if_not_exist=False):
     if path is None:
         path = tempfile.mkdtemp()
     if create_if_not_exist and not os.path.exists(path):
         os.makedirs(path)
     ve = cls(path)
     virtualenv.create_environment(path)
     return ve
Example #34
 def process_item(self, item, spider):
     location = os.path.join(FILE_TEMP_PATH,
                             item['website_title'] + ".html")
     if not os.path.exists(os.path.dirname(location)):
         os.makedirs(os.path.dirname(location))
     with open(location, "wb") as website_files:
         website_files.write(item['website_html'].encode("utf-8"))
         website_files.flush()
Example #35
    def suggest(self, username, string_id, suggestion):
        """Store a suggestion for a given string."""

        # figure out where to store this suggestion
        sugg_path = os.path.join(self.domain.path, self.lang, 'suggestions',
                                 str(hash(string_id)))
        if not os.path.exists(sugg_path):
            os.makedirs(sugg_path)
Example #36
 def init(self):
   self = super(AController, self).init()
   sqlfile = self.pathForFilename('db/' + '.'.join(self.CURRENT_ACCT))
   if not os.path.exists(sqlfile):
       os.makedirs(os.path.dirname(sqlfile))
   self.conn = sqlite3.connect(sqlfile)
   self.convert_new_format()
   return self
Example #37
def csvfy():
    if not os.path.isdir(out_folder):
        os.makedirs(out_folder)  # make the output folder if it doesn't exist
    files = os.listdir(data_folder)
    cols = ['DRIVER_ID', 'TRIP_ID', 'TIMESTAMP', 'LONGITUDE', 'LATITUDE']

    for i, f in enumerate(files):
        csvfy_aux(f, i, cols)
Example #38
def main():

    args = parse_args()

    print_args(args)

    set_seed(args.seed)

    # load data
    train_data, val_data, test_data, class_names, vocab = loader.load_dataset(
        args)

    args.id2word = vocab.itos

    # initialize model
    model = {}
    model["G"] = get_embedding(vocab, args)  # model["G"]里面 是 词向量平均 + FC

    criterion = ContrastiveLoss()
    # model["G2"] = get_embedding_M2(vocab, args)
    # model["clf"] = get_classifier(model["G"].hidden_size * 2, args)

    if args.mode == "train":
        # train model on train_data, early stopping based on val_data
        optG = train(train_data, val_data, model, class_names, criterion,
                     args)  # Siamese-network, MAML-style training; only the FC layer is adapted

    # val_acc, val_std, _ = test(val_data, model, args,
    #                                         args.val_episodes)

    test_acc, test_std = test(test_data, class_names, optG, model, criterion,
                              args, args.test_epochs, True)

    # path_drawn = args.path_drawn_data
    # with open(path_drawn, 'w') as f_w:
    #     json.dump(drawn_data, f_w)
    #     print("store drawn data finished.")

    # file_path = r'../data/attention_data.json'
    # Print_Attention(file_path, vocab, model, args)

    if args.result_path:
        directory = args.result_path[:args.result_path.rfind("/")]
        if not os.path.exists(directory):
            os.makedirs(directory)

        result = {
            "test_acc": test_acc,
            "test_std": test_std,
            # "val_acc": val_acc,
            # "val_std": val_std
        }

        for attr, value in sorted(args.__dict__.items()):
            result[attr] = value

        with open(args.result_path, "wb") as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
Example #39
def MakeTargetDirs(config):
    lanes = config.get("Options", "lanes")
    if lanes != "":
        lanes = "_lanes{}".format(lanes)

    assert config["Paths"]["runID"] is not None
    os.makedirs(
        "%s/%s%s" %
        (config["Paths"]["outputDir"], config["Options"]["runID"], lanes))
Example #40
 def run(self):
     if self.cleanAndPrepareOutputDir:  # Run only if asked to clean existing output file
         if os.path.exists(self.outputDir):  # check if dir exists
             if os.path.exists(self.outputPath):  # check if file exists
                 os.remove(self.outputPath)
             else:  # just delete the directory
                 os.removedirs(self.outputDir)
         else:  # create required directories
              os.makedirs(self.outputDir)
Example #41
    def _cache_fname(self, cache_dir):
        if self.mpi_rank == 0:
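            # only rank 0 creates the cache directory; the other ranks wait at the barrier below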
            if not os.path.exists(self._cache_dir):
                os.makedirs(self._cache_dir)
        bmtk_world_comm.barrier()

        return os.path.join(
            self._cache_dir,
            '.bmtk.spikes.cache.node{}.csv'.format(self.mpi_rank))
Example #42
def save_features(features, category, vid_file):
    """
    Function to save the features as csv
    """
    folder_path = os.path.join(output_dir, category)
    file_path = os.path.join(folder_path, vid_file.split('.')[0] + '.csv')
    if not (os.path.exists(folder_path)):
        os.makedirs(folder_path)
    pd.DataFrame(features, index=None).to_csv(file_path, header=False)
Example #43
 def __init__(self, *args, **kwargs):
     os.makedirs(LOG_PATH, exist_ok=True)
     # Rotate on UTC Sundays
     logger = logging.getLogger('rotating_log')
     logger.setLevel(logging.INFO)
     self.logger = logging.handlers.TimedRotatingFileHandler(LOG_PATH,  # needs "import logging.handlers"
                                                             when='W6',
                                                             utc=True)
     super().__init__(*args, **kwargs)
Example #44
def intelOpen(path, mode):
    """
    Intelligent open function: open path with mode; if the dirname of path
    doesn't exist, create it automatically.
    """
    basedir = os.path.dirname(path)
    if not os.path.isdir(basedir):
        os.makedirs(basedir)
    return open(path, mode)
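A short usage sketch (the path is illustrative):

    with intelOpen('/tmp/reports/2024/summary.txt', 'w') as f:
        f.write('report body')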
Example #45
def run_matching(config):

    bbox_context = 0.5
    resize_height = 150
    try_flip = True
    mst_fname = config.mst_save_fname
    seg_fname = config.coseg_save_fname

    im_out_dir = config.matching_im_out_dir
    imdata_fname = config.train_imagedata_fname
    im_base = config.im_base

    save_fname = config.alignment_fname

    if os.path.exists(save_fname):
        print('Matching already done!')
        return

    if not os.path.exists(im_out_dir):
        os.makedirs(im_out_dir)

    #get the pose pose_graph_layer
    print('Matching, loading files...')
    with open(mst_fname, 'rb') as f:
        msts = pickle.load(f)
        graph = msts[0]
        for i in range(1, len(msts)):
            graph += msts[i]

    #Load segmenetations and images
    with open(seg_fname, 'rb') as f:
        segmentations = pickle.load(f)
    with open(imdata_fname, 'rb') as f:
        images = pickle.load(f)
        all_ims = pickle.load(f)
    print('Done loading')

    for i in range(len(images)):
        print('Matching {}/{}'.format(i, len(images)))
        out_fname = os.path.join(im_out_dir, str(i) + '.p')
        if os.path.exists(out_fname):
            continue

        start = i
        to_inds = [j for j in range(len(images)) if graph[start, j] > 0]  # Python for the broken MATLAB find(graph(from,:)>0)

        inds = [start] + to_inds
        local_images = [images[i] for i in inds]
        segs = [segmentations[i] for i in inds]
        ims = [all_ims[i] for i in inds]
        for j in range(len(local_images)):
            im = ims[j]
            segs[j] = imresize(segs[j], (im.shape[0], im.shape[1]))  # Python for the broken MATLAB imresize call
            bbox = local_images[j]['bbox']
Example #46
 def run(self,
         paramfile,
         rundir=None,
         EXE="Galacticus.exe",
         NTHREAD=1,
         logfile=None,
         exitOnFail=True):
     funcname = self.__class__.__name__ + "." + sys._getframe(
     ).f_code.co_name
     # Check parameter file exists
     if not os.path.exists(paramfile):
         raise RuntimeError(funcname + "(): Parameter file '" + paramfile +
                            "' not found!")
     print(funcname + "(): GALACTICUS PARAMETER FILE = " + paramfile)
     # Get number of threads to use
     NTHREAD = os.getenv("OMP_NUM_THREADS", NTHREAD)
     print(funcname + "(): Using " + str(NTHREAD) + " threads...")
     sys.stdout.flush()
     # Check Galacticus compiled
     if not os.path.exists(EXE):
         self.compile(NTHREAD=NTHREAD, EXE=EXE)
     # Create run directory and move files if necessary
     if rundir is None:
         rundir = self.GALACTICUS_ROOT
     if not os.path.exists(rundir):
          os.makedirs(rundir)
     if not rundir.endswith("/"):
         rundir = rundir + "/"
     if not fnmatch.fnmatch(rundir, self.GALACTICUS_ROOT):
         paramfileName = paramfile.split("/")[-1]
         os.rename(paramfile, rundir + paramfileName)
         if not os.path.exists(rundir + EXE):
             shutil.copy2(self.GALACTICUS_ROOT + "/" + EXE, rundir + EXE)
     # Run Galacticus
     os.chdir(rundir)
     print(funcname + "(): Running Galacticus...")
     print(funcname + "(): Current directory = " + os.getcwd())
     print(funcname + "(): Running " + EXE + "...")
     SW = STOPWATCH()
     sys.stdout.flush()
     cmd = "./" + EXE + " " + paramfile
     if logfile is not None:
         cmd = cmd + " &> " + logfile
     os.system(cmd)
     sys.stdout.flush()
     # Check logfile if specified
      if logfile is not None and os.path.exists(logfile):
         pattern = "*MM: <- Finished task set*"
         failed = len(
             fnmatch.filter(open(logfile, 'r').readlines(), pattern)) == 0
         if exitOnFail and failed:
             raise RuntimeError(funcname + "(): Galacticus run FAILED!")
     print(funcname + "():Galacticus finished processing")
     SW.stop()
     sys.stdout.flush()
     return
Example #47
def compress():
    local_path = 'E:/ontoData/guiyangData/'

    # walk the tree and unpack every zip archive found
    for root, dirs, files in os.walk(local_path):
        for file in files:
            file_path = os.path.join(root, file)
            if zipfile.is_zipfile(file_path):
                zip_file = zipfile.ZipFile(file_path)
                zip_file.extractall(path=local_path)
Example #48
def getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):
    path = absoluteUrl.replace("www.", "")
    path = path.replace(baseUrl, "")
    path = downloadDirectory + path
    directory = os.path.dirname(path)

    if not os.path.exists(directory):
        os.makedirs(directory)

    return path
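A worked call (all values illustrative):

    getDownloadPath('http://example.com', 'http://www.example.com/img/logo.png', 'downloaded')
    # creates 'downloaded/img' and returns 'downloaded/img/logo.png'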
Example #49
def download_vod(url, video_name, username, game):
    target_dir = os.path.join(BASE_DIR, game, username)
    os.makedirs(target_dir, exist_ok=True)
    os.chdir(target_dir)

    ydl_opts = {'restrictfilenames': True}
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])
    except Exception:
        print("Download error, will come back to it")
Example #50
def _sco_init_outputs(output_directory, create_directories, output_prefix, output_suffix):
    if not os.path.exists(output_directory) or not os.path.isdir(output_directory):
        if create_directories:
            os.makedirs(output_directory)
        if not (create_directories
                and os.path.exists(output_directory) and os.path.isdir(output_directory)):
            raise ValueError('Output directory does not exist')
    output_prefix = '' if output_prefix is None else output_prefix
    output_suffix = '' if output_suffix is None else output_suffix
    return (output_prefix, output_suffix)
Example #51
def check_create_dir(path):
    """
    Make sure the directory specified by the path exists.
    """
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError:
            logger.error("cannot create {} directory".format(path))
            sys.exit(1)
Example #52
def copytree(src, dst):
    for root, dirs, files in os.walk(src):
        if not os.path.isdir(root):
            os.makedirs(root)
        for each_file in files:
            rel_path = root.replace(src, "").lstrip(os.sep)
            dest_path = os.path.join(dst, rel_path, each_file)
            if not os.path.isdir(os.path.dirname(dest_path)):
                os.makedirs(os.path.dirname(dest_path))
            copy2(os.path.join(root, each_file), dest_path)
Example #53
 def __init__(self, args, experiment=None):
     self.mW = args.east_width  # model width
     self.mH = args.east_height  # model height
     print("[INFO] loading EAST text detector...")
     tf.reset_default_graph()
     self.net = cv2.dnn.readNet(args.east)
     self.experiment = experiment
     self.args = args
     if not os.path.exists(args.debug_folder):
          os.makedirs(args.debug_folder)
Example #54
def import_shaders(filename):
    filepath = os.path.dirname(filename)
    filename = os.path.basename(filename)
    writepath = 'build/%s' % filepath
    os.makedirs(writepath)
    with open('%s/%s' % (writepath, filename), 'w') as f:
        f.write('foo')

    print('running on %s' % filename, file=sys.stderr)
Example #55
 def dizinler_olstr(ad):
     try:
         os.makedirs(ad)
     except FileExistsError:
         sil_onay = input("Bu klasör zaten var.Eskisi silinsin mi?(e/h)\n")  # "This folder already exists. Delete the old one? (y/n)"
         if sil_onay.lower() == "h":
             pass
         elif sil_onay.lower() == "e":
             os.removedirs(ad)
             os.makedirs(ad)
Example #56
    def do_job_setup(self, job):
        if not os.path.isdir(job.get_tmp()):
            os.makedirs(job.get_tmp())
        self.joblog = job.get_log()
        if os.path.exists(self.joblog):
            os.unlink(self.joblog)

        print '******************'
        print "doing job", job.name
        print '******************'
Example #57
  def run():
    if Utils.isTodayHoliday():
      logging.info("Cannot start TradeManager as Today is Trading Holiday.")
      return

    if Utils.isMarketClosedForTheDay():
      logging.info("Cannot start TradeManager as Market is closed for the day.")
      return

    Utils.waitTillMarketOpens("TradeManager")

    # check and create trades directory for today's date
    serverConfig = getServerConfig()
    tradesDir = os.path.join(serverConfig['deployDir'], 'trades')
    TradeManager.intradayTradesDir =  os.path.join(tradesDir, Utils.getTodayDateStr())
    if not os.path.exists(TradeManager.intradayTradesDir):
      logging.info('TradeManager: Intraday Trades Directory %s does not exist. Hence going to create.', TradeManager.intradayTradesDir)
      os.makedirs(TradeManager.intradayTradesDir)

    # start ticker service
    brokerName = Controller.getBrokerName()
    if brokerName == "zerodha":
      TradeManager.ticker = ZerodhaTicker()
    #elif brokerName == "fyers" # not implemented
    # ticker = FyersTicker()

    TradeManager.ticker.startTicker()
    TradeManager.ticker.registerListener(TradeManager.tickerListener)

    # sleep for 2 seconds for ticker connection establishment
    time.sleep(2)

    # Load all trades from json files to app memory
    TradeManager.loadAllTradesFromFile()

    # track and update trades in a loop
    while True:
      if Utils.isMarketClosedForTheDay():
        logging.info('TradeManager: Stopping TradeManager as market closed.')
        break

      try:
        # Fetch all order details from broker and update orders in each trade
        TradeManager.fetchAndUpdateAllTradeOrders()
        # track each trade and take necessary action
        TradeManager.trackAndUpdateAllTrades()
      except Exception as e:
        logging.exception("Exception in TradeManager Main thread")

      # save updated data to json file
      TradeManager.saveAllTradesToFile()
      
      # sleep for 30 seconds and then continue
      time.sleep(30)
      logging.info('TradeManager: Main thread woke up..')
Example #58
def main():

    # make_print_to_file(path='/results')

    args = parse_args()

    print_args(args)

    set_seed(args.seed)

    # load data
    train_data, val_data, test_data, vocab = loader.load_dataset(args)

    args.id2word = vocab.itos

    # initialize model
    model = {}
    model["G"], model["D"] = get_embedding(vocab, args)
    model["clf"] = get_classifier(model["G"].ebd_dim, args)

    if args.mode == "train":
        # train model on train_data, early stopping based on val_data
        train(train_data, val_data, model, args)

    # val_acc, val_std, _ = test(val_data, model, args,
    #                                         args.val_episodes)

    test_acc, test_std, drawn_data = test(test_data, model, args,
                                          args.test_episodes)

    # path_drawn = args.path_drawn_data
    # with open(path_drawn, 'w') as f_w:
    #     json.dump(drawn_data, f_w)
    #     print("store drawn data finished.")

    # file_path = r'../data/attention_data.json'
    # Print_Attention(file_path, vocab, model, args)

    if args.result_path:
        directory = args.result_path[:args.result_path.rfind("/")]
        if not os.path.exists(directory):
            os.makedirs(directory)

        result = {
            "test_acc": test_acc,
            "test_std": test_std,
            # "val_acc": val_acc,
            # "val_std": val_std
        }

        for attr, value in sorted(args.__dict__.items()):
            result[attr] = value

        with open(args.result_path, "wb") as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
Example #59
def BRAINSCutGenerateProbabilityMap(configurationFilename, probabilityMapDict,
                                    gaussianSigma, outputXmlFilename):
    print("""*****************************
          BRAINSCutGenerateProbabilityMap
          """)
    generateProbabilityMap = True
    createVectors = False
    trainModel = False
    applyModel = False
    dummyMethodParameter = {}

    print(("""generate probability map
           {str}
           """.format(str=probabilityMapDict)))

    import os

    for roi in list(probabilityMapDict.keys()):
        print((os.path.abspath(probabilityMapDict[roi])))
        probDir = os.path.dirname(os.path.abspath(probabilityMapDict[roi]))
        if not os.path.exists(probDir):
            os.makedirs(probDir)
    dummyFilename = "na"
    dummyDict = {}
    createVectorsNormalization = "dummy"
    from ConfigurationParser import BRAINSCutCMDFromConfigFile

    returnList = BRAINSCutCMDFromConfigFile(
        configurationFilename,
        outputXmlFilename,
        probabilityMapDict,
        gaussianSigma,
        dummyFilename,
        dummyFilename,
        generateProbabilityMap,
        createVectors,
        createVectorsNormalization,
        trainModel,
        applyModel,
        dummyDict,
        dummyMethodParameter,
    )
    returnProbMapList = returnList["probabilityMap"]
    import sys

    if list(returnProbMapList.keys()) != list(probabilityMapDict.keys()):
        print("""ERROR
              returnProbMapList has to match probabilityMapDict
              in BRAINSCutGenerateProbabilityMap
              """)
        sys.exit()

    outputXmlFilename = os.path.abspath(outputXmlFilename)
    return returnProbMapList, outputXmlFilename, configurationFilename
Example #60
    def save_model(self, model, save_dir, model_name, mtype='pkl'):
        import os
        from os.path import join, exists
        if not exists(save_dir):
            os.makedirs(save_dir)

        if mtype == 'pkl':
            save_path = join(save_dir, model_name+'.pkl')
            torch.save(model.state_dict(), save_path)
        elif mtype == 'pth':
            save_path = join(save_dir, model_name+'.pth')
            torch.save(model.state_dict(), save_path)
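A hedged usage sketch (the trainer object and names are illustrative):

    trainer.save_model(model, './checkpoints', 'netG', mtype='pth')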