Example No. 1
 def getParentPathId(self, path):
     """
     Video DB: Adds all subdirectories to SQL path while setting a "trail"
     of parentPathId
     """
     if "\\" in path:
         # Local path
         parentpath = "%s\\" % dirname(dirname(path))
     else:
         # Network path
         parentpath = "%s/" % dirname(dirname(path))
     pathid = self.getPath(parentpath)
     if pathid is None:
         self.cursor.execute("select coalesce(max(idPath),0) from path")
         pathid = self.cursor.fetchone()[0] + 1
         query = ' '.join((
             "INSERT INTO path(idPath, strPath)",
             "VALUES (?, ?)"
         ))
         self.cursor.execute(query, (pathid, parentpath))
         parentPathid = self.getParentPathId(parentpath)
         query = ' '.join((
             "UPDATE path",
             "SET idParentPath = ?",
             "WHERE idPath = ?"
         ))
         self.cursor.execute(query, (parentPathid, pathid))
     return pathid
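A quick sketch of the dirname(dirname(...)) trick above: for a path that ends in a separator, it yields the parent directory, again with a trailing separator. The sample paths are hypothetical; ntpath and posixpath stand in for the local/network branches.

import ntpath
import posixpath

local = "C:\\Media\\TV\\Show\\Season 1\\"           # hypothetical local path
print("%s\\" % ntpath.dirname(ntpath.dirname(local)))         # C:\Media\TV\Show\

network = "smb://server/share/tv/show/season1/"     # hypothetical network path
print("%s/" % posixpath.dirname(posixpath.dirname(network)))  # smb://server/share/tv/show/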
Example No. 2
def ssis_proof(inputfn, outputfn=None):
    inputfolder = ntpath.dirname(inputfn)
    with open(inputfn, 'r') as f:
        contents = f.read()
        # Make sure all SET CATALOG statements are properly commented out for the
        # SSIS loading framework
        contents = re.sub(r'(?<!--)(-+\s+)?(SET CATALOG )', '--SET CATALOG ', contents, flags=re.IGNORECASE)
        # Make sure all double quotes are escaped
        contents = re.sub(r'(?<!\\)"', r'\"',contents)
        # substitute any quoted string
        contents = re.sub(r"([\"'])((?:\\\1|\1\1|(?P<quote>;)|(?!\1).)*)\1",semicolonrepl, contents)
        # Putting tokens in single quotes
        contents = re.sub(r"\((.*)\|\|''\)",r"'\1'",contents)
        # Double up escaped backslashes
        contents = contents.replace(r"\\", r"\\\\")
        # Remove trailing and leading spaces
        contents = contents.strip()
        # Append start and end quote to contents
        contents = '"' + contents + '"'
        name = ntpath.splitext(ntpath.basename(inputfn))
        if outputfn is None:
            outputfn = name[0]+'_SSIS_READY'+name[1]
        if ntpath.dirname(outputfn) == '':
            outputfn = join(inputfolder, outputfn)
    with open(outputfn,'w') as o:
        o.write(contents)
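A minimal check of the SET CATALOG pattern above, on hypothetical SQL: the (?<!--) lookbehind leaves already-commented statements alone.

import re

sql = "SET CATALOG mydb;\n--SET CATALOG other;"
print(re.sub(r'(?<!--)(-+\s+)?(SET CATALOG )', '--SET CATALOG ', sql, flags=re.IGNORECASE))
# --SET CATALOG mydb;
# --SET CATALOG other;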
Example No. 3
def main(*argv):
    '''handles user input and creates a panel'''
    parser = argparse.ArgumentParser(description='This script takes networks and creates the necessary files to make an interactive Hive panel')
    parser.add_argument('-input', help='Location of network file')
    parser.add_argument('-format', help='Input format of network')
    parser.add_argument('-nodes', help='Location of node network file')
    parser.add_argument('-edges', help='Location of edge network file')
    parser.add_argument('-title', help='Title/Name of graph')
    parser.add_argument('-folder', help='Output folder')
    parser.add_argument('-axes', help='Number of axes',default=NUM_AXES)
    parser.add_argument('-double', help='Makes hive plots with doubled axes', action = 'store_true')
    args = parser.parse_args()

    #Get graph in networkx format
    if args.format=='graphml':
        print "Reading .graphml as a networkx graph."
        G = import_graphml(args.input)
        title = basename(args.input).split('.')[0]
        folder = dirname(args.input)
    elif args.format=='txt':
        print "Reading .txt as a networkx graph."
        G = import_graph(args.nodes, args.edges)
        title = basename(args.nodes).split('.')[0]
        folder = dirname(args.nodes)
    else:
        print "Please specify the format of your network: .gexf, .graphml, or a 2 .txt files with node and edge attribute."
        parser.print_help()
        sys.exit()


    if args.title:
        title = args.title

    if args.folder:
        folder = args.folder

    #store all the plotting info in the graph as attributes
    G.graph['axes']=args.axes
    G.graph['double']=args.double
    G.graph['folder']=folder
    G.graph['title']=title
    G.graph['nodeAttributes'],G.graph['edgeAttributes']=get_all_attributes(G)
    for m in NODE_MEASURES:
        G.graph['nodeAttributes'].append(m.__name__)
        measures = m(G)
        nx.set_node_attributes(G,m.__name__,measures)

    for m in EDGE_MEASURES:
        G.graph['edgeAttributes'].append(m.__name__)
        measures = m(G)
        nx.set_edge_attributes(G,m.__name__,measures)

    for n,v in G.graph.iteritems():
        print n,v

    print 'Making panel.'
    make_panel(G,)
Example No. 4
    def initconfig(self):
        '''
        description: load the configuration file
        '''
        config = configparser.ConfigParser()

        # self.logger.info("配置文件为:%s" % os.path.realpath(__file__))
        configfile = os.path.join(os.path.split(sys.path[0])[0], "oommonitor.config")

        config.read(configfile)
        if len(config.sections()) == 0:
            self.logger.warn("不使用配置文件,系统将自动初始化配置信息,默认监测服务名为:%s", self.servicename)
        elif len(config.sections()) > 1:
            self.logger.error("配置文件配置错误!不允许多个配置项!CODE:%s%s",
                              len(config.sections()), self.log_new_line)
            sys.exit(-1)
        elif len(config.sections()) == 1:
            self.logger.info("配置文件为:%s", configfile)
            section = config.sections().pop()
            # Assign values from the configuration file
            # self.logger.info(self.__dict__)
            for key in config[section]:
                # self.logger.info("%s in self", (key in self.__dict__))
                if key in self.__dict__:
                    setattr(self, key, config[section][key])
            # The logging configuration was updated; rebuild the logging handler
            current_dir = os.path.join(os.path.split(sys.path[0])[0], "oommonitor.log")
            handler = handlers.RotatingFileHandler(current_dir,
                                                   maxBytes=int(self.max_megabytes) * 1024 * 1024,
                                                   backupCount=int(self.backup_count))
            formatter = logging.Formatter(self.log_formatter)
            handler.setFormatter(formatter)
            self.logger.handlers[0] = handler

        # Internal automatic assignment: even when values come from the configuration file, the remaining configuration items still need to be fixed up
        result = os.popen("sc qc %s" % self.servicename).read()
        binary_path_name = re.findall(r'([a-zA-Z]:(\\[\sA-Za-z0-9_\.-]*)+)', result)

        if len(self.imagename) == 0:
            self.imagename = path_leaf(binary_path_name[0][0]).strip()

        tomcat_home = ntpath.dirname(ntpath.dirname(binary_path_name[0][0]))

        if len(self.basedir) == 0:
            self.basedir = r"%s" % (tomcat_home + "\\logs")

        if len(self.backupname) == 0:
            self.backupname = r"%s" % (tomcat_home + "\\nis-logs")

        if len(self.nis_version) == 0:
            version_and_nisurl = parse_orcus_web_xml(tomcat_home +
                                                 "\\webapps\\nis\\WEB-INF\\orcus_web.xml")
            self.nis_version = version_and_nisurl[0]
            if compare_to(self.nis_version, self.landmark_nis_version) > 0:
                if len(self.oomrunurl) == 0:
                    self.oomrunurl = version_and_nisurl[1] + "oomrun"
Example No. 5
def _remove_old_files():
    """ Remove files from previous bundle """
    cache_file = '/var/local/cumulus-bundle-handler.cache'
    if sys.platform in ['win32', 'cygwin']:
        if not ospath.exists('C:\\cumulus\\cache'):
            os.makedirs('C:\\cumulus\\cache')
        cache_file = 'C:\\cumulus\\cache\\cumulus-bundle-handler.cache'

    if not ospath.exists(cache_file):
        LOGGER.info('No previous bundle files to clean up')
        return

    LOGGER.info('Removing old files and directories')

    with open(cache_file, 'r') as file_handle:
        for line in file_handle.readlines():
            line = line.replace('\n', '')

            if not ospath.exists(line):
                continue

            if ospath.isdir(line):
                try:
                    os.removedirs(line)
                    LOGGER.debug('Removing directory {}'.format(line))
                except OSError:
                    pass
            elif ospath.isfile(line):
                LOGGER.debug('Removing file {}'.format(line))
                os.remove(line)

                try:
                    os.removedirs(ospath.dirname(line))
                except OSError:
                    pass
            elif ospath.islink(line):
                LOGGER.debug('Removing link {}'.format(line))
                os.remove(line)

                try:
                    os.removedirs(ospath.dirname(line))
                except OSError:
                    pass
            else:
                LOGGER.warning('Unknown file type {}'.format(line))

    # Remove the cache file when done
    os.remove(cache_file)
Example No. 6
    def __init__(self, template_name, filename=None, outputfolder=None):
        self.template_name = template_name
        dirname = os.path.dirname(template_name)
        self.env = jinja2.Environment(loader=jinja2.FileSystemLoader(dirname),
                                      extensions=["jinja2.ext.do"])
        
        
        self.env.tests['contains'] = is_contains
        self.env.tests['endswith'] = is_endswith
        self.env.tests['startswith'] = is_startswith
        
        self.env.filters['replace_all'] = filter_replace_all
        self.env.filters['split'] = filter_split
        self.env.filters['find_dict'] = fitler_find_dict

        self.env.trim_blocks=True       
        self.env.lstrip_blocks = True
        if filename is None: 
            self.filename = template_name 
        else: 
            self.filename = filename        
            
        if outputfolder is not None:
            self.outputfolder = outputfolder
            self.filename = os.path.join(self.outputfolder, ntpath.basename(self.filename))
        else:
            self.outputfolder = ntpath.dirname(self.filename)
        self.inputs = []
Example No. 7
def merge_data():
    # print train_data_dir + "/train_pair*"
    train_pairs = glob.glob(train_data_dir + "/*train_pairs*")
    print list(zip(train_pairs, list(xrange(0, 4))))

    for i, train_pair in enumerate(train_pairs):
        dir_name = ntpath.dirname(train_pair)
        pref = ntpath.basename(train_pair).split("train_pairs")[0]
        suffix = ntpath.basename(train_pair).split("train_pairs")[-1]
        # print pref, suffix
        info = dir_name + "/" + pref + "train_publicinfo" + suffix
        target = dir_name + "/" + pref + "train_target" + suffix
        print info, pref, suffix
        X = data_io.read_train_pairs(train_pair)
        y = data_io.read_train_target(target)
        inf_data = data_io.read_train_info(info)
        X, y, inf_data = process_indices(X, y, inf_data, i)
        if "X_merged" not in locals():
            X_merged = X
            y_merged = y
            info_merged = inf_data
        else:
            print "Shape before appending", X_merged.shape, y_merged.shape, X.shape, y.shape
            X_merged = X_merged.append(X)
            y_merged = y_merged.append(y)
            info_merged = info_merged.append(inf_data)
            print "Shape thus far", X_merged.shape, y_merged.shape

    return X_merged, y_merged, info_merged
Example No. 8
    def getRemoteTempPath(self):
        if not conf.tmpPath and Backend.isDbms(DBMS.MSSQL):
            _ = unArrayizeValue(inject.getValue("SELECT SERVERPROPERTY('ErrorLogFileName')", safeCharEncode=False))
            if _:
                conf.tmpPath = ntpath.dirname(_)

        if not conf.tmpPath:
            if Backend.isOs(OS.WINDOWS):
                if conf.direct:
                    conf.tmpPath = "%TEMP%"
                else:
                    self.checkDbmsOs(detailed=True)

                    if Backend.getOsVersion() in ("2000", "NT"):
                        conf.tmpPath = "C:/WINNT/Temp"
                    elif Backend.isOs("XP"):
                        conf.tmpPath = "C:/Documents and Settings/All Users/Application Data/Temp"
                    else:
                        conf.tmpPath = "C:/Windows/Temp"
            else:
                conf.tmpPath = "/tmp"

        if re.search(r"\A[\w]:[\/\\]+", conf.tmpPath, re.I):
            Backend.setOs(OS.WINDOWS)

        conf.tmpPath = normalizePath(conf.tmpPath)
        conf.tmpPath = ntToPosixSlashes(conf.tmpPath)

        hashDBWrite(HASHDB_KEYS.CONF_TMP_PATH, conf.tmpPath)

        return conf.tmpPath
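The drive-letter regex near the end is what flips the backend OS to Windows; a small sketch with hypothetical values:

import re

for p in ("C:/WINNT/Temp", "/tmp"):
    print("%s -> %s" % (p, bool(re.search(r"\A[\w]:[\/\\]+", p, re.I))))
# C:/WINNT/Temp -> True
# /tmp -> False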
Example No. 9
 def _adjustFileRef(self,fileRef,basedir):
    basename = ntpath.basename(fileRef['path'])
    dirname=ntpath.normpath(ntpath.join(basedir,ntpath.dirname(fileRef['path'])))
    retval=ntpath.join(dirname,basename)
    if os.path.sep == '/': #are we running in cygwin/Linux?
       retval = retval.replace('\\', '/')
    return retval
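A sketch of how the join/normpath combination above resolves a relative file reference against a base directory (the values are hypothetical):

import ntpath

file_ref = "..\\src\\file.arxml"   # hypothetical relative reference
basedir = "C:\\proj\\cfg"          # hypothetical base directory
print(ntpath.normpath(ntpath.join(basedir, ntpath.dirname(file_ref))))
# C:\proj\src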
Example No. 10
def _create_blender_textures_from_mod(mod, base_dir):
    textures = [None]  # materials refer to textures in index-1
    # TODO: check why in Arc.header.file_entries[n].file_path it returns a bytes, and
    # here the whole array of chars

    for i, texture_path in enumerate(mod.textures_array):
        path = texture_path[:].decode('ascii').partition('\x00')[0]
        folder = ntpath.dirname(path)
        path = os.path.join(base_dir, *path.split(ntpath.sep))
        path = '.'.join((path, 'tex'))
        if not os.path.isfile(path):
            # TODO: log warnings, figure out 'rtex' format
            continue
        tex = Tex112(path)
        try:
            dds = tex.to_dds()
        except TextureError as err:
            # TODO: log this instead of printing it
            print('Error converting "{}"to dds: {}'.format(path, err))
            textures.append(None)
            continue
        dds_path = path.replace('.tex', '.dds')
        with open(dds_path, 'wb') as w:
            w.write(dds)
        image = bpy.data.images.load(dds_path)
        texture = bpy.data.textures.new(os.path.basename(path), type='IMAGE')
        texture.image = image
        textures.append(texture)
        # saving meta data for export
        texture.albam_imported_texture_folder = folder
        texture.albam_imported_texture_value_1 = tex.unk_float_1
        texture.albam_imported_texture_value_2 = tex.unk_float_2
        texture.albam_imported_texture_value_3 = tex.unk_float_3
        texture.albam_imported_texture_value_4 = tex.unk_float_4
    return textures
Example No. 11
 def openFile(self):
     print QtCore.QDir.currentPath()
     self.fileName = QtGui.QFileDialog.getOpenFileNames(self.win_plot, "Open ",
             self.openFileDirectory, "*.kmeans")
     if len(self.fileName) == 0:
         self.addLogText('no file was chosen !')
         return
     
     ppath = str(self.fixpath(self.fileName[0]))
     print ppath
     path = ntpath.dirname(ppath)
     print path
     self.openFileDirectory = path
     paths = self.fileName
    
     if not self.multipleFiles:      
         for idx, path in enumerate(paths):
             if idx == 0:
                 self.arrayFromFile = np.asarray(np.loadtxt(str(self.fixpath(path)), delimiter=","))
                 self.addLogText('load array shape : ', self.arrayFromFile.shape, textColor='green')
             else:
                 self.arrayFromFile = np.concatenate((self.arrayFromFile, np.asarray(np.loadtxt(str(self.fixpath(path)), delimiter=","))), axis=0)
                 self.addLogText('load array shape : ', self.arrayFromFile.shape, textColor='green')
     else:
         for path in paths:
             if self.arrayFromFile is None:
                 self.arrayFromFile = np.asarray(np.loadtxt(str(self.fixpath(path)), delimiter=","))
                 self.addLogText('load array shape : ', self.arrayFromFile.shape, textColor='green')
             else:
                 self.arrayFromFile = np.concatenate((self.arrayFromFile, np.asarray(np.loadtxt(str(self.fixpath(path)), delimiter=","))), axis=0)
                 self.addLogText('load array shape : ', self.arrayFromFile.shape, textColor='green')
Example No. 12
    def save_filter_settings(self):

        self.read_filter_table()
        self.convert_user_filters()

        filename = QtWidgets.QFileDialog.getSaveFileName(self,self.tr("Save file"), self.prev_dir_path, "Description Files (*.scda)")
        filename = filename[0]
        
        if (not filename):
            return

        try:
            filename.encode('ascii')
        except:
            msg = self.tr("Filenames with non-ASCII characters were found.\n\nThe application currently only supports ASCII filenames.")
            QtWidgets.QMessageBox.about(self, self.tr("Warning"), msg) 
            return
        
        self.prev_dir_path = ntpath.dirname(filename)

        try:        
            with open(filename, 'wb') as f:
                pickle.dump(self.user_filters_plain_format, f)
        except:
            msg = self.tr("Could not save file \"" + ntpath.basename(filename) + "\"")
            QtWidgets.QMessageBox.about(self, self.tr("Warning"), msg) 
            return            
            
        self.statusBar().showMessage(self.tr("File saved"),3000)  
Example No. 13
def main():
  # This command-line parsing code is provided.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]

  if not args:
    print('usage: [--summaryfile] file [file ...]')
    sys.exit(1)

  # Notice the summary flag and remove it from args if it is present.
  summary = False
  if args[0] == '--summaryfile':
    summary = True
    del args[0]

  # +++your code here+++
  # For each filename, get the names, then either print the text output
  # or write it to a summary file
  sortednames = extract_names(args[0])

  if summary:
      # write to summary file
      summaryfilename = '%s\\%s_summary.txt' % (ntpath.dirname(args[0]), str(sortednames[0]))
      sumfile = open(summaryfilename, mode='w+')
      for item in sortednames:
          sumfile.write(item+'\n')

      print(u"Summary file created ({0:s})...".format(summaryfilename))
  else:
    print(sortednames)
Example No. 14
    def __init__(self,save_path,datetime_str):
        '''
        Initializes the save_data object.

        :param save_path: The path for saving the data.
        :param datetime_str: The date-and-time string, assumed to be unique for each simulation, that is used as a suffix for file names.

        Main internal variables:

        * data: the main variable to be saved. It contains all the data about the positions of the NeuronGroup()s as well as the monitor results.
        * syntax_bank: Since the monitors are explicitly defined in the Globals(), extracting their data requires addressing each monitor by name. To automate this process, the syntaxes for extracting the data from the target monitors are generated and saved in this variable, so that they can be run at the end of the simulation.
        '''
        self.save_path = save_path
        self.datetime_str = datetime_str
        self.save_filename = ntpath.basename(self.save_path)
        self.save_pure_filename = os.path.basename(os.path.splitext(self.save_path)[0])

        self.save_folder = ntpath.dirname(self.save_path)
        self.save_extension = os.path.splitext(self.save_path)[1]
        if os.getcwd() in self.save_path:
            print " -  The output of the system is saved in %s" %os.path.abspath(os.path.join(os.getcwd(), os.pardir))
            self.save_folder = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
            self.save_path = os.path.join(self.save_folder,self.save_filename)
        self.data = {}
        self.syntax_bank = []
Example No. 15
def removeNoise(filePath, noiseLocations, sensor='accelerometer'):

    up.plotFile(filePath, sensor)

    rawList = ud.readFile(filePath)
    rawList = ud.getDataBySensorType(list=rawList, datatype=sensor)

    fileName = ntpath.basename(filePath)
    dirName = ntpath.dirname(filePath)
    outPath = dirName+'/'+fileName+'.rfn'

    print 'total length of ', sensor, ' data is:', len(rawList)
    print 'noise is located at:', noiseLocations
    print 'refined data will be saved in:', outPath

    dataList = []
    noise = noiseLocations.pop(0)
    for i in range(0, len(rawList)):
        if i < noise[0] or i > noise[1]:
            dataList.append(rawList[i])

        if i >= noise[1]:
            if len(noiseLocations) > 0:
                noise = noiseLocations.pop(0)

    f = open(outPath, 'w')
    for d in dataList:
        f.write(json.dumps(d)+'\n')
    f.close()

    up.plotFile(outPath, sensor)
Example No. 16
def update_shader(cls, context, shader_name):

	settings = context.scene.glsl_shader_settings
	subs = {}

	properties = shader_props(cls)
	for prop in properties:

		value = getattr(settings, prop)
		print(value)

		# GLSL needs lower case booleans
		if value == True:
			value = 'true'
		if value == False:
			value = 'false'

		subs[prop] = value

	shader_location = os.path.join(ntpath.dirname(__file__), 'templates/shaders/' + shader_name)
	shader_file = open(shader_location, 'r')
	text = shader_file.read()

	template = Template(text)
	shader = template.substitute(**subs)

	text_block = bpy.data.texts[shader_name]
	text_block.from_string(shader)

	return
Example No. 17
def directoryPath(path):
    retVal = None
    if isWindowsPath(path):
        retVal = ntpath.dirname(path)
    else:
        retVal = posixpath.dirname(path)
    return retVal
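For illustration, a hedged sketch of how such a dispatcher behaves. The is_windows_path helper below is a hypothetical stand-in for isWindowsPath (assumed to look for a backslash or a drive letter), and the sample paths are made up:

import ntpath
import posixpath

def is_windows_path(path):
    # hypothetical stand-in: drive letter or backslash marks a Windows path
    return "\\" in path or (len(path) > 1 and path[1] == ":")

for p in ("C:\\Users\\alice\\notes.txt", "/home/alice/notes.txt"):
    print(ntpath.dirname(p) if is_windows_path(p) else posixpath.dirname(p))
# C:\Users\alice
# /home/alice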
Example No. 18
def main(args):

    burstTag = '{(.+?)}'

    if args.tag is not None:
        burstTag = args.tag

    print "Using Burst Tag: ", burstTag, ' on ', args.inputpdf
    outputpath = ntpath.dirname(args.inputpdf)
    if args.output is not None:
        outputpath = args.output
    filename = ntpath.basename(args.inputpdf)
    input1 = PdfFileReader(open(args.inputpdf, 'rb'))
    previousTag = "TAGNOTFOUND"
    currentTag = ""
    output = PdfFileWriter()
    outputpages = defaultdict(list)
    for page in input1.pages:
        pagecontent = page.extractText()
        tagSearch = re.search(burstTag, pagecontent)
        if tagSearch:
            currentTag = str(tagSearch.group(1))
            previousTag = currentTag
        else: #no tag was found, reuse the previous page's tag
            currentTag = previousTag
        outputpages[currentTag].append(page)
    for tag,pg in outputpages.iteritems():
        print tag
        output = PdfFileWriter()
        for outpage in pg:
            output.addPage(outpage)
        outputStream = file(outputpath + '\\' + tag + '-' + filename, "wb")
        output.write(outputStream)
Example No. 19
 def _create_file_name(self, in_path):
     """ Create new file name """
     file_name = ntpath.basename(in_path)
     dir_path = ntpath.dirname(in_path)
     out_ras_name = 'RES_' + file_name
     new_ras_path = os.path.join(dir_path, out_ras_name).replace('\\', '/')
     return new_ras_path
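A standalone sketch of what _create_file_name produces, with a hypothetical input path: the 'RES_' prefix is added and backslashes are normalized to forward slashes.

import ntpath
import os

in_path = "C:\\rasters\\dem.tif"   # hypothetical input
print(os.path.join(ntpath.dirname(in_path), 'RES_' + ntpath.basename(in_path)).replace('\\', '/'))
# C:/rasters/RES_dem.tif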
Example No. 20
def fix_turnstile_data(filenames):
    """
    Filenames is a list of MTA Subway turnstile text files. A link to an example
    MTA Subway turnstile text file can be seen at the URL below:
    http://web.mta.info/developers/data/nyct/turnstile/turnstile_110507.txt

    As you can see, there are numerous data points included in each row of
    an MTA Subway turnstile text file.

    You want to write a function that will update each row in the text
    file so there is only one entry per row. A few examples below:
    A002,R051,02-00-00,05-28-11,00:00:00,REGULAR,003178521,001100739
    A002,R051,02-00-00,05-28-11,04:00:00,REGULAR,003178541,001100746
    A002,R051,02-00-00,05-28-11,08:00:00,REGULAR,003178559,001100775

    Write the updates to a different text file in the format of "updated_" + filename.
    For example:
        1) if you read in a text file called "turnstile_110521.txt"
        2) you should write the updated data to "updated_turnstile_110521.txt"

    The order of the fields should be preserved.

    You can see a sample of the turnstile text file that's passed into this function
    and the corresponding updated file in the links below:

    Sample input file:
    https://www.dropbox.com/s/mpin5zv4hgrx244/turnstile_110528.txt
    Sample updated file:
    https://www.dropbox.com/s/074xbgio4c39b7h/solution_turnstile_110528.txt
    """

    for name in filenames:
        with open(name, 'rb') as f:
            reader = csv.reader(f)
            result = []
            rowslice = 5

            for row in reader:
                colstart = 3
                colend = colstart + rowslice
                rowlen = len(row)

                while colend <= rowlen:
                    current = [row[0]] + [row[1]] + [row[2]]

                    # add rest of the row
                    for x in range(colstart, colend):
                        current += [row[x]]

                    # add row to result
                    if current:
                        result.append(current)

                    colend += rowslice
                    colstart += rowslice

        new_filename = ntpath.dirname(name) + r'\updated_' + ntpath.basename(name)
        with open(new_filename, 'wb') as f:
            writer = csv.writer(f)
            writer.writerows(result)
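A quick check of the five-fields-per-entry slicing above, using the two flattened entries from the docstring example:

row = ['A002', 'R051', '02-00-00',
       '05-28-11', '00:00:00', 'REGULAR', '003178521', '001100739',
       '05-28-11', '04:00:00', 'REGULAR', '003178541', '001100746']
colstart, rowslice = 3, 5
while colstart + rowslice <= len(row):
    print(','.join(row[:3] + row[colstart:colstart + rowslice]))
    colstart += rowslice
# A002,R051,02-00-00,05-28-11,00:00:00,REGULAR,003178521,001100739
# A002,R051,02-00-00,05-28-11,04:00:00,REGULAR,003178541,001100746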
Example No. 21
    def perform_sync(self, share_name, path, conn_logger, log):
        self.redis_at_cache.write_last_access_time(share_name, path)

        ctxt = {
            'is_file': False,
            'needs_import': False,
        }

        file_metadata = yield self.fscache.metadata_object(share_name, path, log, include_children=False)

        if not file_metadata.exists():
            # We need to sync the parent directory, just in case we're trying to create a new file in a
            # not-yet-synced directory
            parent_dir = ntpath.dirname(path)
            if parent_dir != path:
                yield self.sync(share_name, parent_dir, conn_logger)
        else:
            # Is it a regular file ? Then download
            if file_metadata.is_file():
                ctxt['is_file'] = True
                # Create the containing directory
                self.create_dir_hierarchy(file_metadata.parent_metadata(), log)
                yield self.send_file(file_metadata, log, ctxt)

            # Is it a directory, then just create it
            elif file_metadata.is_dir():
                # Create the containing directory
                self.create_dir_hierarchy(file_metadata.parent_metadata(), log)
                self.fs.create_directory(file_metadata, log)
Example No. 22
def create_local_backup(filepath):
    directory = ntpath.dirname(filepath)
    filename = ntpath.basename(filepath)
    backupfile = "%s.tar.gz" % filename
    with cd(directory):
        sudo("tar -zcf %s %s" % (backupfile, filename))
        get(backupfile, backupfile)
Example No. 23
 def adjustDcfFileRef(self,dcf,basedir):
    for elem in dcf['fileRef']:
       basename = ntpath.basename(elem['path'])
       dirname=ntpath.normpath(ntpath.join(basedir,ntpath.dirname(elem['path'])))
       elem['path']=ntpath.join(dirname,basename)
       if os.path.sep == '/': #are we running in cygwin/Linux?
          elem['path'] = elem['path'].replace('\\', '/')
Example No. 24
def is_path_of_method_in_package(class_name,method_name, path_list,d):
    for p in path_list:
        src_class_name, src_method_name, src_descriptor =  p.get_src(d.get_class_manager())
        package = ntpath.dirname(src_class_name)
        if (src_method_name == method_name and src_class_name == class_name) or package in class_name:
            return p
    return None
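Worth noting: ntpath.dirname treats forward slashes as separators too, which is why it can pull the "package" part out of an Androguard-style class name. A sketch with a hypothetical value:

import ntpath

src_class_name = "Lcom/example/app/MainActivity;"   # hypothetical
print(ntpath.dirname(src_class_name))
# Lcom/example/app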
Example No. 25
def ExamineGDB(gdb):
    import ntpath, re
    reviewpath=ntpath.basename(gdb)

    from arcpy import env, ListWorkspaces, ListDatasets, ListTables, ListFeatureClasses, GetCount_management, Compact_management, ListFields
    #set the workspace from the config file
    env.workspace = ntpath.dirname(gdb)
    ng911 = gdb
    print "geodatabases"
    print ng911
    env.workspace = ng911
    datasets = ListDatasets()
    print "Datasets:"
    for dataset in datasets:
        print "     "+ str(dataset)
    tables = ListTables()
    print " tables:"
    for table in tables:
        fcc = GetCount_management(table)
        print "     "+str(table)
    fd = datasets[0]
    fcs = ListFeatureClasses("", "", fd)
    for fc in fcs:
        fields = ListFields(fc)
        fcc = GetCount_management(fc)
        print fc +", " + str(fcc) + " features"
        for field in fields:
            print "        "+str(field.name)+", "+str(field.type)
    checkfile = reviewpath+"/"+ntpath.basename(ng911)
    topo= fd+"/NG911_Topology"
    Compact_management(ng911)
Example No. 26
	def moveToolOutput(self, outputFilename):
		try:
			# first create the tool folder if it doesn't already exist
			tool = ntpath.basename(ntpath.dirname(str(outputFilename)))
			path = self.outputfolder+'/'+str(tool)
			if not os.path.exists(str(path)):
				os.makedirs(str(path))
			
			# check if the outputFilename exists, if not try .xml and .txt extensions (different tools use different formats)
			if os.path.exists(str(outputFilename)) and os.path.isfile(str(outputFilename)):
				shutil.move(str(outputFilename), str(path))
			# move all the nmap files (not only the .xml)
			elif os.path.exists(str(outputFilename)+'.xml') and os.path.exists(str(outputFilename)+'.nmap') and os.path.exists(str(outputFilename)+'.gnmap') and os.path.isfile(str(outputFilename)+'.xml') and os.path.isfile(str(outputFilename)+'.nmap') and os.path.isfile(str(outputFilename)+'.gnmap'):
				try:
					exportNmapToHTML(str(outputFilename))
					shutil.move(str(outputFilename)+'.html', str(path))
				except:
					pass

				shutil.move(str(outputFilename)+'.xml', str(path))
				shutil.move(str(outputFilename)+'.nmap', str(path))
				shutil.move(str(outputFilename)+'.gnmap', str(path))
			elif os.path.exists(str(outputFilename)+'.xml') and os.path.isfile(str(outputFilename)+'.xml'):
				shutil.move(str(outputFilename)+'.xml', str(path))
			elif os.path.exists(str(outputFilename)+'.txt') and os.path.isfile(str(outputFilename)+'.txt'):
				shutil.move(str(outputFilename)+'.txt', str(path))							
		except:
			print '[-] Something went wrong moving the tool output file..'
			print "[-] Unexpected error:", sys.exc_info()[0]
Example No. 27
    def load_filter_settings(self):

        filename = QtWidgets.QFileDialog.getOpenFileName(self,self.tr("Open file"), self.prev_dir_path, "Filter Settings Files (*.scda)")
        filename = filename[0]
        
        if (not filename):
            return

        try:
            filename.encode('ascii')
        except:
            msg = self.tr("Filenames with non-ASCII characters were found.\n\nThe application currently only supports ASCII filenames.")
            QtWidgets.QMessageBox.about(self, self.tr("Warning"), msg) 
            return
        
        self.prev_dir_path = ntpath.dirname(filename)

        try:
            with open(filename,'rb') as f:
                self.user_filters_plain_format = pickle.load(f)
        except:
            msg = self.tr("Could not read file \"" + ntpath.basename(filename) + "\"")
            QtWidgets.QMessageBox.about(self, self.tr("Warning"), msg) 
            return

        self.set_user_filters()
            
        self.statusBar().showMessage(self.tr("New filter settings loaded"),3000)
Example No. 28
	def openFile(self):
		filedialog = QtGui.QFileDialog()
		#filedialog.setNameFilter('*.jpg')
		filename = filedialog.getOpenFileName(self, 'Open File', os.path.expanduser("~"),"*.wlm")
		if filename == "":
			return
		f = open(str(filename), 'r') 
		justName = ntpath.basename(str(filename))
		self.__class__.currentFileDir = ntpath.dirname(str(filename))
		self.__class__.projectName = filename
		self.saveGlobal()
		print "self.currentFileDir:",  self.__class__.currentFileDir
		print "self.projectName:",  self.__class__.projectName
		self.grid = pickle.load(f) 
		f.close()
		self.currentHW = self.grid.pop(0)#get current hardware from file
		print self.currentHW
		self.chooseHW(self.currentHW)
		#check width and height, make bounding box
		ManageGrid(self.grid, self.scene,self.Tools,self.items).changeRectangle()
		
		try: ManageGrid(self.grid, self.scene,self.Tools,self.items).totalRedraw()
		except:
			self.dialog = popupDialogs.wrongVersionDialog()
			self.dialog.exec_()# For Modal dialogs
			self.setupDataGrid()
		else:
			self.reFillList(self.ui.tableWidget)
Example No. 29
def main(*argv):
    '''handles user input and creates a panel'''
    parser = argparse.ArgumentParser(description='This script takes networks and creates the necessary files to make an interactive Hive panel')
    #parser.add_argument('-input', help='Location of network file')
    parser.add_argument('-nodes', help='Location of node network file')
    parser.add_argument('-edges', help='Location of edge network file')
    parser.add_argument('-title', help='Title/Name of graph')
    parser.add_argument('-folder', help='Output folder')
    parser.add_argument('-format', help='Input format of network', default = 'txt')
    args = parser.parse_args()

    # #Get graph in networkx format
    # if args.format=='graphml':
    #     print_message("Reading .graphml as a networkx graph.")
    #     G = import_graphml(args.input)
    #     title = splitext(basename(args.nodes))[0]
    #     folder = dirname(args.input)
    # elif
    if args.format=='txt':
        print_message("Reading .txt as a networkx graph.")
        G = import_graph(args.nodes, args.edges)
        title = splitext(basename(args.nodes))[0]
        folder = dirname(args.nodes)
    else:
        print_message("Please specify the format of your network. Currently only 2 txt or csv files are accepted")
    #     parser.print_help()
    #     sys.exit()


    if args.title:
        title = args.title

    if args.folder:
        folder = args.folder

    #store all the plotting info in the graph as attributes
    G.graph['axes']= NUM_AXES
    G.graph['double']= False
    G.graph['folder']=folder
    G.graph['title']=title
    G.graph['nodeAttributes'],G.graph['edgeAttributes']=get_all_attributes(G)
    for m in NODE_MEASURES:
        G.graph['nodeAttributes'].append(m.__name__)
        measures = m(G)
        nx.set_node_attributes(G,m.__name__,measures)

    for m in EDGE_MEASURES:
        G.graph['edgeAttributes'].append(m.__name__)
        measures = m(G)
        nx.set_edge_attributes(G,m.__name__,measures)

    print "PARAMETERS OF NETWORK:"
    print "     Network name:", G.graph['title']
    print "     Node attributes:\n\t\t", '\n\t\t'.join(G.graph['nodeAttributes'])
    print "     Edge attributes:\n\t\t", '\n\t\t'.join(G.graph['edgeAttributes'])
    print "     Output folder", G.graph['folder']

    print_message('Making panel.')
    make_panel(G,)
Example No. 30
def makeVideoClip(filePath, start, end, withAudio):
    originalVideo = VideoFileClip(filePath)
    if(originalVideo.duration < float(end) or float(start) < 0):
        raise Exception('Wrong clip interval')
    if(float(start) > float(end)):
        raise Exception('Wrong clip interval')
    video = originalVideo.subclip(float(start), float(end))
    clipFile = ntpath.dirname(filePath) + "/clip_" + ntpath.basename(filePath)
    video.write_videofile(clipFile, audio=withAudio) # Many options...
    return clipFile
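A sketch of the clip-path construction above, with a hypothetical input: note the result mixes separators, because '/clip_' is concatenated onto ntpath.dirname's backslash output.

import ntpath

filePath = "C:\\videos\\talk.mp4"   # hypothetical input
print(ntpath.dirname(filePath) + "/clip_" + ntpath.basename(filePath))
# C:\videos/clip_talk.mp4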
Example No. 31
    def add_update(self, item, view=None):
        # Process single tvshow
        kodicursor = self.kodicursor
        emby = self.emby
        emby_db = self.emby_db
        artwork = self.artwork
        API = api.API(item)

        if settings('syncEmptyShows') == "false" and not item.get('RecursiveItemCount'):
            if item.get('Name', None) is not None:
                log.info("Skipping empty show: %s", item['Name'])
            return
        # If the item already exists in the local Kodi DB we'll perform a full item update
        # If the item doesn't exist, we'll add it to the database
        update_item = True
        force_episodes = False
        itemid = item['Id']
        emby_dbitem = emby_db.getItem_byId(itemid)
        try:
            showid = emby_dbitem[0]
            pathid = emby_dbitem[2]
            log.info("showid: %s pathid: %s", showid, pathid)

        except TypeError:
            update_item = False
            log.debug("showid: %s not found", itemid)
            showid = self.kodi_db.create_entry()

        else:
            # Verify the item is still in Kodi
            if self.kodi_db.get_tvshow(showid) is None:
                # item is not found, let's recreate it.
                update_item = False
                log.info("showid: %s missing from Kodi, repairing the entry", showid)
                # Force re-add episodes after the show is re-created.
                force_episodes = True


        if view is None:
            # Get view tag from emby
            viewtag, viewid = emby_db.getView_embyId(itemid)
            log.debug("View tag found: %s", viewtag)
        else:
            viewtag = view['name']
            viewid = view['id']

        # fileId information
        checksum = API.get_checksum()
        userdata = API.get_userdata()

        # item details
        genres = item['Genres']
        title = item['Name']
        plot = API.get_overview()
        rating = item.get('CommunityRating')
        votecount = item.get('VoteCount')
        premieredate = API.get_premiere_date()
        tvdb = API.get_provider('Tvdb')
        imdb = API.get_provider('Imdb')
        sorttitle = item['SortName']
        mpaa = API.get_mpaa()
        genre = " / ".join(genres)
        studios = API.get_studios()
        studio = " / ".join(studios)

        # Verify series pooling
        if not update_item and tvdb:
            query = "SELECT idShow FROM tvshow WHERE C12 = ?"
            kodicursor.execute(query, (tvdb,))
            try:
                temp_showid = kodicursor.fetchone()[0]
            except TypeError:
                pass
            else:
                emby_other = emby_db.getItem_byKodiId(temp_showid, "tvshow")
                if emby_other and viewid == emby_other[2]:
                    log.info("Applying series pooling for %s", title)
                    emby_other_item = emby_db.getItem_byId(emby_other[0])
                    showid = emby_other_item[0]
                    pathid = emby_other_item[2]
                    log.info("showid: %s pathid: %s", showid, pathid)
                    # Create the reference in emby table
                    emby_db.addReference(itemid, showid, "Series", "tvshow", pathid=pathid,
                                         checksum=checksum, mediafolderid=viewid)
                    update_item = True


        ##### GET THE FILE AND PATH #####
        playurl = API.get_file_path()

        if self.direct_path:
            # Direct paths is set the Kodi way
            if "\\" in playurl:
                # Local path
                path = "%s\\" % playurl
                toplevelpath = "%s\\" % dirname(dirname(path))
            else:
                # Network path
                path = "%s/" % playurl
                toplevelpath = "%s/" % dirname(dirname(path))

            if not self.path_validation(path):
                return False

            window('emby_pathverified', value="true")
        else:
            # Set plugin path
            toplevelpath = "plugin://plugin.video.emby.tvshows/"
            path = "%s%s/" % (toplevelpath, itemid)


        ##### UPDATE THE TVSHOW #####
        if update_item:
            log.info("UPDATE tvshow itemid: %s - Title: %s - ShowID: %s", itemid, title, showid)

            # update new ratings Kodi 17
            if self.kodi_version > 16:
                ratingid =  self.kodi_db.get_ratingid(showid)
                log.info("RETURNED ratingid: %s", ratingid)

                self.kodi_db.update_ratings(showid, "tvshow", "default", rating, votecount,ratingid)

            # update new uniqueid Kodi 17
            if self.kodi_version > 16:
                uniqueid =  self.kodi_db.get_uniqueid(showid)
                log.info("RETURNED uniqueid: %s", uniqueid)

                self.kodi_db.update_uniqueid(showid, "tvshow", imdb, "imdb",uniqueid)

            # Update the tvshow entry
            if self.kodi_version > 16:
                self.kodi_db.update_tvshow(title, plot, uniqueid, premieredate, genre, title,
                                             uniqueid, mpaa, studio, sorttitle, showid)
            else:
                self.kodi_db.update_tvshow(title, plot, rating, premieredate, genre, title,
                                           tvdb, mpaa, studio, sorttitle, showid)
            # Update the checksum in emby table
            emby_db.updateReference(itemid, checksum)

        ##### OR ADD THE TVSHOW #####
        else:
            log.info("ADD tvshow itemid: %s - Title: %s", itemid, title)

            # add new ratings Kodi 17
            if self.kodi_version > 16:
                ratingid =  self.kodi_db.create_entry_rating()

                self.kodi_db.add_ratings(ratingid, showid, "tvshow", "default", rating, votecount)

            # add new uniqueid Kodi 17
            if self.kodi_version > 16:
                uniqueid =  self.kodi_db.create_entry_uniqueid()

                self.kodi_db.add_uniqueid(uniqueid, showid, "tvshow", imdb, "imdb")

            # Add top path
            toppathid = self.kodi_db.add_path(toplevelpath)
            self.kodi_db.update_path(toppathid, toplevelpath, "tvshows", "metadata.local")

            # Add path
            pathid = self.kodi_db.add_path(path)

            # Create the tvshow entry
            if self.kodi_version > 16:
                self.kodi_db.add_tvshow(showid, title, plot, uniqueid, premieredate, genre,
                                    title, uniqueid, mpaa, studio, sorttitle)
            else:
                self.kodi_db.add_tvshow(showid, title, plot, rating, premieredate, genre,
                                        title, tvdb, mpaa, studio, sorttitle)

            # Create the reference in emby table
            emby_db.addReference(itemid, showid, "Series", "tvshow", pathid=pathid,
                                 checksum=checksum, mediafolderid=viewid)


        # Link the path
        self.kodi_db.link_tvshow(showid, pathid)

        # Update the path
        self.kodi_db.update_path(pathid, path, None, None)

        # Process cast
        people = artwork.get_people_artwork(item['People'])
        self.kodi_db.add_people(showid, people, "tvshow")
        # Process genres
        self.kodi_db.add_genres(showid, genres, "tvshow")
        # Process artwork
        artwork.add_artwork(artwork.get_all_artwork(item), showid, "tvshow", kodicursor)
        # Process studios
        self.kodi_db.add_studios(showid, studios, "tvshow")
        # Process tags: view, emby tags
        tags = [viewtag]
        tags.extend(item['Tags'])
        if userdata['Favorite']:
            tags.append("Favorite tvshows")
        self.kodi_db.add_tags(showid, tags, "tvshow")
        # Process seasons
        all_seasons = emby.getSeasons(itemid)
        for season in all_seasons['Items']:
            self.add_updateSeason(season, showid=showid)
        else:
            # Finally, refresh the all season entry
            seasonid = self.kodi_db.get_season(showid, -1)
            # Process artwork
            artwork.add_artwork(artwork.get_all_artwork(item), seasonid, "season", kodicursor)

        if force_episodes:
            # We needed to recreate the show entry. Re-add episodes now.
            log.info("Repairing episodes for showid: %s %s", showid, title)
            all_episodes = emby.getEpisodesbyShow(itemid)
            self.add_episodes(all_episodes['Items'], None)

        return True
Example No. 32
        filter(lambda b: b.properties.content_length != 0,
               block_blob_service.list_blobs('mwt-offline-eval'))))

eval_blobs.sort(key=lambda tup: tup[0])

print("Found {0} blobs".format(len(eval_blobs)))

# for blob in itertools.islice(generator, 5):
#     print(blob.name)
eval_out = open('c:\\temp\\eval.csv', 'w')
eval_out.write('"Name","AvgCost","time","window"\n')
for blob in eval_blobs:
    print(blob[1].name)
    fn = 'c:\\temp\\testdrive\\eval\\{0}'.format(blob[1].name)
    if not os.path.exists(fn):
        dn = ntpath.dirname(fn)
        if not os.path.exists(dn):
            os.makedirs(dn)
        block_blob_service.get_blob_to_path('mwt-offline-eval', blob[1].name,
                                            fn)

    f = open(fn, 'r')
    for line in f:
        if len(line) <= 1:
            continue
        js = json.loads(line)
        _ = eval_out.write("\"{0}\",{1},{2},{3}\n".format(
            js['name'], js['averagecost'], js['lastenqueuedtime'],
            js['window']))
    f.close()
eval_out.close()
Example No. 33
def create_georaster(tags):
    # print(tags)
    """
    :param tags:
    :return:
    """
    out_out = ntpath.dirname(indir + "/output/")
    print("out dir", out_out)
    if not os.path.exists(out_out):
        os.makedirs(out_out)
    bar = Bar('Creating GeoTIFFs', max=len(tags))

    for tag in iter(tags):

        coords = tag['geometry']['coordinates'][0]
        # lonlat = coords[0]
        pt0 = coords[0][0], coords[0][1]
        pt1 = coords[1][0], coords[1][1]
        pt2 = coords[2][0], coords[2][1]
        pt3 = coords[3][0], coords[3][1]

        # print("OMGOMG", poly)
        props = tag['properties']
        # print("PROPS", props)
        # print(props)
        file_in = indir + "/images/" + props['File_Name']
        # print("file In", file_in)
        new_name = ntpath.basename(file_in[:-3]) + 'tif'
        dst_filename = out_out + "/" + new_name
        ds = gdal.Open(file_in, 0)
        gt = ds.GetGeoTransform()
        cols = ds.RasterXSize
        rows = ds.RasterYSize
        ext = GetExtent(gt, cols, rows)
        ext0 = ext[0][0], ext[0][1]
        ext1 = ext[1][0], ext[1][1]
        ext2 = ext[2][0], ext[2][1]
        ext3 = ext[3][0], ext[3][1]

        gcp_string = '-gcp {} {} {} {} ' \
                     '-gcp {} {} {} {} ' \
                     '-gcp {} {} {} {} ' \
                     '-gcp {} {} {} {}'.format(ext0[0], ext0[1],
                                               pt2[0], pt2[1],
                                               ext1[0], ext1[1],
                                               pt3[0], pt3[1],
                                               ext2[0], ext2[1],
                                               pt0[0], pt0[1],
                                               ext3[0], ext3[1],
                                               pt1[0], pt1[1])

        gcp_items = filter(None, gcp_string.split("-gcp"))
        gcp_list = []
        for item in gcp_items:
            pixel, line, x, y = map(float, item.split())
            z = 0
            gcp = gdal.GCP(x, y, z, pixel, line)
            gcp_list.append(gcp)

        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        wkt = srs.ExportToWkt()
        ds = gdal.Translate(dst_filename,
                            ds,
                            outputSRS=wkt,
                            GCPs=gcp_list,
                            noData=0)
        ds = None
        bar.next()
    bar.finish()
    return
Example No. 34
def gen_sch_ptr_table(mf_folder):
    """
    Extracts the sch reentry pointer positions from the VS*.bin file by
    analyzing the sch.info file. These pointers are used to determine
    where to re-enter the sch table after an #exit command occurred.

    Parameters
    ----------
    mf_folder : string
        absolute path to the folder with MF section files.

    """
    working_dir = mf_folder
    start_pts = []

    cfile = find("*.SCH.info", working_dir)[0]

    folder = ntpath.dirname(cfile) + '/'
    base_name = ntpath.basename(cfile).split(".")[0]

    with codecs.open(cfile, 'r', 'utf-8') as ifile:
        lines = ifile.read().split('\n')
        for j in range(0, len(lines)):
            if '#starta' in lines[j]:
                break
        for i in range(j, len(lines)):
            start_pts.append(i)
            if '#endz' in lines[i]:
                break
        for j in range(i, len(lines)):
            if '#start' in lines[j]:
                start_pts.append(j)

    cfile = find("*.SIF", working_dir)[0]
    with open(cfile, 'rb') as ifile:
        search = ifile.read()

    sch_size = int.from_bytes(search[0:2], 'big')
    tlm_size = int.from_bytes(search[2:4], 'big')
    san_size = int.from_bytes(search[4:6], 'big')

    cfile = find("VS*.bin", working_dir)[0]
    with open(cfile, 'rb') as ifile:
        bin_data = ifile.read()

    end_pos = bin_data.find(search)
    pos = end_pos - (sch_size + tlm_size + san_size) * 2

    start_pts = np.array(start_pts)
    sort_idx = np.argsort(start_pts)
    unsort_idx = np.argsort(sort_idx)
    start_pts = start_pts[sort_idx].tolist()
    ptr_lst = []
    start_ptr = start_pts.pop(0).to_bytes(2, 'big')
    while pos < end_pos:
        elem = bin_data[pos:pos + 2]
        if elem == start_ptr:
            ptr_lst.append(pos)
            try:
                start_ptr = start_pts.pop(0).to_bytes(2, 'big')
            except IndexError:
                break
        pos += 2

    ptr_lst = np.array(ptr_lst)
    ptr_lst = ptr_lst[unsort_idx].tolist()
    with open(folder + base_name + '.SCH.ptr', 'wb') as outfile:
        for ptr in ptr_lst:
            outfile.write(ptr.to_bytes(4, 'big'))
Example No. 35
def main(*argv):
    '''handles user input and creates a panel'''
    parser = argparse.ArgumentParser(
        description=
        'This script takes networks and creates the necessary files to make an interactive Hive panel'
    )
    parser.add_argument('-input', help='Location of network file')
    parser.add_argument('-format', help='Input format of network')
    parser.add_argument('-nodes', help='Location of node network file')
    parser.add_argument('-edges', help='Location of edge network file')
    parser.add_argument('-title', help='Title/Name of graph')
    parser.add_argument('-folder', help='Output folder')
    parser.add_argument('-axes', help='Number of axes', default=NUM_AXES)
    parser.add_argument('-double',
                        help='Makes hive plots with doubled axes',
                        action='store_true')
    args = parser.parse_args()

    #Get graph in networkx format
    if args.format == 'graphml':
        print "Reading .graphml as a networkx graph."
        G = import_graphml(args.input)
        title = basename(args.input).split('.')[0]
        folder = dirname(args.input)
    elif args.format == 'txt':
        print "Reading .txt as a networkx graph."
        G = import_graph(args.nodes, args.edges)
        title = basename(args.nodes).split('.')[0]
        folder = dirname(args.nodes)
    else:
        print "Please specify the format of your network: .gexf, .graphml, or a 2 .txt files with node and edge attribute."
        parser.print_help()
        sys.exit()

    if args.title:
        title = args.title

    if args.folder:
        folder = args.folder

    #store all the plotting info in the graph as attributes
    G.graph['axes'] = args.axes
    G.graph['double'] = args.double
    G.graph['folder'] = folder
    G.graph['title'] = title
    G.graph['nodeAttributes'], G.graph['edgeAttributes'] = get_all_attributes(
        G)
    for m in NODE_MEASURES:
        G.graph['nodeAttributes'].append(m.__name__)
        measures = m(G)
        nx.set_node_attributes(G, m.__name__, measures)

    for m in EDGE_MEASURES:
        G.graph['edgeAttributes'].append(m.__name__)
        measures = m(G)
        nx.set_edge_attributes(G, m.__name__, measures)

    for n, v in G.graph.iteritems():
        print n, v

    print 'Making panel.'
    make_panel(G, )
Example No. 36
    def laser_data(self, **kwargs):
        '''
        Class method `laser_data` extracts laser data from the given file, assuming laser data is of type `sensor_msgs/LaserScan`.

        Parameters
        -------------
        kwargs
            variable keyword arguments

        Returns
        ---------
        `list`
            A list of strings. Each string corresponds to the file path of a CSV file that contains the extracted laser scan data

        Example
        ----------
        >>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag') 
        >>> laserdatafile = b.laser_data()
        >>> print(laserdatafile)

        '''
        tstart = None
        tend = None

        type_to_look = "sensor_msgs/LaserScan"
        table_rows = self.topic_table[self.topic_table['Types'] == type_to_look]
        topics_to_read = table_rows['Topics'].values
        message_counts = table_rows['Message Count'].values
        
        column_names = ["Time",
                                "header.seq", 
                                "header.frame_id", 
                                "angle_min" , 
                                "angle_max", 
                                "angle_increment", 
                                "time_increment", 
                                "scan_time", 
                                "range_min", 
                                "range_max"]

        for p in range(0, 182):
            column_names.append("ranges_" + str(p))
        for p in range(0, 182):
            column_names.append("intensities_" + str(p))

        all_msg = []
        csvlist = []
        for i in range(len(table_rows)):
            tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
            file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
            #msg_list = [LaserScan() for count in range(message_counts[i])]
            k = 0

            if sys.hexversion >= 0x3000000:
                opencall = open(file_to_write, "w", newline='')
            else:
                opencall = open(file_to_write, 'wb')

            with opencall as f:
                writer = csv.writer(f, delimiter=',')
                writer.writerow(column_names) # write the header
                for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend): 
                    #msg_list[k] = msg
                    
                    new_row = [t.secs + t.nsecs*1e-9, 
                                            msg.header.seq, 
                                            msg.header.frame_id, 
                                            msg.angle_min,
                                            msg.angle_max, 
                                            msg.angle_increment, 
                                            msg.time_increment, 
                                            msg.scan_time,  
                                            msg.range_min, 
                                            msg.range_max]

                    ranges = [None]*182
                    intensities = [None]*182

                    for ir, ran in enumerate(msg.ranges):
                        ranges[ir] = ran

                    for ir, ran in enumerate(msg.intensities):
                        intensities[ir] = ran

                    new_row  = new_row + ranges
                    new_row = new_row + intensities
                    writer.writerow(new_row)
                
                k = k + 1

            csvlist.append(file_to_write)
        return csvlist
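The dirname/basename[1:] step above strips the leading '-' that the topic-name replacement leaves on the file name; a minimal sketch with a hypothetical path:

import ntpath

tempfile = "/data/bag/-scan.csv"   # hypothetical: topic "/scan" becomes "-scan.csv"
print(ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:])
# /data/bag/scan.csv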
Example No. 37
coeffs = [[9.0]]
coeffs = [[2.0, 1.5, 2.5]]
#coeffs=[[11.0]]
#coeffs=[[7.5]]
#coeffs=[[1.0]]
coeffs = [[
    2.0, 3.5, 3.0, 2.5, 5.0, 4.5, 4.0, 6.5, 6.5, 6.0, 6.0, 6.0, 5.5, 5.5, 5.5,
    5.5
]]
#coeffs=[[11.0],
#        [9.0]]
for i in nominal_models:
    gas = ct.Solution(i)
    gas.name = 'igDelayRateSwap_' + ntpath.splitext(ntpath.basename(i))[0]
    gas2 = em.efficiency_rate_swap(gas, [val])
    newfilename = ntpath.dirname(i) + '\\modified2_' + ntpath.basename(i)
    new_file = ctiw.write(gas2, newfilename)
    modified_models.append(new_file)

conditionsTup = []
for n in np.arange(len(nominal_models)):
    for p in np.arange(len(P)):
        for i in np.arange(len(T)):
            for subf in np.arange(len(fuels[n])):
                oxidizer = {}
                oxidizer = {
                    'O2': coeffs[n][subf],
                    'N2': 3.76 * coeffs[n][subf]
                }
                conditionsTup.append([
                    nominal_models[n], modified_models[n], P[p], phi,
Example No. 38
    def _add_metatadata_listofstrings(self, key, value):
        if not value:
            logger.info("no values provided for {}, skipping".format(key))
            return
        value = convert_to_unicode(value)
        obj = self.metadata.setdefault(key, [])
        if key == "debug" or value not in obj:
            obj.append(value)

        if key == "filepath":
            # use ntpath instead of os.path so we are consistent across platforms. ntpath
            # should work for both windows and unix paths. os.path works for the platform
            # you are running on, not necessarily what the malware was written for.
            # Ex. when running mwcp on linux to process windows
            # malware, os.path will fail due to not handling
            # backslashes correctly.
            self.add_metadata("filename", ntpath.basename(value))
            self.add_metadata("directory", ntpath.dirname(value))

        if key == "c2_url":
            self.add_metadata("url", value)

        if key in ("c2_address", "proxy_address"):
            self.add_metadata("address", value)

        if key == "serviceimage":
            # we use tactic of looking for first .exe in value. This is
            # not guaranteed to be reliable
            if ".exe" in value:
                self.add_metadata("filepath", value[0:value.find(".exe") + 4])

        if key == "servicedll":
            self.add_metadata("filepath", value)

        if key == "ssl_cer_sha1":
            if not self.SHA1_RE.match(value):
                logger.error("Invalid SHA1 hash found: {!r}".format(value))

        if key in ("url", "c2_url"):
            # http://[fe80::20c:1234:5678:9abc]:80/badness
            # http://bad.com:80
            # ftp://127.0.0.1/really/bad?hostname=pwned
            match = self.URL_RE.search(value)
            if not match:
                logger.error("Error parsing as url: %s" % value)
                return

            if match.group("path"):
                self.add_metadata("urlpath", match.group("path"))

            if match.group("address"):
                address = match.group("address").rstrip(": ")
                if address.startswith("["):
                    # ipv6--something like
                    # [fe80::20c:1234:5678:9abc]:80
                    domain, found, port = address[1:].partition("]:")
                else:
                    domain, found, port = address.partition(":")

                if found:
                    if port:
                        if key == "c2_url":
                            self.add_metadata("c2_socketaddress",
                                              [domain, port, "tcp"])
                        else:
                            self.add_metadata("socketaddress",
                                              [domain, port, "tcp"])
                    else:
                        logger.error(
                            "Invalid URL {!r}: found ':' without a port."
                            .format(address))
                else:
                    if key == "c2_url":
                        self.add_metadata("c2_address", address)
                    else:
                        self.add_metadata("address", address)
Exemplo n.º 39
0
            ["%f" % float(t) for t in tokens if isnumeric(t)])

        outputfile.write(out_str + '\n')
        print(line)
        print(out_str)

    outputfile.close()
    inputfile.close()


xxx_logic = [("kaponir", -90), ("eisk", 180), ("tramplin", 180)]

basicFilter = "*.scn"
path = cmds.fileDialog2(fileFilter=basicFilter, dialogStyle=2, fileMode=1)

main_dir = ntpath.dirname(path[0])
inputfile = open(path[0])

lands_counter = 0
light_masts = 0
sectors = 0
files_loaded = []
idx = 0
land_line = []
bak_files = []
mtl_files = []

for file in os.listdir(main_dir + "/Land"):
    if file.endswith(".mtl.bak"):
        bak_files.append(file)
    if file.endswith(".mtl"):
Exemplo n.º 40
0
    def process(self):
        sample = self.current_task.get_resource("sample")
        self.log.info("hostname: {}".format(socket.gethostname()))
        sha256sum = hashlib.sha256(sample.content).hexdigest()
        magic_output = magic.from_buffer(sample.content)
        self.log.info("running sample sha256: {}".format(sha256sum))

        timeout = self.current_task.payload.get('timeout') or 60 * 10
        hard_time_limit = 60 * 20
        if timeout > hard_time_limit:
            self.log.error("Tried to run the analysis for more than hard limit of %d seconds", hard_time_limit)
            return

        analysis_uid = self.current_task.uid
        override_uid = self.current_task.payload.get('override_uid')

        self.log.info(f"analysis UID: {analysis_uid}")

        if override_uid:
            analysis_uid = override_uid
            self.log.info(f"override UID: {override_uid}")
            self.log.info("note that artifacts will be stored under this overriden identifier")

        self.rs.set(f"drakvnc:{analysis_uid}", INSTANCE_ID, ex=3600)  # 1h

        workdir = '/tmp/drakrun/vm-{}'.format(int(INSTANCE_ID))

        extension = self.current_task.headers.get("extension", "exe").lower()
        if '(DLL)' in magic_output:
            extension = 'dll'
        self.log.info("Running file as %s", extension)

        file_name = self.current_task.payload.get("file_name", "malwar") + f".{extension}"
        # Alphanumeric, dot, underscore, dash
        if not re.match(r"^[a-zA-Z0-9\._\-]+$", file_name):
            self.log.error("Filename contains invalid characters")
            return
        self.log.info("Using file name %s", file_name)

        # Save sample to disk here as some branches of _get_start_command require file path.
        try:
            shutil.rmtree(workdir)
        except Exception as e:
            print(e)
        os.makedirs(workdir, exist_ok=True)
        with open(os.path.join(workdir, file_name), 'wb') as f:
            f.write(sample.content)

        start_command = self.current_task.payload.get("start_command", self._get_start_command(extension, sample, os.path.join(workdir, file_name)))
        if not start_command:
            self.log.error("Unable to run malware sample, could not generate any suitable command to run it.")
            return

        outdir = os.path.join(workdir, 'output')
        os.mkdir(outdir)
        os.mkdir(os.path.join(outdir, 'dumps'))

        metadata = {
            "sample_sha256": sha256sum,
            "magic_output": magic_output,
            "time_started": int(time.time())
        }

        with open(os.path.join(outdir, 'sample_sha256.txt'), 'w') as f:
            f.write(hashlib.sha256(sample.content).hexdigest())

        watcher_tcpdump = None
        watcher_dnsmasq = None

        for _ in range(3):
            try:
                self.log.info("running vm {}".format(INSTANCE_ID))
                watcher_dnsmasq = start_dnsmasq(INSTANCE_ID, self.config.config['drakrun'].get('dns_server', '8.8.8.8'))

                d_run.logging = self.log
                d_run.run_vm(INSTANCE_ID)

                watcher_tcpdump = start_tcpdump_collector(INSTANCE_ID, outdir)

                self.log.info("running monitor {}".format(INSTANCE_ID))

                kernel_profile = os.path.join(PROFILE_DIR, "kernel.json")
                runtime_profile = os.path.join(PROFILE_DIR, "runtime.json")
                with open(runtime_profile, 'r') as runtime_f:
                    rp = json.loads(runtime_f.read())
                    inject_pid = rp['inject_pid']
                    kpgd = rp['vmi_offsets']['kpgd']

                hooks_list = os.path.join(ETC_DIR, "hooks.txt")
                dump_dir = os.path.join(outdir, "dumps")
                drakmon_log_fp = os.path.join(outdir, "drakmon.log")

                injector_cmd = ["injector",
                                "-o", "json",
                                "-d", "vm-{vm_id}".format(vm_id=INSTANCE_ID),
                                "-r", kernel_profile,
                                "-i", inject_pid,
                                "-k", kpgd,
                                "-m", "writefile",
                                "-e", f"%USERPROFILE%\\Desktop\\{file_name}",
                                "-B", os.path.join(workdir, file_name)]

                self.log.info("Running injector...")
                injector = subprocess.Popen(injector_cmd, stdout=subprocess.PIPE)
                outs, errs = injector.communicate(b"", 20)

                if injector.returncode != 0:
                    raise subprocess.CalledProcessError(injector.returncode, injector_cmd)

                injected_fn = json.loads(outs)['ProcessName']
                net_enable = int(self.config.config['drakrun'].get('net_enable', '0'))

                if "%f" not in start_command:
                    self.log.warning("No file name in start command")

                cwd = subprocess.list2cmdline([ntpath.dirname(injected_fn)])
                cur_start_command = start_command.replace("%f", injected_fn)

                # don't include our internal maintenance commands
                metadata['start_command'] = cur_start_command
                cur_start_command = f"cd {cwd} & " + cur_start_command

                if net_enable:
                    cur_start_command = "ipconfig /renew & " + cur_start_command

                full_cmd = subprocess.list2cmdline(["cmd.exe", "/C", cur_start_command])
                self.log.info("Using command: %s", full_cmd)

                drakvuf_cmd = ["drakvuf",
                               "-o", "json",
                               "-x", "poolmon",
                               "-x", "objmon",
                               "-x", "socketmon",
                               "-j", "5",
                               "-t", str(timeout),
                               "-i", inject_pid,
                               "-k", kpgd,
                               "-d", "vm-{vm_id}".format(vm_id=INSTANCE_ID),
                               "--dll-hooks-list", hooks_list,
                               "--memdump-dir", dump_dir,
                               "-r", kernel_profile,
                               "-e", full_cmd]

                drakvuf_cmd.extend(self.get_profile_list())

                syscall_filter = self.config.config['drakrun'].get('syscall_filter', None)
                if syscall_filter:
                    drakvuf_cmd.extend(["-S", syscall_filter])

                with open(drakmon_log_fp, "wb") as drakmon_log:
                    drakvuf = subprocess.Popen(drakvuf_cmd, stdout=drakmon_log)

                    try:
                        exit_code = drakvuf.wait(timeout + 60)
                    except subprocess.TimeoutExpired as e:
                        logging.error("BUG: Monitor command doesn't terminate automatically after timeout expires.")
                        logging.error("Trying to terminate DRAKVUF...")
                        drakvuf.terminate()
                        try:
                            drakvuf.wait(10)
                        except subprocess.TimeoutExpired:
                            logging.error("BUG: Monitor command also doesn't terminate after sending SIGTERM.")
                            drakvuf.kill()
                            drakvuf.wait()
                            logging.error("Monitor command was forcefully killed.")
                        raise e

                    if exit_code != 0:
                        raise subprocess.CalledProcessError(exit_code, drakvuf_cmd)
                break
            except subprocess.CalledProcessError:
                self.log.info("Something went wrong with the VM {}".format(INSTANCE_ID), exc_info=True)
            finally:
                try:
                    subprocess.run(["xl", "destroy", "vm-{}".format(INSTANCE_ID)], cwd=workdir, check=True)
                except subprocess.CalledProcessError:
                    self.log.info("Failed to destroy VM {}".format(INSTANCE_ID), exc_info=True)

                if watcher_dnsmasq:
                    watcher_dnsmasq.terminate()
        else:
            self.log.info("Failed to analyze sample after 3 retries, giving up.")
            return

        self.log.info("waiting for tcpdump to exit")

        if watcher_tcpdump:
            try:
                watcher_tcpdump.wait(timeout=60)
            except subprocess.TimeoutExpired:
                self.log.exception("tcpdump doesn't exit cleanly after 60s")

        self.crop_dumps(os.path.join(outdir, 'dumps'), os.path.join(outdir, 'dumps.zip'))
        if os.path.exists("/opt/procdot/procmon2dot"):
            self.generate_graphs(outdir)
        self.slice_logs(outdir)
        self.log.info("uploading artifacts")

        metadata['time_finished'] = int(time.time())

        with open(os.path.join(outdir, 'metadata.json'), 'w') as f:
            f.write(json.dumps(metadata))

        payload = {"analysis_uid": analysis_uid}
        payload.update(metadata)

        t = Task(
            {
                "type": "analysis",
                "kind": "drakrun",
                "quality": self.current_task.headers.get("quality", "high")
            },
            payload=payload
        )

        for resource in self.upload_artifacts(analysis_uid, workdir):
            t.add_payload(resource.name, resource)

        t.add_payload('sample', sample)
        self.send_task(t)
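A condensed, self-contained sketch of the guest command assembly performed above; the injected path and start command are illustrative stand-ins, not values from a real run:

import ntpath
import subprocess

injected_fn = r"C:\Users\user\Desktop\malwar.exe"  # illustrative value
start_command = "%f"                               # illustrative value

cwd = subprocess.list2cmdline([ntpath.dirname(injected_fn)])
cur_start_command = start_command.replace("%f", injected_fn)
cur_start_command = f"cd {cwd} & " + cur_start_command
full_cmd = subprocess.list2cmdline(["cmd.exe", "/C", cur_start_command])
# full_cmd now runs the sample from its own directory inside the guest
print(full_cmd)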
Exemplo n.º 41
0
from tkinter import *
from tkinter import messagebox
from tkinter.font import Font
import sqlite3
import hashing_module
import vault_layout
import os
import ntpath


filepath = ntpath.dirname(__file__)
os.chdir(filepath)  # os.system(f'cd ...') would only change directory in a throwaway subshell

class GUI(Tk):
    def __init__(self):
        super().__init__()
        self.pos_top = 700
        self.pos_bot = 300
        self.geometry(f'500x250+{self.pos_top}+{self.pos_bot}')
        self.resizable(False,False)
        self.title("Login")

        # ------------------Header--------------------------------------------
        font_head = Font(family = "", weight = "bold", size = 20)
        #-------------------Buttons,Labels,etc..------------------------------
        label_text = Label(self, text = "Encrypted File Vault", anchor = CENTER, font = font_head)
        label_text.pack(pady = 10)
        #username_frame
        frame_username = Frame(self)
        frame_username.pack()
Exemplo n.º 42
0
        'tmpDownloadPath': 'tmp',
        'musicDir': '',
    }

    parser['CONFIG'] = defaultConfig
    with open(f'{modulePath}/config.ini', 'w+') as f:
        parser.write(f)


def writeToConfig(key, value):
    parser.set('CONFIG', key, value)
    with open(f'{modulePath}/config.ini', 'w') as configfile:
        parser.write(configfile)


modulePath = dirname(__file__)

#loading config
parser = configparser.ConfigParser(allow_no_value=True)
parser.optionxform = str

if not os.path.exists(f'{modulePath}/config.ini'):
    createDefaultConfig(parser)

else:
    parser.read(f'{modulePath}/config.ini')

section = parser['CONFIG']

# global config variables
filePrependRE = compile(r'\d+_')
Exemplo n.º 43
0
import argparse
import sys
try:
    from ConfigParser import SafeConfigParser
except ImportError:
    from configparser import SafeConfigParser
from argparse import RawTextHelpFormatter

if sys.platform in ['win32', 'cygwin']:
    import ntpath as ospath
else:
    import os.path as ospath

SETTINGS = SafeConfigParser()
SETTINGS.read('{0}/settings.conf'.format(
    ospath.dirname(ospath.realpath(__file__))))

PARSER = argparse.ArgumentParser(
    description='Assign EC2 Elastic IP to the current instance',
    formatter_class=RawTextHelpFormatter)
PARSER.add_argument(
    '--version',
    action='count',
    help='Print the aws-ec2-assign-elastic-ip version and exit')
PARSER.add_argument('--region',
                    default='us-east-1',
                    help='AWS region. Default: us-east-1')
PARSER.add_argument('--access-key', help='AWS access key ID')
PARSER.add_argument('--secret-key', help='AWS secret access key ID')
PARSER.add_argument(
    '--dry-run',
Exemplo n.º 44
0
# See <http://www.gnu.org/licenses/>.

import getpass, ntpath, os, re, sys, string, xbmc, xbmcgui
from xml.dom.minidom import Document, parseString

# Global constants
#

VERSION = "0.2b3"			# Version
INIFILE = "emulators.ini"	# Manually editable emulator file
XMLFILE = "romlist.xml"		# Cached roms
DEBUG = 'true'				# Set to true to display script info at the bottom of the screen

# Paths
HOMEDIR = os.getcwd().replace(";","")+"/"	# script.dirname #SELFNOTE: Remove trailing slash, it's uncommon
SCRIPTHOME = ntpath.dirname(ntpath.dirname(HOMEDIR)) # blaat/.xbmc/scripts/My Scripts
SCRIPTUSR = "******"

# Distinct menus
MENU_EMULIST = int(1)
MENU_ROMLIST = int(2)

# Keymap
# See guilib/Key.h
ACTION_MOVE_UP =		int(3)
ACTION_MOVE_DOWN =		int(4)
ACTION_PARENT_DIR =		int(9)
ACTION_PREVIOUS_MENU =	int(10)
ACTION_SHOW_INFO =		int(11) # GREEN - 195
ACTION_CONTEXT_MENU =	int(117) # RED - 229
ACTION_SHOW_GUI =		int(18) # YELLOW - 213
Exemplo n.º 45
0
def solver(width, conditions, results, output):
    phi = conditions[3]
    nominals = []
    modifieds = []
    for i in np.arange(len(phi)):
        try:
            oxidizer = conditions[5]
            nominal_model = conditions[0]
            modified_model = conditions[1]
            pressure = conditions[2]
            fuel = conditions[4]
            T = conditions[6]
            #print(phi[i],fuel,oxidizer)
            gas = ct.Solution(nominal_model)
            gas.TP = T, pressure * ct.one_atm
            results.append(
                ff.free_flame(phi[i],
                              fuel,
                              oxidizer,
                              gas,
                              width,
                              kinetic_sens=0,
                              energycon=True,
                              flamespeed_sens=1,
                              soret=False))
            results[-1].add_mechanism(nominal_model)
            results[-1].add_fuel(fuel)
            nominals.append(results[-1].solution['u'][0])
        except Exception:
            nominals.append('failed')

    for i in np.arange(len(phi)):
        try:
            oxidizer = conditions[5]
            nominal_model = conditions[0]
            modified_model = conditions[1]
            pressure = conditions[2]
            fuel = conditions[4]
            T = conditions[6]
            gas2 = ct.Solution(modified_model)
            gas2.TP = T, pressure * ct.one_atm
            #print(phi[i],fuel,oxidizer)
            results.append(
                ff.free_flame(phi[i],
                              fuel,
                              oxidizer,
                              gas2,
                              width,
                              kinetic_sens=0,
                              energycon=True,
                              flamespeed_sens=1,
                              soret=False))
            results[-1].add_mechanism(modified_model)
            results[-1].add_fuel(fuel)
            #differences=results[-1].solution['u'][0]-results[-2].solution['u'][0]
            #percent_diff=100.0*np.divide(differences,results[-2].solution['u'][0])
            modifieds.append(results[-1].solution['u'][0])
        except Exception:
            modifieds.append('failed')
        #results[-1].percent_diff(percent_diff)
    #print(phi)
    #print(nominals)


#    if 'failed' not in modifieds and 'failed' not in nominals:
#        plt.figure()
#        plt.plot(phi,nominals,'b-')
#        plt.plot(phi,modifieds,'r--')
#        plt.savefig(os.getcwd()+'\\figures\\collider_screening\\'+ntpath.dirname(nominal_model).split('\\')[-1]+'_'+fuel+'_'+str(pressure)+'atm'+str(T)+'K_flamespeed'+'.pdf',dpi=1200,bbox_inches='tight')
    if 'failed' in modifieds or 'failed' in nominals:
        tempn = []
        tempm = []
        tempp = []
        for i in np.arange(len(modifieds)):
            if modifieds[i] != 'failed' and nominals[i] != 'failed':
                tempp.append(phi[i])
                tempm.append(modifieds[i])
                tempn.append(nominals[i])
        phi = tempp
        nominals = tempn
        modifieds = tempm
    if len(nominals) > 0 and len(phi) > 0 and len(modifieds) > 0:
        plt.figure()
        plt.plot(phi, nominals, 'b-')
        plt.plot(phi, modifieds, 'r--')
        plt.savefig(os.getcwd() + '\\figures\\collider_screening\\' +
                    ntpath.dirname(nominal_model).split('\\')[-1] + '_' +
                    fuel + '_' + str(pressure) + 'atm' + str(T) +
                    'K_flamespeed' + '.pdf',
                    dpi=1200,
                    bbox_inches='tight')
        a = pd.DataFrame(columns=['phi', 'nominal', 'modified'])
        a['phi'] = phi
        a['nominal'] = nominals
        a['modified'] = modifieds
        a.to_csv(os.getcwd() + '\\figures\\collider_screening\\' +
                 ntpath.dirname(nominal_model).split('\\')[-1] + '_' + fuel +
                 '_' + str(pressure) + 'atm' + str(T) + 'K_flamespeed.csv',
                 index=False)
        diffs = np.subtract(nominals, modifieds)
        percent_diffs = 100.0 * np.divide(diffs, nominals)
        max_dif = np.max(np.abs(percent_diffs))

        with open(output, 'a') as f:
            f.write('Model: ' + nominal_model.split('\\')[-2] +
                    ', Pressure: ' + str(pressure) + ', Fuel: ' + fuel +
                    '\n   Max Percent Difference: ' + str(max_dif) + '\n')
    elif not modifieds:
        with open(output, 'a') as f:
            f.write('Model: ' + nominal_model.split('\\')[-2] +
                    ', Pressure: ' + str(pressure) + ', Fuel: ' + fuel +
                    '\n   Max Percent Difference: failed' + '\n')
Exemplo n.º 46
0
    def process_pdf(self, pdf_file, output, service, generateIDs,
                    consolidate_header, consolidate_citations, force,
                    teiCoordinates):
        # check if TEI file is already produced
        # we use ntpath here to be sure it will work on Windows too
        pdf_file_name = ntpath.basename(pdf_file)
        if output is not None:
            filename = os.path.join(
                output,
                os.path.splitext(pdf_file_name)[0] + '.tei.xml')
        else:
            filename = os.path.join(
                ntpath.dirname(pdf_file),
                os.path.splitext(pdf_file_name)[0] + '.tei.xml')

        if not force and os.path.isfile(filename):
            print(
                filename,
                "already exist, skipping... (use --force to reprocess pdf input files)"
            )
            return

        print(pdf_file)
        files = {
            'input': (pdf_file, open(pdf_file, 'rb'), 'application/pdf', {
                'Expires': '0'
            })
        }

        the_url = 'http://' + self.config['grobid_server']
        if len(self.config['grobid_port']) > 0:
            the_url += ":" + self.config['grobid_port']
        the_url += "/api/" + service

        # set the GROBID parameters
        the_data = {}
        if generateIDs:
            the_data['generateIDs'] = '1'
        if consolidate_header:
            the_data['consolidateHeader'] = '1'
        if consolidate_citations:
            the_data['consolidateCitations'] = '1'
        if teiCoordinates:
            the_data['teiCoordinates'] = self.config['coordinates']

        res, status = self.post(url=the_url,
                                files=files,
                                data=the_data,
                                headers={'Accept': 'text/plain'})

        if status == 503:
            time.sleep(self.config['sleep_time'])
            # retry the same request with the full set of parameters
            return self.process_pdf(pdf_file, output, service, generateIDs,
                                    consolidate_header, consolidate_citations,
                                    force, teiCoordinates)
        elif status != 200:
            print('Processing failed with error ' + str(status))
        else:
            # writing TEI file
            try:
                with io.open(filename, 'w', encoding='utf8') as tei_file:
                    tei_file.write(res.text)
            except OSError:
                print("Writing resulting TEI XML file %s failed" % filename)
Exemplo n.º 47
0
def run_tests(cl1_conn, cl2_conn, cl1_test_ip, cl2_test_ip, runtime, p_sizes,
              streams, timestamp, test_title, protocol, tcpwin, export_dir):
    series_time = str(timedelta(seconds = 2 * len(p_sizes) * (runtime + 30) + 20))
    tprint('\033[92mStarting ' + protocol + ' tests.\033[0m Expected run time: ' + series_time)
    top_dir_name = timestamp + '_' + protocol + '_' + str(streams) + '_st'
    common_filename = protocol + '_' + str(streams) + '_st_' + timestamp
    print_unit = 'Buffer' if protocol == 'TCP' else 'Datagram'
    raw_data_subdir="raw-data"
    dir_prep(join(export_dir, top_dir_name), raw_data_subdir)
    dir_time = join(export_dir, top_dir_name, raw_data_subdir, common_filename)
    html_name = join(export_dir, top_dir_name, common_filename + ".html")
    one2two_images = []
    two2one_images = []
    all_one2two_failed = False
    all_two2one_failed = False
    stop_server(cl1_conn, dir_time)
    stop_server(cl2_conn, dir_time)
    if cl1_conn.islocal() or cl2_conn.islocal():
        localpart = True
    else:
        localpart = False

    connlist = [
                [cl1_conn, cl2_conn, 'one2two', cl2_test_ip, one2two_images, 'Plotting cl1 --> cl2 summary...'],
                [cl2_conn, cl1_conn, 'two2one', cl1_test_ip, two2one_images, 'Plotting cl2 --> cl1 summary...']
               ]
    for c in connlist:
        [client_conn, server_conn, direction, server_addr, image_list, plot_message] = c
        tot_iperf_mean = -1.0
        iperf_tot = []
        mpstat_tot = []
        for p in p_sizes:
            size_name = format(p, '05d') + 'B'
            init_name = dir_time + '_' + direction + '_' + size_name
            iperf_sumname = dir_time + '_' + direction + '_iperf_summary'
            mpstat_sumname = dir_time + '_' + direction + '_mpstat_summary'
            combined_sumname = dir_time + '_' + direction + '_summary'
            print('++++++++++++++++++++++++++++++++++++++++++++++++++')
            try:
                run_server(protocol, init_name, dir_time, server_conn, tcpwin)
                test_completed, repetitions = run_client(server_addr, runtime, p, streams,
                                                         init_name, dir_time, protocol,
                                                         client_conn, localpart, tcpwin)
                stop_server(server_conn, dir_time)
                print('Parsing results...')
                if localpart:
                    mpstat_array, tot_mpstat_mean, tot_mpstat_stdev = get_mpstat_data_single(init_name + '_mpstat.dat')
                    mpstat_tot.append([ p, tot_mpstat_mean, tot_mpstat_stdev ])
                    export_single_data(mpstat_array, init_name + '_mpstat_processed.dat')
                    mpstat_single_file = basename(init_name + '_mpstat_processed.dat')
                else:
                    mpstat_single_file = None

                (iperf_array, tot_iperf_mean, tot_iperf_stdev, server_fault) =\
                get_iperf_data_single(init_name + '_iperf.dat', protocol, streams, repetitions)
                if server_fault == 'too_few':
                    print('\033[93mWARNING:\033[0m The server received fewer connections than expected.')
                elif server_fault == 'too_many':
                    print('\033[93mWARNING:\033[0m The server received more connections than expected.')

            except ValueError as err:
                tprint('\033[91mERROR:\033[0m ' + err.args[0] + ' Skipping test...')
                image_list.append(get_round_size_name(p, gap = True))
                iperf_tot.append([ -1, p, 0, 0, 0 ])
                print('==================================================')
                continue

            # Get the "humanly readable" rate and its units.
            # This is just to put in the output data file, not for any calculations.
            # The units will be constant, and will be fixed after the first measurement.
            try:
                hr_net_rate = tot_iperf_mean / float(rate_factor)
            except NameError:
                # first measurement: fix the units and conversion factor once
                _, rate_units, rate_factor = get_size_units_factor(tot_iperf_mean, rate=True)
                hr_net_rate = tot_iperf_mean / float(rate_factor)

            export_single_data(iperf_array, init_name + '_iperf_processed.dat')
            write_gp(init_name + '.plt', basename(init_name + '_iperf_processed.dat'),
                     mpstat_single_file, basename(init_name + '.png'),
                     tot_iperf_mean, protocol, streams, print_unit, cl1_pretty_name,
                     cl2_pretty_name, plot_type = 'singlesize', direction = direction,
                     finished = test_completed, server_fault = server_fault,
                     packet_size = p, tcpwin = tcpwin)
            print('Plotting...')
            pr = Popen([gnuplot_bin, basename(init_name + '.plt')],
                       cwd = dirname(dir_time))
            pr.wait()
            image_list.append(join(raw_data_subdir, basename(init_name + '.png')))
            iperf_tot.append([ yes_and_no(test_completed, server_fault), p,
                              tot_iperf_mean, tot_iperf_stdev, hr_net_rate ])
            print('==================================================')

        if tot_iperf_mean > 0.0:
            print(plot_message)
            np.savetxt(iperf_sumname + '.dat', iperf_tot, fmt='%g',
                       header= ('TestOK ' + print_unit +
                                'Size(B) BW(b/s) Stdev(b/s) BW(' +
                                rate_units + ')'))

            if localpart:
                np.savetxt(mpstat_sumname + '.dat', mpstat_tot, fmt = '%g',
                           header = print_unit + 'Size(B) Frac Stdev')
                mpstat_ser_file = basename(mpstat_sumname + '.dat')
            else:
                mpstat_ser_file = None

            non_failed_BW = [l[2] for l in iperf_tot if l[2]]
            tot_iperf_mean = sum(non_failed_BW)/len(non_failed_BW)
            write_gp(combined_sumname + '.plt', basename(iperf_sumname + '.dat'),
                     mpstat_ser_file, basename(combined_sumname + '.png'),
                     tot_iperf_mean, protocol, streams, print_unit, cl1_pretty_name,
                     cl2_pretty_name, plot_type = 'multisize', direction = direction,
                     server_fault = np.array(iperf_tot)[:,0], packet_size = np.mean(p_sizes),
                     tcpwin = tcpwin)
            pr = Popen([gnuplot_bin, basename(combined_sumname + '.plt')], cwd=dirname(dir_time))
            pr.wait()
        elif direction == 'one2two':
            all_one2two_failed = True
        else:
            all_two2one_failed = True

    print('Exporting html...')
    gen_html(test_title,
             join(raw_data_subdir, common_filename + '_one2two_summary.png'),
             join(raw_data_subdir, common_filename + '_two2one_summary.png'),
             one2two_images, two2one_images, html_name, protocol, streams,
             all_one2two_failed, all_two2one_failed, print_unit, localpart,
             cl1_pretty_name, cl2_pretty_name, tcpwin)
Exemplo n.º 48
0
    def process_batch_txt(self, txt_list, input2, output, n, service,
                          generateIDs, consolidate_header,
                          consolidate_citations, force, teiCoordinates,
                          batch_size_txt):
        print(len(txt_list), "citations to process")

        if output is not None:
            filename = os.path.join(
                output,
                os.path.splitext(input2)[0] + "_" + '0' + '.tei.xml')
        else:
            filename = os.path.join(
                ntpath.dirname(input2),
                os.path.splitext(input2)[0] + "_" + '0' + '.tei.xml')

        if not force and os.path.isfile(filename):
            print(
                filename,
                "already exist, skipping... (use --force to reprocess pdf input2 files)"
            )
            return

        amount_processed = 0
        thousands = 0
        if (n > 1):
            with concurrent.futures.ProcessPoolExecutor(
                    max_workers=n) as executor:
                for txt_el in txt_list:
                    executor.submit(self.process_txt, filename, txt_el, input2,
                                    output, service, generateIDs,
                                    consolidate_header, consolidate_citations,
                                    force, teiCoordinates)
                    amount_processed += 1
                    if (amount_processed % batch_size_txt == 0):
                        thousands += 1
                        if output is not None:
                            filename = os.path.join(
                                output,
                                os.path.splitext(input2)[0] + "_" +
                                str(thousands) + '.tei.xml')
                        else:
                            filename = os.path.join(
                                ntpath.dirname(input2),
                                os.path.splitext(input2)[0] + "_" +
                                str(thousands) + '.tei.xml')

                        if not force and os.path.isfile(filename):
                            print(
                                filename,
                                "already exist, skipping... (use --force to reprocess pdf input2 files)"
                            )
                            return
        else:
            for txt_el in txt_list:
                self.process_txt(filename, txt_el, input2, output, service,
                                 generateIDs, consolidate_header,
                                 consolidate_citations, force, teiCoordinates)
                amount_processed += 1
                if (amount_processed % batch_size_txt == 0):
                    thousands += 1
                    if output is not None:
                        filename = os.path.join(
                            output,
                            os.path.splitext(input2)[0] + "_" +
                            str(thousands) + '.tei.xml')
                    else:
                        filename = os.path.join(
                            ntpath.dirname(input2),
                            os.path.splitext(input2)[0] + "_" +
                            str(thousands) + '.tei.xml')

                    if not force and os.path.isfile(filename):
                        print(
                            filename,
                            "already exist, skipping... (use --force to reprocess pdf input2 files)"
                        )
                        return

        # fixing XML files
        xml_beg = [
            '<?xml version="1.0" ?>\n<TEI xmlns="http://www.tei-c.org/ns/1.0" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML">\n\t<teiHeader>\n\t\t<fileDesc xml:id="f_1"/>\n\t</teiHeader>\n\t<text>\n\t\t<front/>\n\t\t<body/>\n\t\t<back>\n\t\t\t<listBibl>\n'
        ]
        xml_end = ['\t\t\t</listBibl>\n\t\t</back>\n\t</text>\n</TEI>']
        for j in range(0, thousands + 1):
            if output is not None:
                filename = os.path.join(
                    output,
                    os.path.splitext(input2)[0] + "_" + str(j) + '.tei.xml')
            else:
                filename = os.path.join(
                    ntpath.dirname(input2),
                    os.path.splitext(input2)[0] + "_" + str(j) + '.tei.xml')
            with open(filename) as f:
                content = f.readlines()
                content = ["\t\t\t\t" + bibls for bibls in content]
                content = xml_beg + content + xml_end
                with open(filename, 'w') as f:
                    for item in content:
                        f.write("%s" % item)
Exemplo n.º 49
0
def gen_san_ptr_table(mf_folder):
    """
    Extracts the san entry pointer positions from the VS*.bin file by
    analyzing the san.info file. These pointers are used to determine
    where to enter the sch table for animations.

    Parameters
    ----------
    mf_folder : string
        absolute path to the folder with MF section files.

    """
    working_dir = mf_folder

    cfile = find("*.SCH.info", working_dir)[0]
    with codecs.open(cfile, 'r', 'utf-8') as ifile:
        sch_data = ifile.read().split('\n')
    while sch_data[-1] == '':
        sch_data.pop(-1)

    cfile = find("*.SAN.info", working_dir)[0]

    folder = ntpath.dirname(cfile) + '/'
    base_name = ntpath.basename(cfile).split(".")[0]

    sch_ptr_lst = []
    with open(cfile, 'r') as ifile:
        lines = ifile.read().split('\n')
    while lines[-1] == '':
        lines.pop(-1)

    for line in lines:
        line = line.split(';')
        item = min([item.split(',')[0] for item in line])
        try:
            sch_ptr = find_item(sch_data, item)
            sch_ptr_lst.append(sch_ptr)
        except ValueError:
            pass

    cfile = find("*.SIF", working_dir)[0]
    with open(cfile, 'rb') as ifile:
        search = ifile.read()

    sch_size = int.from_bytes(search[0:2], 'big')
    tlm_size = int.from_bytes(search[2:4], 'big')
    san_size = int.from_bytes(search[4:6], 'big')

    cfile = find("VS*.bin", working_dir)[0]
    with open(cfile, 'rb') as ifile:
        bin_data = ifile.read()

    end_pos = bin_data.find(search)
    pos = end_pos - (sch_size + tlm_size + san_size) * 2

    sch_ptr_lst = np.array(sch_ptr_lst)
    sort_idx = np.argsort(sch_ptr_lst)
    unsort_idx = np.argsort(sort_idx)
    sch_ptr_lst = sch_ptr_lst[sort_idx].tolist()
    ptr_lst = []
    sch_ptr = sch_ptr_lst.pop(0).to_bytes(2, 'big')
    while pos < end_pos:
        elem = bin_data[pos:pos + 2]
        if elem == sch_ptr:
            ptr_lst.append(pos)
            try:
                while sch_ptr == sch_ptr_lst[0].to_bytes(2, 'big'):
                    ptr_lst.append(pos)
                    sch_ptr = sch_ptr_lst.pop(0).to_bytes(2, 'big')
                sch_ptr = sch_ptr_lst.pop(0).to_bytes(2, 'big')
            except IndexError:
                pass
        pos += 2

    ptr_lst = np.array(ptr_lst)
    ptr_lst = ptr_lst[unsort_idx].tolist()
    with open(folder + base_name + '.SAN.ptr', 'wb') as outfile:
        for ptr in ptr_lst:
            outfile.write(ptr.to_bytes(4, 'big'))
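The sort_idx/unsort_idx pair above is the argsort-of-argsort idiom: process values in sorted order, then restore the original order with the inverse permutation. A minimal demonstration:

import numpy as np

vals = np.array([30, 10, 20])
sort_idx = np.argsort(vals)        # [1, 2, 0] -> order that sorts vals
unsort_idx = np.argsort(sort_idx)  # [2, 0, 1] -> inverse permutation
sorted_vals = vals[sort_idx]       # [10, 20, 30]
assert (sorted_vals[unsort_idx] == vals).all()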
Exemplo n.º 50
0
    def open_value(self, path):
        key = self.open_key(ntpath.dirname(path))

        return key.open_value(ntpath.basename(path))
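This works because registry paths are backslash-separated, which ntpath splits correctly on any host OS. A sketch with an invented key path:

import ntpath

path = r"ControlSet001\Services\Tcpip\Parameters\Domain"  # invented example
assert ntpath.dirname(path) == r"ControlSet001\Services\Tcpip\Parameters"
assert ntpath.basename(path) == "Domain"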
Exemplo n.º 51
0
    def wrench_data(self, **kwargs):
        '''
        Class method `wrench_data` extracts force and torque data from the given file, assuming messages on the topic are of type `geometry_msgs/Wrench`.

        Parameters
        -------------
        kwargs
            variable keyword arguments

        Returns
        ---------
        `list`
            A list of strings. Each string will correspond to file path of CSV file that contains extracted data of geometry_msgs/Wrench type

        Example
        ----------
        >>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag') 
        >>> wrenchdatafile = b.wrench_data()
        >>> print(wrenchdatafile)

        '''
        tstart = None
        tend = None
        
        type_to_look ="geometry_msgs/Wrench"
        table_rows = self.topic_table[self.topic_table['Types']==type_to_look]
        topics_to_read = table_rows['Topics'].values
        message_counts = table_rows['Message Count'].values
        
        column_names = ["Time",
                                "force.x", 
                                "force.y", 
                                "force.z" , 
                                "torque.x", 
                                "torque.y", 
                                "torque.z"]

        csvlist = []
        for i in range(len(table_rows)):
            tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
            # topic "/foo/bar" becomes "-foo-bar.csv"; drop the leading "-" from the file name
            file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]

            if sys.hexversion >= 0x3000000:
                opencall = open(file_to_write, "w", newline='')
            else:
                opencall = open(file_to_write, 'wb')

            with opencall as f:
                writer = csv.writer(f, delimiter=',')
                writer.writerow(column_names) # write the header
                for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
                    
                    new_row = [t.secs + t.nsecs*1e-9,
                               msg.force.x,
                               msg.force.y,
                               msg.force.z,
                               msg.torque.x,
                               msg.torque.y,
                               msg.torque.z]

                    writer.writerow(new_row)
                

            csvlist.append(file_to_write)
        return csvlist
Exemplo n.º 52
0
def run_pelicun(DL_input_path,
                EDP_input_path,
                DL_method,
                realization_count,
                EDP_file,
                DM_file,
                DV_file,
                output_path=None,
                detailed_results=True,
                log_file=True):

    DL_input_path = os.path.abspath(DL_input_path)  # BIM file
    EDP_input_path = os.path.abspath(EDP_input_path)  # dakotaTab

    # If the output dir was not specified, results are saved in the directory of
    # the input file.
    if output_path is None:
        output_path = ntpath.dirname(DL_input_path)

    # delete output files from previous runs
    files = os.listdir(output_path)
    for filename in files:
        if (filename[-3:] == 'csv') and (('DL_summary' in filename) or
                                         ('DMG' in filename) or
                                         ('DV_' in filename) or
                                         ('EDP' in filename)):
            try:
                os.remove(posixpath.join(output_path, filename))
            except OSError:
                pass

    # If the event file is specified, we expect a multi-stripe analysis...
    try:
        # Collect stripe and rate information for every event
        with open(DL_input_path, 'r') as f:
            event_list = json.load(f)['Events'][0]

        df_event = pd.DataFrame(columns=['name', 'stripe', 'rate', 'IM'],
                                index=np.arange(len(event_list)))

        for evt_i, event in enumerate(event_list):
            df_event.iloc[evt_i] = [
                event['name'], event['stripe'], event['rate'], event['IM']
            ]

        # Create a separate EDP input for each stripe
        EDP_input_full = pd.read_csv(EDP_input_path,
                                     sep=r'\s+',
                                     header=0,
                                     index_col=0)

        # EDP_input_full.to_csv(EDP_input_path[:-4]+'_1.out', sep=' ')

        stripes = df_event['stripe'].unique()
        EDP_files = []
        IM_list = []
        num_events = []
        num_collapses = []
        for stripe in stripes:
            events = df_event[df_event['stripe'] == stripe]['name'].values

            EDP_input = EDP_input_full[EDP_input_full['MultipleEvent'].isin(
                events)]

            EDP_files.append(EDP_input_path[:-4] + '_{}.out'.format(stripe))

            EDP_input.to_csv(EDP_files[-1], sep=' ')

            IM_list.append(
                df_event[df_event['stripe'] == stripe]['IM'].values[0])

            # record number of collapses and number of events per stripe
            PID_columns = [col for col in list(EDP_input)
                           if 'PID' in col]  # list of column headers with PID
            num_events.append(EDP_input.shape[0])
            count = 0
            for row in range(num_events[-1]):
                for col in PID_columns:
                    if EDP_input.iloc[row][
                            col] >= 0.20:  # TODO: PID collapse limit as argument
                        count += 1
                        break
            num_collapses.append(count)

        # fit lognormal distribution to all points by maximum likelihood estimation (MLE)
        theta, beta = lognormal_MLE(IM_list, num_events, num_collapses)
        beta_adj = np.sqrt(
            beta**2 + 0.35**2
        )  # TODO: adjust dispersion by 0.35 to account for modeling uncertainty
        print("theta: " + str(theta))
        print("beta_adj: " + str(beta_adj))

        # write BIM file with new probability of collapse for each IM
        DL_files = []
        for i in range(len(stripes)):
            DL_input_stripe = update_collapsep(DL_input_path, stripes[i],
                                               theta, beta_adj, IM_list[i])
            DL_files.append(DL_input_stripe)

    except Exception:  # fall back to running the analysis for a single IM
        stripes = [1]
        EDP_files = [EDP_input_path]
        DL_files = [DL_input_path]

    # run the analysis and save results separately for each stripe
    #print(stripes, EDP_files)

    for s_i, stripe in enumerate(stripes):

        DL_input_path = DL_files[s_i]

        # read the type of assessment from the DL input file
        with open(DL_input_path, 'r') as f:
            DL_input = json.load(f)

        # check if the DL input file has information about the loss model
        if 'DamageAndLoss' in DL_input:
            pass
        else:
            # if the loss model is not defined, give a warning
            print(
                'WARNING No loss model defined in the BIM file. Trying to auto-populate.'
            )

            EDP_input_path = EDP_files[s_i]

            # and try to auto-populate the loss model using the BIM information
            DL_input, DL_input_path = auto_populate(DL_input_path,
                                                    EDP_input_path, DL_method,
                                                    realization_count)

        DL_method = DL_input['DamageAndLoss']['_method']

        stripe_str = '' if len(stripes) == 1 else str(stripe) + '_'

        if DL_method == 'FEMA P58':
            A = FEMA_P58_Assessment(log_file=log_file)
        elif DL_method in ['HAZUS MH EQ', 'HAZUS MH']:
            A = HAZUS_Assessment(hazard='EQ', log_file=log_file)
        elif DL_method == 'HAZUS MH HU':
            A = HAZUS_Assessment(hazard='HU', log_file=log_file)

        A.read_inputs(
            DL_input_path, EDP_files[s_i],
            verbose=False)  # make DL inputs into array of all BIM files

        A.define_random_variables()

        A.define_loss_model()

        A.calculate_damage()

        A.calculate_losses()

        A.aggregate_results()

        A.save_outputs(output_path,
                       EDP_file,
                       DM_file,
                       DV_file,
                       stripe_str,
                       detailed_results=detailed_results)

    return 0
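lognormal_MLE, update_collapsep, auto_populate and the assessment classes are external helpers not shown in this snippet. As an illustration of the fragility-fitting step the comments describe, here is a minimal sketch of a binomial-likelihood lognormal fit; the name and interface mirror the call above, but this is an assumption, not the project's actual implementation:

import numpy as np
from scipy import optimize, stats

def lognormal_MLE_sketch(IM_list, num_events, num_collapses):
    """Fit median theta and dispersion beta of a lognormal fragility curve
    by maximizing the binomial likelihood of the observed collapse counts."""
    IM = np.asarray(IM_list, dtype=float)
    N = np.asarray(num_events, dtype=float)
    Z = np.asarray(num_collapses, dtype=float)

    def neg_log_likelihood(params):
        theta, beta = params
        # probability of collapse at each IM level from the lognormal CDF
        p = stats.norm.cdf(np.log(IM / theta) / beta)
        p = np.clip(p, 1e-12, 1 - 1e-12)  # keep log() finite
        return -np.sum(Z * np.log(p) + (N - Z) * np.log(1.0 - p))

    res = optimize.minimize(neg_log_likelihood, x0=[np.median(IM), 0.4],
                            bounds=[(1e-6, None), (1e-6, None)])
    return res.x  # theta, beta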
Exemplo n.º 53
0
    def message_by_topic(self, topic):
        '''
        Class method `message_by_topic` to extract message from the ROS Bag by topic name `topic`

        Parameters
        ---------------
        topic: `str`
            
            Topic from which to extract messages.
        Returns
        ---------
        `str`
            The name of the csv file to which data from the `topic` has been extracted

        Example
        -----------
        >>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag') 
        >>> msg_file = b.message_by_topic(topic='/catvehicle/vel')

        '''

        msg_list = []
        tstart = None
        tend = None
        time = []
        for _, msg, t in self.reader.read_messages(topics=topic, start_time=tstart, end_time=tend):
            time.append(t)
            msg_list.append(msg)

        msgs = msg_list

        if len(msgs) == 0:
            print("No data on the topic:{}".format(topic))
            return None

        # set column names from the slots
        cols = ["Time"]
        m0 = msgs[0]
        slots = m0.__slots__
        for s in slots:
            v, s = slotvalues(m0, s)
            if isinstance(v, tuple):
                snew_array = [] 
                p = list(range(0, len(v)))
                snew_array = [s + "_" + str(pelem) for pelem in p]
                s = snew_array
            
            if isinstance(s, list):
                cols.extend(s)
            else:
                cols.append(s)
        
        tempfile = self.datafolder + "/" + topic.replace("/", "-") + ".csv"
        file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]

        if sys.hexversion >= 0x3000000:
            opencall = open(file_to_write, "w", newline='')
        else:
            opencall = open(file_to_write, 'wb')

        with opencall as f:
            writer = csv.writer(f, delimiter=',')
            writer.writerow(cols) # write the header
            for i, m in enumerate(msgs):
                slots = m.__slots__
                vals = []
                vals.append(time[i].secs + time[i].nsecs*1e-9)
                for s in slots:
                    v, s = slotvalues(m, s)
                    if isinstance(v, tuple):
                        snew_array = [] 
                        p = list(range(0, len(v)))
                        snew_array = [s + "_" + str(pelem) for pelem in p]
                        s = snew_array

                    if isinstance(s, list):
                        # use a separate index so the outer message index i is not clobbered
                        for j in range(len(s)):
                            vals.append(v[j])
                    else:
                        vals.append(v)
                writer.writerow(vals)

        return file_to_write
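slotvalues is a helper imported elsewhere in this module; judging from how it is used above (it returns a slot's value together with a slot name the caller may rewrite), it plausibly reduces to the sketch below, an assumption rather than the library's actual code:

def slotvalues(m, slot):
    # assumed behavior: return (value, slot name) for one message slot
    return getattr(m, slot), slot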
Exemplo n.º 54
0
 def __add_metatadata_listofstrings(self, keyu, value):
     
     try:
         valueu = self.convert_to_unicode(value)
         if keyu not in self.metadata:
             self.metadata[keyu] = []
         if valueu not in self.metadata[keyu] or self.__disablevaluededup:  
             self.metadata[keyu].append(valueu)
         
         if not self.__disableautosubfieldparsing:
             if keyu == "filepath":
                 #use ntpath instead of os.path so we are consistent across platforms. ntpath should work for both windows and unix paths.
                 #os.path works for the platform you are running on, not necessarily what the malware was written for. 
                 #Ex. when running mwcp on linux to process windows malware, os.path will fail due to not handling backslashes correctly.
                 self.add_metadata("filename", ntpath.basename(valueu))
                 self.add_metadata("directory", ntpath.dirname(valueu))
             if keyu == "c2_url":
                 self.add_metadata("url", valueu)
             if keyu == "c2_address":
                 self.add_metadata("address", valueu)
             if keyu == "serviceimage":
                 #we use the tactic of looking for the first .exe in value. This is not guaranteed to be reliable
                 if '.exe' in valueu:
                     self.add_metadata("filepath", valueu[0:valueu.find('.exe')+4])
             if keyu == "servicedll":
                 self.add_metadata("filepath", valueu)
             if keyu == "url" or keyu == "c2_url":
                 #http://[fe80::20c:1234:5678:9abc]:80/badness
                 #http://bad.com:80
                 #ftp://127.0.0.1/really/bad?hostname=pwned
                 match = re.search(r"[a-z\.-]{1,40}://(\[?[^/]+\]?)(/[^?]+)?",valueu)
                 if match:
                     if match.group(1):
                         address = match.group(1)
                         if address[0] == "[":
                             #ipv6--something like [fe80::20c:1234:5678:9abc]:80
                             parts = address.split("]")
                             if len(parts) > 1:
                                 if parts[1]:
                                     if keyu == "c2_url":
                                         self.add_metadata("c2_socketaddress", [parts[0][1:], parts[1][1:], "tcp"])
                                     else:
                                         self.add_metadata("socketaddress", [parts[0][1:], parts[1][1:], "tcp"])
                             else:
                                 if keyu == "c2_url":
                                     self.add_metadata("c2_address", parts[0][1:])
                                 else:
                                     self.add_metadata("address", parts[0][1:])
                         else:
                             #regular domain or ipv4--bad.com:80 or 127.0.0.1
                             parts = address.split(":")
                             if len(parts) > 1:
                                 if parts[1]:
                                     if keyu == "c2_url":
                                         self.add_metadata("c2_socketaddress", [parts[0], parts[1], "tcp"])
                                     else:
                                         self.add_metadata("socketaddress", [parts[0], parts[1], "tcp"])
                             else:
                                 if keyu == "c2_url":
                                     self.add_metadata("c2_address", parts[0])
                                 else:
                                     self.add_metadata("address", parts[0])
                     if match.group(2):
                         self.add_metadata("urlpath", match.group(2))
                 else:
                     self.debug("Error parsing as url: %s" % valueu )
                 
     except Exception as e:
         self.debug("Error adding metadata for key: %s\n%s" % (keyu, traceback.format_exc()))
Exemplo n.º 55
0
    def processFile(self, file_fullpath, hostID, instanceID, rowsData):
        rowNumber = 0
        check_tags = ['LastModified', 'FilePath']
        # the 'end' event signifies when the end of the XML node has been reached,
        # and therefore when all values can be parsed
        try:
            xml_data = loadFile(file_fullpath)
            for event, element in etree.iterparse(xml_data, events=("end", )):
                skip_entry = False
                tag_dict = {}
                if element.tag == "PersistenceItem":
                    self._processElement(element, tag_dict)

                    # Check we have everything we need and ignore entries with critical XML errors on them
                    for tag in check_tags:
                        if tag in tag_dict:
                            if tag_dict[tag] is None:
                                if 'AppCompatPath' in tag_dict:
                                    logger.warning(
                                        "Malformed tag [%s: %s] in %s, entry: %s (skipping entry)"
                                        % (tag, tag_dict[tag],
                                           tag_dict['AppCompatPath'],
                                           file_fullpath))
                                else:
                                    logger.warning(
                                        "Malformed tag [%s: %s] in %s, entry: Unknown (skipping entry)"
                                        % (tag, tag_dict[tag], file_fullpath))
                                skip_entry = True
                                break
                    # If the entry is valid do some housekeeping:
                    if not skip_entry:
                        if tag_dict['ExecutionFlag'] == '1':
                            tmpExecFlag = True
                        elif tag_dict['ExecutionFlag'] == '0':
                            tmpExecFlag = False
                        else:
                            tmpExecFlag = tag_dict['ExecutionFlag']
                        namedrow = settings.EntriesFields(
                            HostID=hostID,
                            EntryType=settings.__APPCOMPAT__,
                            RowNumber=rowNumber,
                            InstanceID=instanceID,
                            LastModified=(tag_dict['LastModified'].replace(
                                "T", " ").replace("Z", "")
                                          if 'LastModified' in tag_dict else
                                          '0001-01-01 00:00:00'),
                            LastUpdate=(tag_dict['LastUpdate'].replace(
                                "T", " ").replace("Z", "")
                                        if 'LastUpdate' in tag_dict else
                                        '0001-01-01 00:00:00'),
                            FileName=ntpath.basename(tag_dict['FilePath']),
                            FilePath=ntpath.dirname(tag_dict['FilePath']),
                            Size=(tag_dict['Size']
                                  if 'Size' in tag_dict else 'N/A'),
                            ExecFlag=tmpExecFlag)
                        rowsData.append(namedrow)
                        rowNumber += 1
                # Free each parsed element so memory stays flat on large files
                element.clear()
            xml_data.close()
        except Exception as e:
            print str(e)
            print traceback.format_exc()
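The snippet above leans on lxml's streaming parser; the following standalone sketch isolates that pattern (the element name comes from the snippet, the generator wrapper is illustrative):

from lxml import etree

def iter_persistence_items(xml_path):
    # Handle each <PersistenceItem> when its closing tag is reached, then
    # clear it so memory use stays flat on large XML files.
    for event, element in etree.iterparse(xml_path, events=("end",)):
        if element.tag == "PersistenceItem":
            yield dict((child.tag, child.text) for child in element)
        element.clear()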
Exemplo n.º 56
0
import json
import sys
import os
import ntpath
import telebot
from telebot import types
import requests
import time
from lxml import html
import swarfarm
import thread
from flask import Flask
print('running...')
swarfarm = swarfarm.Swarfarm()

scriptPath = ntpath.dirname(sys.argv[0])

pageUrl = 'http://summonerswar.wikia.com/wiki/'
elements = ['light', 'dark', 'fire', 'water', 'wind']
print('running...')
# Run "python svResponseBot.py <configfile>" to use your own config file instead of config.json
# The token in config_example.json is for the test bot (@sumwarbot)
if len(sys.argv) > 1:
    with open(sys.argv[1], 'r') as configFile:
        config = json.load(configFile)
    # The with-block closes the file; no explicit close() needed
    bot = telebot.TeleBot(config['token'])
else:
    # probably running on Heroku
    try:
        token = str(os.environ.get('token'))
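The try block above is cut off mid-statement; a hedged, standalone sketch of the environment-variable fallback it presumably implements (only the 'token' key comes from the snippet, everything else is an assumption):

import os
import telebot

token = os.environ.get('token')
if token is None:
    raise SystemExit('no token in config file or environment')
bot = telebot.TeleBot(token)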
Exemplo n.º 57
0
import ntpath, posixpath

def parentdir(path):
    return posixpath.dirname(path) if '/' in path else ntpath.dirname(path)
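For example, the helper picks a separator purely by inspecting the path itself:

print(parentdir('/var/log/syslog'))    # /var/log
print(parentdir(r'C:\temp\file.txt'))  # C:\temp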
Exemplo n.º 58
0
import numpy as np
import pandas as pd  # needed for pd.read_csv below
#import matplotlib.pyplot as plt
import ntpath
from collections import Counter
import datetime as dt
date = dt.datetime.today().strftime("%m%d%Y")

###read in survey export file

results_path = 'C:\\Users\\KLA\\Dropbox\\MHC OPS Absent Narratives Approach\\Evaluation (all) 2017-2018\\2017-2018 Evaluation Methods\\Surveys\\Demographic Surveys\\demog_surveys_export_fa17_01182018.csv'
results = pd.read_csv(results_path, encoding="ISO-8859-1")

###create object for file name

#results_name = ntpath.basename(results_path).rstrip('_01182018.csv')
results_directory = ntpath.dirname(results_path)

###drop off extra header rows

results = results.drop(0)
results = results.drop(1)

###drop off useless columns

del results['Status']
del results['IPAddress']
del results['Progress']
del results['Duration (in seconds)']
del results['Finished']
del results['StartDate']
del results['EndDate']
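A hedged sketch (not part of the original script) of how results_directory and the date string defined above could be combined into an output path for the cleaned frame:

output_path = ntpath.join(results_directory, 'demog_cleaned_' + date + '.csv')
# results.to_csv(output_path, index=False)  # hypothetical write-out step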
Exemplo n.º 59
0
import ntpath
import csv
import netmaskdictionary
import sys

# Import Dictionary of netmasks in CIDR
netmaskDict = netmaskdictionary.netmaskDict

print(
    'Input the file location in full directory format, IE: C:\\temp\\file.txt')
fileLocation = input('Location of Bulk Import CSV File: ')
# Save file directory for use in writing output file at the end of the script
fileDir = ntpath.dirname(fileLocation)

print(
    'If the addresses will be imported into Panorama, enter the Device Group. Otherwise, leave the device group blank.'
)
deviceGroup = input('Device Group (leave blank if there is no Panorama): ')

# Turn the CSV into a nested dictionary keyed by row number
addressDict = {}
try:
    with open(fileLocation) as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            # Copy the DictReader row directly; no manual key loop needed
            addressDict[line_count] = dict(row)
            line_count += 1
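netmaskdictionary is a local module, so as a hedged illustration of the kind of mapping it presumably provides, here is a self-contained dotted-quad-to-CIDR conversion:

def mask_to_cidr(netmask):
    # Count the set bits across all four octets, e.g. 255.255.255.0 -> /24
    return sum(bin(int(octet)).count('1') for octet in netmask.split('.'))

print(mask_to_cidr('255.255.255.0'))  # 24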
Exemplo n.º 60
0
import ntpath

def __determineToolUsedByOutputFilename(outputFileName) -> str:
    return ntpath.basename(ntpath.dirname(outputFileName))
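Hedged usage sketch: if output files are laid out as <tool>/<file>, the helper recovers the tool name from the parent directory (the sample path is a placeholder):

print(__determineToolUsedByOutputFilename(r'C:\results\nmap\scan1.xml'))  # nmap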