Code example #1
def send(sock, addr, device_list, port):
    global bt
    print(str(port))
    if sock == "":
        if bt:
            sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #try:
        sock.connect((addr, port))
        #except:
        #	print("The other user has been disconnected")
        #	sock.close()
        #	start(device_list)
    source = input("Please enter the name and destination of the file you wish to send: ")
    try:
        name = ntpath.basename(source) + "\n"
        size = str(os.path.getsize(source)) + "\n"
        f = open(source, "rb")
    except OSError:
        print("File does not appear to exist")
        return send(sock, addr, device_list, port)
    l = f.read()
    print("Sending " + ntpath.basename(source))
    print(str(os.path.getsize(source)) + " bytes")
    name = bytes(name, 'utf-8')
    size = bytes(size, 'utf-8')
    sock.send(name)
    sock.send(size)
    sock.send(l)
    f.close()
    print("File transfer complete!")
    send(sock, addr, device_list, port)
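Why these examples reach for ntpath.basename instead of os.path.basename: ntpath applies Windows path rules on any platform, so it splits on both '\' and '/'. A minimal standalone sketch (not part of the sender above):

import ntpath
import posixpath

# ntpath treats both '\\' and '/' as separators, so it recovers the file
# name from Windows-style and POSIX-style paths alike.
print(ntpath.basename(r"C:\Users\me\photo.jpg"))      # photo.jpg
print(ntpath.basename("/home/me/photo.jpg"))          # photo.jpg

# posixpath (os.path on Linux/macOS) only splits on '/', so a Windows-style
# path would keep its backslashes.
print(posixpath.basename(r"C:\Users\me\photo.jpg"))   # C:\Users\me\photo.jpg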
Code example #2
File: project.py Project: thelordofcode3/StaCoAn
 def app_prepper(self):
     if not self.application_file.lower().endswith(tuple(self.apptypes)):
         Logger.logmodule[0].log("No mobile app detected, exiting! Hgnnnhh", 1)
         sys.exit()
     if not os.path.exists(os.path.join(PATH,  ntpath.basename(self.application_file))):
         os.makedirs(os.path.join(PATH,  ntpath.basename(self.application_file)))
     if Project.development == 1:
         self.workfolder = os.path.join(PATH, os.path.splitext(os.path.basename(self.application_file))[0])
     else:
         zip_ref = zipfile.ZipFile(self.application_file, 'r')
         new_folder = os.path.join(PATH, os.path.splitext(os.path.basename(self.application_file))[0])
         # Unpacking the .apk or .ipa file (it is just a ZIP archive)
         zip_ref.extractall(new_folder)
         zip_ref.close()
         self.workfolder = new_folder
         # For Android: decompile with JADX
         if self.application_file.lower().endswith("apk"):
             jadx_folder = os.path.join(new_folder, "jadx_source_code")
             print(jadx_folder)
             if not os.path.exists(jadx_folder):
                 os.makedirs(jadx_folder)
             cmd = "\""+os.path.join(os.getcwd(), "jadx", "bin", "jadx") + '\" -d \"' +jadx_folder + "\" " + self.application_file
             print(cmd)
             jadx_process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
             output_jadx = "--------- JADX OUTPUT BELOW --------- \n "
             for line in jadx_process.stdout:
                 output_jadx += str(line)
             Logger.logmodule[0].log(str(output_jadx), 3)
             jadx_process.wait()
             Logger.logmodule[0].log("jadx return code: "+str(jadx_process.returncode), 3)
         # TO DO: ipa decompiling tool
         elif self.application_file.lower().endswith("ipa"):
             Logger.logmodule[0].log(".ipa files not implemented yet.", 1)
             sys.exit()
Code example #3
  def run(self, configFile, otu, threads):
    rc = ReadConfig()
    projectParams, sampleParams = rc.readConfig(configFile, outputDirExists = False)

    ggDB = self.ggDB.replace('##', str(otu), 1)
    print 'Mapping reads to the GreenGenes DB at: ' + ggDB + '\n'

    if not os.path.exists(ggDB + '.amb'):
      print 'Indexing GreenGenes DB:'
      os.system('bwa index -a is ' + ggDB)
      print ''
    else:
      print 'GreenGenes DB is already indexed.\n'

    for sample in sampleParams:
      print 'Mapping reads in sample: ' + sample

      pairs = sampleParams[sample]['pairs']
      singles = sampleParams[sample]['singles']

      # align and map each pair
      for i in xrange(0, len(pairs), 2):
        pair1 = pairs[i]
        pair2 = pairs[i+1]
        bamPrefix = projectParams['output_dir'] + ntpath.basename(pair1)
        mapPair(ggDB, pair1, pair2, bamPrefix, threads)

      # align and map each single-ended read file
      for i in xrange(0, len(singles)):
        bamPrefix = projectParams['output_dir'] + ntpath.basename(singles[i])
        mapSingle(ggDB, singles[i], bamPrefix, threads)
Code example #4
def lambda_handler(event, context):
    run = False
    try:
        run = event['wet']
    except KeyError:
        pass

    source = {}
    result = []
    try:
        for obj in s3res.Bucket('source_bucket').objects.filter(Prefix='source_folder/'):
            source[ntpath.basename(obj.key)] = ''

        for obj in s3res.Bucket('target_bucket').objects.filter(Prefix='target_folder/'):
            basename = ntpath.basename(obj.key)
            if basename not in source:
                print('Deleting ' + basename)
                result.append(basename)
                if run == True:
                    s3cli.delete_object(Bucket='target_bucket',Key='target_folder/'+basename)
        
        print('Deletion finished')
        return(result)
    except Exception as e:
        print(e)
        raise e
Code example #5
def merge_data():
    # print train_data_dir + "/train_pair*"
    train_pairs = glob.glob(train_data_dir + "/*train_pairs*")
    print list(zip(train_pairs, list(xrange(0, 4))))

    for i, train_pair in enumerate(train_pairs):
        dir_name = ntpath.dirname(train_pair)
        pref = ntpath.basename(train_pair).split("train_pairs")[0]
        suffix = ntpath.basename(train_pair).split("train_pairs")[-1]
        # print pref, suffix
        info = dir_name + "/" + pref + "train_publicinfo" + suffix
        target = dir_name + "/" + pref + "train_target" + suffix
        print info, pref, suffix
        X = data_io.read_train_pairs(train_pair)
        y = data_io.read_train_target(target)
        inf_data = data_io.read_train_info(info)
        X, y, inf_data = process_indices(X, y, inf_data, i)
        if "X_merged" not in locals():
            X_merged = X
            y_merged = y
            info_merged = inf_data
        else:
            print "Shape before appending", X_merged.shape, y_merged.shape, X.shape, y.shape
            X_merged = X_merged.append(X)
            y_merged = y_merged.append(y)
            info_merged = info_merged.append(inf_data)
            print "Shape thus far", X_merged.shape, y_merged.shape

    return X_merged, y_merged, info_merged
Code example #6
    def geoprocess_raster(self, file_path):
        """ Raster geoprocessing interface """
        current_ref, target_ref = self._get_spatial_ref(file_path, self.target_ref)
        file_name = ntpath.basename(file_path)[5:]
        proj_out_ras = file_path
        if not self.mosaic_operation:
            file_name = ntpath.basename(file_path)
            try:
                if current_ref.name != target_ref.name:
                    # Reproject input raster
                    proj_out_ras = self._reproject_raster(current_ref, target_ref, file_path)
                else:
                    raise ValueError('Raster processing FAILED! {0} projection is similar to that of the target reference.'.format(current_ref.name))
            except ValueError as e:
                print(e)

        # Convert raster values to float values
        ndvi_out_ras = self._raster_to_float(proj_out_ras)
        arcpy.Delete_management(proj_out_ras)

        # Clip raster to area of interest
        clip_out_ras = self._clip_raster(ndvi_out_ras)
        arcpy.Delete_management(ndvi_out_ras)

        # Set bad raster values to null
        where_clause_val1 = "VALUE > 1"
        where_clause_val2 = "VALUE < -1"
        masked_file = self.place_name + file_name[-54:]
        if self.mosaic_operation:
            masked_file = self.place_name + file_name[-53:]
        masked_out_ras = self._raster_setnull(clip_out_ras, masked_file, where_clause_val1, where_clause_val2)
        self.clean_ras.append(masked_out_ras)
        return clip_out_ras
Code example #7
File: app.py Project: milindmaha/SearchPDF
def list_children_in_folder(path, isFolderSearch, client):
	item_list = []

	try:
		folder_metadata = client.metadata(path)
	except:
		print ('The Directory does not exist')
		sys.exit(-1)
	
	if folder_metadata['is_dir'] == "False":
		print ('This is not a Directory')
		sys.exit(1)

	for I in folder_metadata['contents']:
		if isFolderSearch:
			if I['is_dir'] == True:
				ntpath.basename(I['path'])
				head, tail = ntpath.split(I['path'])
				folder_info = { "folder_name" : decode_string(tail),
								"folder_path" : decode_string(I["path"])
							}
				item_list.append(folder_info)
		else:
			ntpath.basename(I['path'])
			head, tail = ntpath.split(I['path'])
			if I['is_dir'] == False:
				if tail.endswith('.pdf'):
					item_list.append(tail)

	return item_list
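Note that ntpath.split already returns the basename as the second element of its (head, tail) pair, so the bare ntpath.basename(I['path']) calls above are computed and then discarded. A small sketch of the relationship, using a hypothetical Dropbox-style entry path:

import ntpath

path = "/Documents/reports/summary.pdf"   # hypothetical entry path
head, tail = ntpath.split(path)
assert tail == ntpath.basename(path)      # 'summary.pdf' either way
print(head, tail)                         # /Documents/reports summary.pdf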
Code example #8
File: copy.py Project: UMDHackers/copy-all
def main(argv):
	if len(argv) == 1 or argv[1] == "help":
		help()
	else:
		file = argv[1]
		#Move the file to the USB!
		if(os.path.isfile(file)):
			file_name = ntpath.basename(file)
			shutil.copyfile(file, output+file_name)
		elif(os.path.isdir(file)):
			#type of file
			if(len(argv) > 2):
				details(argv)
			else:
				dir = ntpath.basename(file)
				shutil.copytree(file, output+dir)
		elif(file == "all"):
			if(len(argv) > 2):
				details(argv)
			else:
				dir_collection = []
				for path, dirs, files in os.walk(main_dir):
					# go only one level deep
					dir_collection = list(dirs)
					del dirs[:] # go only one level deep
				for d in dir_collection:
					shutil.copytree(d, output+d)
				for f in files:
					shutil.copyfile(f, output+f)
Code example #9
    def processPairs(self, pairs, ggIdToTaxonomy, maxEditDistance, minLength, outputDir, prefix):
        for i in xrange(0, len(pairs), 2):
            pair1 = pairs[i]
            pair2 = pairs[i + 1]

            pair1Base = ntpath.basename(pair1)
            pair2Base = ntpath.basename(pair2)

            print 'Identifying 16S sequences in paired-end reads: ' + pair1 + ', ' + pair2

            # write out classifications for paired-end reads with both ends identified as 16S
            bamFile = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.intersect.16S.bam'
            readsMappedTo16S_1, readsMappedTo16S_2 = self.readPairedBAM(bamFile, ggIdToTaxonomy, maxEditDistance, minLength)

            output1 = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.intersect.16S.tsv'
            output2 = prefix + '.' + pair2Base[0:pair2Base.rfind('.')] + '.intersect.16S.tsv'
            print '  Paired results written to: '
            print '    ' + output1
            print '    ' + output2 + '\n'
            self.writeClassification(output1, readsMappedTo16S_1)
            self.writeClassification(output2, readsMappedTo16S_2)

            # write out classifications for paired-ends reads with only one end identified as 16S
            bamFile = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.difference.16S.bam'
            readsMappedTo16S = self.readSingleBAM(bamFile, ggIdToTaxonomy, maxEditDistance, minLength)

            output = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.difference.16S.tsv'
            print '  Singleton results written to: ' + output + '\n'
            self.writeClassification(output, readsMappedTo16S)
Code example #10
 def contextMenuEvent(self, event):
     pos = event.pos()
     element = self.page().mainFrame().hitTestContent(pos)
     url = element.linkUrl().toString()
     image = element.imageUrl().toString()
     menu = self.page().createStandardContextMenu()
     action = menu.exec_(event.globalPos())
     try:
         if action.text() == "Open in New Window":
             self.fitxaBerrianIreki(url)
         if action.text() == "Save Link...":
             f, ext = os.path.splitext(url)
             if ext != "":
                 p = QtWidgets.QFileDialog.getSaveFileName(self, "Fitxategia gorde",
                     ntpath.basename(url), "(*" + ext +")")
                 if p[0] != "":
                     urllib.request.urlretrieve(url, p[0])
         if action.text() == "Save Image":
             f, ext = os.path.splitext(image)
             p = QtWidgets.QFileDialog.getSaveFileName(self, "Irudia gorde",
                 ntpath.basename(image), "(*" + ext +")")
             if p[0] != "":
                 urllib.request.urlretrieve(image, p[0])
     except AttributeError:
         pass
Code example #11
File: movefile.py Project: dkapellusch/420-520
def main(argv):
    print("Hit it")
    file = argv
    failed = False      
    try:
        print("file name is" +str(file))
        file_name = ntpath.basename(file)
        path_without_name = file.replace(file_name,"")
        name,extension = os.path.splitext(file_name)
        if (not os.path.isfile("/xdisk/dkapellusch/midterm/data/test/Fits_files/"+ path_without_name+name+".solved")):
                raise Exception("Failed!")
        #filedir = "/xdisk/dkapellusch/midterm/data/Fits_files/"+name+".new"
        filedir ="/xdisk/dkapellusch/midterm/data/test/Fits_files/"+ path_without_name+name+".new"
        print(filedir+"is file dir")
        shutil.move(filedir,os.getcwd())
    except Exception as e:
        print(e)
        failed = True
        with open("failed_files.txt",'a+') as afile:
            afile.write(file+".fits" + " was not solved!"+"\n")
    try:
        if failed:
            raise Exception("Failed!")
        file_name = ntpath.basename(file)
        path_without_name = file.replace(file_name,"")
        name,extension = os.path.splitext(file_name)
        #filedir = "/xdisk/dkapellusch/midterm/data/Fits_files/"+name+".new"
        filedir ="/xdisk/dkapellusch/midterm/data/test/Fits_files/"+ path_without_name+name+".fits"".cfg"
        print(filedir)
        shutil.move(filedir,os.getcwd())
    except Exception as e:
        print(e)

    return()
Code example #12
File: smbmap.py Project: byt3bl33d3r/smbmap
 def download_file(self, host, path, verbose=True):
     path = path.replace('/','\\')
     path = ntpath.normpath(path)
     filename = path.split('\\')[-1]   
     share = path.split('\\')[0]
     path = path.replace(share, '')
     try:
         out = open(ntpath.basename('%s/%s' % (os.getcwd(), '%s-%s%s' % (host, share, path.replace('\\','_')))),'wb')
         dlFile = self.smbconn[host].listPath(share, path)
         if verbose:
             msg = '[+] Starting download: %s (%s bytes)' % ('%s%s' % (share, path), dlFile[0].get_filesize())
             if self.pattern:
                 msg = '\t' + msg
             print msg 
         self.smbconn[host].getFile(share, path, out.write)
         if verbose:
             msg = '[+] File output to: %s/%s' % (os.getcwd(), ntpath.basename('%s/%s' % (os.getcwd(), '%s-%s%s' % (host, share, path.replace('\\','_')))))
             if self.pattern:
                 msg = '\t'+msg
             print msg 
     except SessionError as e:
         if 'STATUS_ACCESS_DENIED' in str(e):
             print '[!] Error retrieving file, access denied'
         elif 'STATUS_INVALID_PARAMETER' in str(e):
             print '[!] Error retrieving file, invalid path'
         elif 'STATUS_SHARING_VIOLATION' in str(e):
             print '[!] Error retrieving file, sharing violation'
     except Exception as e:
         print '[!] Error retrieving file, unknown error'
         os.remove(filename)
     out.close()
     return '%s/%s' % (os.getcwd(), ntpath.basename('%s/%s' % (os.getcwd(), '%s-%s%s' % (host, share, path.replace('\\','_')))))
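The local output name above is built by flattening the remote path into a single file name and then letting ntpath.basename strip the working directory back off. A rough standalone trace with made-up values (the host, share, and path below are illustrative, not taken from smbmap):

import ntpath
import os

host, share = "192.168.1.10", "C$"            # hypothetical values
path = "\\Users\\admin\\notes.txt"            # remote path with the share prefix removed

local_name = ntpath.basename(
    '%s/%s' % (os.getcwd(), '%s-%s%s' % (host, share, path.replace('\\', '_'))))
print(local_name)   # 192.168.1.10-C$_Users_admin_notes.txt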
Code example #13
File: app.py Project: milindmaha/SearchPDF
def list_files(path, client):
	pdf_list = []

	try:
		folder_metadata = client.metadata(path)
	except:
		print ('The Directory does not exist')
		sys.exit(-1)

	print (folder_metadata['is_dir'])
	if folder_metadata['is_dir'] == "False":
		print ('This is not a Directory')
		sys.exit(1)

	for I in folder_metadata['contents']:
		ntpath.basename(I['path'])
		head, tail = ntpath.split(I['path'])

		if I['is_dir'] == True:
			item_type = 'Dir'
		else:
			item_type = 'File'
			if tail.endswith(".pdf"):
				pdf_list.append(tail)

	return pdf_list
Code example #14
File: Locomotif.py Project: zzr1100/locomotifGUI
	def openDataFile(self, Locomotif):
		""" Load a Data File for Further Processing """
		dataFilename = self.tools.selectDataFile()
		if dataFilename == "":
			self.tools.showInfo( "Info", "NO FILE SELECTED" )
			return 0
		
		# deactivate all tabs 
		g_rundata.setWorkingState(1)
		self.work.workCleanTabs( g_tabwidgets, g_rundata )
		g_tabwidgets.t1Data.setCurrentIndex(0)
		
		# store name in global data	
		g_rundata.setDataFileName( dataFilename )
		# set data into current tab
		g_tabwidgets.t1LoadedDataFilename.setText(dataFilename)
		
		# load and display initial data
		self.work.readDataFileIntoTable( g_tabwidgets, g_rundata, dataFilename )
		self.work.markDataOnGoogleMap( g_tabwidgets, g_rundata )
		print "data marked on google map type " + g_rundata.getGoogle1Maptype()
		
		# initial names for maps
		mapv1Filename = configData.getMapPath() + "/" + ntpath.basename(str(dataFilename)) + "_bio_v.png"
		mapv2Filename = configData.getMapPath() + "/" + ntpath.basename(str(dataFilename)) + "_div_v.png"
		print mapv1Filename
		print mapv2Filename
		g_rundata.setV1Mapname( mapv1Filename )
		g_rundata.setV2Mapname( mapv2Filename )
		mapd1Filename = configData.getMapPath() + "/" + ntpath.basename(str(dataFilename)) + "_bio_d.png"
		mapd2Filename = configData.getMapPath() + "/" + ntpath.basename(str(dataFilename)) + "_div_d.png"
		g_rundata.setD1Mapname( mapd1Filename )
		g_rundata.setD2Mapname( mapd2Filename )
		
		return 1
Code example #15
	def menuCallbackView(self, index):

		if index == -1: return
		viewName = self.menuViewNames[index]
		viewId = self.menuViewIds[index]
		self.view2 = self.getViewById(viewId)
		view1Content = self.view1.substr(sublime.Region(0, self.view1.size()))
		view2Content = self.view2.substr(sublime.Region(0, self.view2.size()))

		selectionLeft = getSelectionString(self.view1)
		selectionRight = getSelectionString(self.view2)

		if(selectionLeft != ""): view1Content = selectionLeft
		if(selectionRight != ""): view2Content = selectionRight

		if not self.view1.file_name():
			leftName = self.view1.substr(sublime.Region(0, min(30, self.view1.size())))
		else:
			leftName = ntpath.basename(self.view1.file_name())

		if not self.view2.file_name():
			rightName = self.view2.substr(sublime.Region(0, min(30, self.view2.size())))
		else:
			rightName = ntpath.basename(self.view2.file_name())

		diffSession = DiffSession(leftName, rightName, view1Content, view2Content)
		diffSession.diff()
		diffSession.show()
Code example #16
File: views.py Project: samlabs821/app
def list_dicts():
    files = glob.glob(APP_STATIC+"/*.xml")
    newlist = []
    for f in files:
        if basename(f) not in "Organization.xml" and basename(f) not in "dictionary.xml":
            newlist.append(basename(f))
    return render_template('dict_list.html', files=newlist, title='Dictionaries')
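One subtlety in the filter above: basename(f) in "Organization.xml" is a substring test, not an equality test, so it also drops any file whose basename happens to occur inside that literal. A short illustration, assuming basename is ntpath.basename (os.path.basename behaves the same for these names):

from ntpath import basename

# `in` on strings checks substring membership, not equality.
print(basename("static/Organization.xml") in "Organization.xml")  # True  (intended match)
print(basename("static/ion.xml") in "Organization.xml")           # True  (accidental match: 'ion.xml' is a substring)
print(basename("static/Glossary.xml") in "Organization.xml")      # False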
Code example #17
File: main.py Project: KeyWeeUsr/MrHyde
 def add(self, item, from_screen):
     self.get_screens()
     scroll = self.scr.ids.filelist
     if len(item) == 0:
         itemlist = []
         if from_screen == 'upload':
             observablelist = self.scr.ids.machine.files
         elif from_screen == 'view':
             observablelist = self.scr.ids.laboratory.files
         for i in observablelist:
             if i != '..\\':
                 itemlist.append(i)
         for i in itemlist:
             if i not in self.app.flist:
                 self.app.flist.append(i)
                 ipath = i
                 i = ntpath.basename(ipath)
                 scroll.add_widget(FileItem(text=i, way=self, path=ipath))
     else:
         i = item[0]
         if i not in self.app.flist:
             self.app.flist.append(i)
             ipath = i
             i = ntpath.basename(i)
             scroll.add_widget(FileItem(text=i, way=self, path=ipath))
Code example #18
File: fExamine.py Project: KDOTGIS/NG911
def ExamineGDB(gdb):
    import ntpath, re
    reviewpath=ntpath.basename(gdb)

    from arcpy import env, ListWorkspaces, ListDatasets, ListTables, ListFeatureClasses, GetCount_management, Compact_management, ListFields
    #set the workspace from the config file
    env.workspace = ntpath.dirname(gdb)
    ng911 = gdb
    print "geodatabases"
    print ng911
    env.workspace = ng911
    datasets = ListDatasets()
    print "Datasets:"
    for dataset in datasets:
        print "     "+ str(dataset)
    tables = ListTables()
    print " tables:"
    for table in tables:
        fcc = GetCount_management(table)
        print "     "+str(table)
    fd = datasets[0]
    fcs = ListFeatureClasses("", "", fd)
    for fc in fcs:
        fields = ListFields(fc)
        fcc = GetCount_management(fc)
        print fc +", " + str(fcc) + " features"
        for field in fields:
            print "        "+str(field.name)+", "+str(field.type)
    checkfile = reviewpath+"/"+ntpath.basename(ng911)
    topo= fd+"/NG911_Topology"
    Compact_management(ng911)
Code example #19
    def create_deployable_zip(self, path_for_zip, sources_path=None, extra_files_paths=None):
        LOGGER.info('Creating zip package for {} project'.format(self.name))
        if not os.path.exists(path_for_zip):
            os.makedirs(path_for_zip)
        project_files_path = sources_path if sources_path else self._local_sources_path
        try:
            for extra_file_path in extra_files_paths:
                shutil.copyfile(extra_file_path, os.path.join(project_files_path, ntpath.basename(extra_file_path)))
                if ntpath.basename(extra_file_path) == 'manifest.yml':
                    app_manifest_path = os.path.join(project_files_path, ntpath.basename(extra_file_path))
                    with open(app_manifest_path, 'r') as f_stream:
                        manifest_yml = yaml.load(f_stream)
                    manifest_yml['applications'][0]['env']['VERSION'] = self._version_in_manifest
                    with open(app_manifest_path, 'w') as f_stream:
                        f_stream.write(yaml.safe_dump(manifest_yml))
        except Exception as e:
            LOGGER.error('Cannot add extra files to {} project zip package'.format(self.name))
            raise e

        path_for_zip = os.path.join(path_for_zip, self.zip_name + '.zip') if self.zip_name else os.path.join(path_for_zip, self.name + '.zip')

        try:
            deployable_zip = zipfile.ZipFile(path_for_zip, 'w')
            for root, dirs, files in os.walk(project_files_path):
                for file in files:
                    deployable_zip.write(os.path.join(os.path.relpath(root, PLATFORM_PARENT_PATH), file),
                                         os.path.join(os.path.relpath(root, os.path.join(PLATFORM_PARENT_PATH, self.name)), file))
            deployable_zip.close()
        except Exception as e:
            LOGGER.error('Cannot create zip package for {}'.format(self.name))
            raise e

        LOGGER.info("Package for {} has been created".format(self.name))
Code example #20
File: VM.py Project: cmantas/cluster_python_tool
    def run_files(self, files):
        """
        puts a file in the VM and then runs it
        :param files:
        :return:
        """
        self.put_files(files)

        filename = ''
        remote_path = ''
        if not isinstance(files, (list, tuple)):
            head, tail = ntpath.split(files)
            filename = tail or ntpath.basename(head)
            remote_path = "~/scripts/" + filename
        else:
            for f in files:
                head, tail = ntpath.split(f)
                short_fname = (tail or ntpath.basename(head))
                filename += short_fname + ' '
                remote_path += "~/scripts/"+short_fname+"; "
        #generate the command that runs the desired scripts
        command = 'chmod +x %s; ' \
                  'mkdir -p scripts;' \
                  'mv %s ~/scripts/ 2>/dev/null;' \
                  '%s'\
                  % (filename, filename, remote_path)
        return self.run_command(command)
Code example #21
File: sourceWrapper.py Project: CZ-NIC/convey
    def __init__(self, file, fresh=False):
        self.file = file
        info = os.stat(self.file)
        self.hash = str(
            hash(info.st_size + round(info.st_mtime)))  # Why round: during file copying we may cut the microsecond part behind the mantissa.

        # MailDraft.setHash(self.hash)
        # MailDraft.setDir(os.path.dirname(file) + "/")

        # cache file with metadata of the source file
        Config.set_cache_dir(os.path.dirname(file) + "/" + ntpath.basename(self.file) + "_convey" + self.hash + "/")
        self.cacheFile = Config.get_cache_dir() + ntpath.basename(self.file) + ".cache"  # "cache/" +
        if os.path.isfile(self.cacheFile) and not fresh:
            print("File {} has already been processed.".format(self.file))
            # import pdb;pdb.set_trace()
            try:  # try to depickle
                self.csv = jsonpickle.decode(open(self.cacheFile, "r").read(), keys=True)
                # correction of a wrongly pickling: instead of {IPNetwork('...'): (IPNetwork('...'),
                # we see {IPNetwork('...'): (<jsonpickle.unpickler._IDProxy object at 0x...>,
                # Note that IPRange is pickled correctly.
                for prefix, o in self.csv.ranges.items():
                    l = list(o)
                    l[0] = prefix
                    self.csv.ranges[prefix] = tuple(l)
            except:
                import traceback
                print(traceback.format_exc())
                print("Cache file loading failed, let's process it all again. If you continue, cache gets deleted.")
                input()
                if Config.is_debug():
                    ipdb.set_trace()
                self._treat()
            if self.csv:
                if self.csv.source_file != self.file:  # file might have been moved to another location
                    self.csv.source_file = self.file
                try:
                    if self.csv.is_analyzed:
                        self.csv.informer.sout_info()
                    elif self.csv.is_formatted:
                        self.csv.informer.sout_info()
                        s = "It seems the file has already been formatted."  # X Continue to analysis (or you'll be asked to do format again)?"
                        print(s)
                        # if Dialogue.isYes(s):
                        #    self.csv.run_analysis()
                        # else:
                        #    self._treat()
                except BdbQuit:  # we do not want to catch quit() signal from ipdb
                    print("Stopping.")
                    quit()
                except Exception as e:
                    print(e)
                    print(
                        "Format of the file may have changed since last time. Let's process it all again. If you continue, cache gets deleted.")
                    self._treat()
            else:
                self._treat()  # process file
        else:
            if not os.path.exists(Config.get_cache_dir()):
                os.makedirs(Config.get_cache_dir())
            self._treat()  # process file
Code example #22
File: NetMeter.py Project: blochl/NetMeter
def cmd_print(text, conn_name, dir_time):
    if isinstance(text, str):
        # The command is a string
        print_cmd = text
        # The last quoted string
        try:
            print_log = text.rsplit('"', 2)[1]
        except:
            print_log = text

    else:
        # The command is a list
        print_cmd = text[:]
        print_log = ' '.join(print_cmd)
        if basename(print_cmd[0]) == 'winexe' or basename(print_cmd[0]) == 'ssh':
            print_log = print_cmd[-1]
            # So that the passed command would be quoted, as it is actually passed this way.
            print_cmd[-1] = '"' + print_cmd[-1] + '"'

        print_cmd = ' '.join(print_cmd)

    with open(dir_time + '_iperf_commands.log', 'a') as logfile:
            logfile.write(time_header() + conn_name + ': ' + print_log + '\n')

    if debug:
        print('####### Debug mode on #######\n' +
              'Command:\n' + print_cmd + '\n' +
              '#############################')
Code example #23
File: build.py Project: QueryInterface/RenderBase
    def __unpack(self, module):
        # Unpack module
        print "Unpacking " + module + "..."
        srcPath = "./3rdParty/" + module + "/"
        dstPath = "./3rdParty/_unpack/" + module + "/"
        if not os.path.exists(dstPath) or not os.path.isdir(dstPath) or not os.listdir(dstPath):
            # Extract zip files
            files = glob.glob(srcPath + "*.zip")
            for f in files:
                basename = ntpath.basename(f)
                basename = os.path.splitext(basename)[0]
                if not os.path.exists(dstPath) or not os.path.isdir(dstPath):
                    os.makedirs(dstPath)
                utils.Unzip(f, dstPath)
            # Extract tar files
            files = glob.glob(srcPath + "*.bz2")
            for f in files:
                basename = ntpath.basename(f)
                basename = os.path.splitext(basename)[0]
                dstPathForBz2 = dstPath + basename
                if not os.path.exists(dstPathForBz2) or not os.path.isdir(dstPathForBz2):
                    os.makedirs(dstPathForBz2)
                utils.Untar(f, dstPathForBz2)

        print "Unpacking " + module + "...DONE"
Code example #24
def remove_extra_raws(source_files, target_files, dry_run=True):

    counter = 0

    for i in target_files:
        file_path, file_ext = os.path.splitext(i)
        file_name = ntpath.basename(file_path)
        found = False

        for j in source_files:
            source_file_path, source_file_ext = os.path.splitext(j)
            source_file_name = ntpath.basename(source_file_path)

            if file_name == source_file_name:
                found = True
                break

        if not found:
            counter += 1
            if dry_run:
                print "(dry run) remove %s" % (file_name + file_ext)
            else:
                os.remove(i)

    print "%d files removed" % counter

    return
Code example #25
def combine_xgb_preds(model):
    train = pd.read_csv('{0}/train.csv'.format(DATA_DIR), usecols=['ID'])
    test = pd.read_csv('{0}/test.csv'.format(DATA_DIR), usecols=['ID'])

    oof = get_oof(model)
    k = 0
    for tr, te in oof:
        a = ntpath.basename(tr.replace('.train.csv', ''))
        b = ntpath.basename(te.replace('.test.csv', ''))
        assert a == b
        print a

        tr_data = pd.concat((train.ID, pd.read_csv(tr, header=None)), axis=1, ignore_index=True)
        te_data = pd.concat((test.ID, pd.read_csv(te, header=None)), axis=1, ignore_index=True)

        assert tr_data.shape[0] == train.shape[0]
        assert tr_data.shape[1] == 2
        assert te_data.shape[0] == test.shape[0]
        assert te_data.shape[1] == 2

        tr_data.columns = te_data.columns = ['ID', 'TARGET']

        meta_feat = 'set{0:02d}'.format(k+1)
        k += 1

        train = train.merge(tr_data, how='left', on='ID').rename(columns={'TARGET': meta_feat})
        test = test.merge(te_data, how='left', on='ID').rename(columns={'TARGET': meta_feat})

    yhat_train = MinMaxScaler().fit_transform(train.drop(['ID'], axis=1).mean(1).reshape(-1, 1))
    yhat_test = MinMaxScaler().fit_transform(test.drop(['ID'], axis=1).mean(1).reshape(-1, 1))

    return yhat_train, yhat_test
Code example #26
File: tasks.py Project: rich-hart/test_auto_ui
def inspect(test_id):
    cvtest = CVTest.objects.filter(id=test_id)[0]
    pdf_a_path = cvtest.pdf_a.pdf_file.path
    pdf_b_path = cvtest.pdf_b.pdf_file.path
    options = cvtest.options.option_string
    command = "ox_inspect {0}".format(options)
    cvtest.started = datetime.now()
    cvtest.save()
    with tempfile.TemporaryDirectory() as tmpdirname:
        
        pdf_a_filename = ntpath.basename(pdf_a_path)
        temp_pdf_a_path = join(tmpdirname,pdf_a_filename)
        shutil.copy2(pdf_a_path,tmpdirname)

        pdf_b_filename = ntpath.basename(pdf_b_path)
        temp_pdf_b_path = join(tmpdirname,pdf_b_filename)
        shutil.copy2(pdf_b_path,temp_pdf_b_path)


        command = "ox_inspect {0} {1} {2}".format(options or '',
                                                  temp_pdf_a_path, 
                                                  temp_pdf_b_path)
        try:
            cvtest.results = subprocess.check_output(command.split())
            cvtest.finished = datetime.now()
        except FileNotFoundError as e:
            if 'ox_inspect' in e.strerror:
                cvtest.results = "Error ox_inspect executable missing.\n"\
                                 "Make sure https://github.com/rich-hart/inspection is installed"
        finally:
            cvtest.save()
            return cvtest.results
Code example #27
File: cleanser.py Project: mgbrian/rfa
def main(argv):
    try: file_path = argv[1]
    except: _die('Please provide path to data file.')

    if argv[1] == '--help':
        print __doc__
        exit(0)

    # Split filename into name + extension
    try:
        file_extension, file_name = \
            [x[::-1] for x in ntpath.basename(file_path)[::-1].split('.', 1)]

    # File doesn't have extension
    except ValueError:
        file_extension, file_name = ('', ntpath.basename(file_path))

    # Get rows from file
    rows = get_rows(file_path, 'csv' if file_extension == 'csv' else 'flat')

    # Cleanse rows and write to new file
    cleansed_csv_file = open(file_name + '_cleansed.csv', 'wb')
    
    writer = csv.writer(cleansed_csv_file)
    for row in cleanse(rows): writer.writerow(row)

    cleansed_csv_file.close()
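The double-reverse trick above splits the filename on its last dot; str.rsplit with maxsplit=1 does the same thing directly and raises the same ValueError when there is no dot. A small sketch of the equivalence:

import ntpath

file_path = r"C:\data\archive.tar.gz"     # illustrative path

# Reverse, split on the first dot, reverse back (as in main above):
file_extension, file_name = \
    [x[::-1] for x in ntpath.basename(file_path)[::-1].split('.', 1)]

# Direct equivalent: split once from the right.
name2, ext2 = ntpath.basename(file_path).rsplit('.', 1)

assert (file_name, file_extension) == (name2, ext2) == ("archive.tar", "gz")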
Code example #28
File: PreProcess.py Project: benmccamish/AIProject2
    def preProcessDir(self, directory):
        """ Makes a new directory in /cleanData.
            Compresses files to one line with only alphabetic characters"""
        if not os.path.exists("cleanData"):         # Create directories
            os.makedirs("cleanData")
        if os.path.exists("cleanData/"+ntpath.basename(directory)):
            return # assume we can quit if our cleanData exists
        # otherwise, on with the cleaning
        if self.useStopWords == "custom" and not os.path.exists("stoplist.txt"): # create stoplist if we dont have one
            self.makeStoplist(30)
        os.makedirs("cleanData/"+ntpath.basename(directory))
        for fname in os.listdir(directory):       # Process files
            with open(directory + "/"+ fname) as dataFile:
                data = normalize(dataFile.read())
            
            
            if self.useStopWords == "web":
                data = self.stopWords("common-english-words.txt", data)
            elif self.useStopWords == "custom":
                data = self.stopWords("stoplist.txt", data)

            if self.nGramSize > 1:
                for n in xrange(2, self.nGramSize + 1):
                    data += "\n" + nGrams(data, n)
            
            with open("cleanData/"+ ntpath.basename(directory) +"/"+ fname, "w+") as cleanFile:
                cleanFile.write(data)
Code example #29
def classesToFolders(files_abs_path, partition_abs_path, pos_class_0_abs_path, pos_class_1_abs_path, neg_class_0_abs_path, neg_class_1_abs_path):

    '''separate classes into different folders'''

    pos_partition_file = "positive-partitions-test.tab"
    neg_partition_file = "negative-partitions-test.tab"

    tab_list = os.listdir(partition_abs_path)
    for tab_file in os.listdir(partition_abs_path):
        files_list = os.listdir(files_abs_path)
        
        for folder_name in os.listdir(files_abs_path):
            folder_path = os.path.join(files_abs_path, folder_name)
            
            if tab_file == pos_partition_file and folder_name == 'positives-sto':
                read_pos_tab = open(os.path.join(partition_abs_path, tab_file), 'r')

                for line in read_pos_tab:
                    line_parts= line.split()
                    file_name = line_parts[0]
                    file_class_num = line_parts[1]

                    '''copy file to the pos_class_0 folder'''
                    if file_class_num == '0':
                        for file in os.listdir(folder_path):
                            file_path = os.path.join(folder_path, file)
                            file = ntpath.splitext( ntpath.basename(file_path))[0]
                            if fnmatch.fnmatch(file, file_name):
                                shutil.copy(file_path, pos_class_0_abs_path)
                            
                    '''copy file to the pos_class_1 folder'''
                    if file_class_num == '1':                      
                        for file in os.listdir(folder_path):
                            file_path = os.path.join(folder_path, file)
                            file = ntpath.splitext( ntpath.basename(file_path))[0]
                            if fnmatch.fnmatch(file, file_name):
                                shutil.copy(file_path, pos_class_1_abs_path)
                                
            if tab_file == neg_partition_file and folder_name == 'negatives-sto':
                read_neg_tab = open(os.path.join(partition_abs_path, tab_file), 'r')
                for line in read_neg_tab:
                    line_parts= line.split()
                    file_name = line_parts[0]
                    file_class_num = line_parts[1]
                    '''copy file to the neg_class_0 folder'''
                    if file_class_num == '0':
                        for file in os.listdir(folder_path):
                            file_path = os.path.join(folder_path, file)
                            file = ntpath.splitext( ntpath.basename(file_path))[0]
                            if fnmatch.fnmatch(file, file_name):
                                shutil.copy(file_path, neg_class_0_abs_path)

                    '''copy file to the neg_class_1 folder'''
                    if file_class_num == '1':
                        for file in os.listdir(folder_path):
                            file_path = os.path.join(folder_path, file)
                            file = ntpath.splitext( ntpath.basename(file_path))[0]
                            if fnmatch.fnmatch(file, file_name):
                                shutil.copy(file_path, neg_class_1_abs_path)
Code example #30
File: account_utils.py Project: hidakanoko/hammr
def ami(account):
    myCredAccount = CredAccountAws()
    # doing field verification
    if not "x509Cert" in account:
        printer.out("x509Cert in ami account not found", printer.ERROR)
        return
    if not "x509PrivateKey" in account:
        printer.out("x509PrivateKey in ami account not found", printer.ERROR)
        return
    if not "accessKey" in account:
        printer.out("accessKey in ami account not found", printer.ERROR)
        return
    if not "secretAccessKey" in account:
        printer.out("secretAccessKey in ami account not found", printer.ERROR)
        return
    if not "accountNumber" in account:
        printer.out("accountNumber for ami account not found", printer.ERROR)
        return
    if not "name" in account:
        printer.out("name for ami account not found", printer.ERROR)
        return

    myCredAccount.accountNumber = account["accountNumber"]
    myCredAccount.secretAccessKeyID = account["secretAccessKey"]
    myCredAccount.accessKeyID = account["accessKey"]
    myCredAccount.name = account["name"]

    myCertificates = certificates()
    myCredAccount.certificates = myCertificates

    try:
        myCertificate = certificate()
        with open(account["x509Cert"], "r") as myfile:
            myCertificate.certStr = myfile.read()
        myCertificate.type_ = "x509"
        myCertificate.name = ntpath.basename(account["x509Cert"])
        myCertificates.add_certificate(myCertificate)
        myCertificate = certificate()
        with open(account["x509PrivateKey"], "r") as myfile:
            myCertificate.certStr = myfile.read()
        myCertificate.type_ = "ec2PrivateKey"
        myCertificate.name = ntpath.basename(account["x509PrivateKey"])
        myCertificates.add_certificate(myCertificate)

        if "keyPairPrivateKey" in account:
            myCertificate = certificate()
            with open(account["keyPairPrivateKey"], "r") as myfile:
                myCertificate.certStr = myfile.read()
            myCertificate.type_ = "ec2KeyPairPrivateKey"
            myCertificate.name = ntpath.basename(account["keyPairPrivateKey"])
            myCertificates.add_certificate(myCertificate)

            myCredAccount.keyPairName = os.path.splitext(myCertificate.name)[0]

    except IOError as e:
        printer.out("File error: " + str(e), printer.ERROR)
        return

    return myCredAccount
Code example #31
File: test.files.py Project: tiger-tiger/dockstudio
#!/usr/bin/env python

import re
import glob
import ntpath

names = glob.glob('project/toxim/stage/*.pdb')

names = '\n'.join([ntpath.basename(path) for path in names])

print(names)
m = re.search('1dIY.pDb', names, re.IGNORECASE)

print('match: ', m.group(0))
Code example #32
File: video.py Project: ezryd3r/MiniMedia
 def __init__(self, filename=None):
     self.filename = filename
     self.name = ntpath.basename(filename)
     self.root = self.filename.replace(self.name, '')
     self.created = os.path.getmtime(self.filename)
     self.size = os.stat(self.filename).st_size
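A caveat with deriving the parent directory via str.replace: it removes every occurrence of the name anywhere in the path, so a directory whose name contains the file name gets mangled. ntpath.dirname (or os.path.dirname) sidesteps this; a quick comparison on a hypothetical path:

import ntpath

filename = "/media/a.mp4.backups/a.mp4"      # hypothetical path
name = ntpath.basename(filename)             # 'a.mp4'

print(filename.replace(name, ''))            # /media/.backups/   (directory mangled)
print(ntpath.dirname(filename))              # /media/a.mp4.backups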
Code example #33
import wave, struct, ntpath

files = ["src/nodes/simple_cab/clean.wav", "src/nodes/simple_cab/air.wav"]

for f in files:
    wav = wave.open(f, "rb")
    length = wav.getnframes()
    headerFile = open(f.replace(".wav", ".h"), "w+")
    out = """#pragma once
namespace InteralIR {
size_t """ + ntpath.basename(f).replace(
        ".wav", "IR") + """Length = """ + str(length) + """;
float """ + ntpath.basename(f).replace(".wav", "IRL") + "[] = {"
    for s in range(0, length):
        waveData = wav.readframes(1)
        data = struct.unpack("h", waveData)[0] / 32768.0
        out += str(data)
        if s != length - 1:
            out += ","
    out += """};
float* """ + ntpath.basename(f).replace(
        ".wav", "IR") + """[] = { """ + ntpath.basename(f).replace(
            ".wav", "IRL") + """};
}"""
    headerFile.write(out)
    headerFile.close()
Code example #34
def main(file_name):
    #  videofile = args.video
    videofile = file_name
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)

    if not len(videofile):
        raise IOError('Error: must contain --video')

    # Load input video
    data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
    (fourcc, fps, frameSize) = data_loader.videoinfo()

    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()

    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()

    runtime_profile = {'dt': [], 'pt': [], 'pn': []}

    # Data writer
    save_path = os.path.join(
        args.outputpath,
        'AlphaPose_' + ntpath.basename(videofile).split('.')[0] + '.avi')
    writer = DataWriter(args.save_video, save_path,
                        cv2.VideoWriter_fourcc(*'XVID'), fps,
                        frameSize).start()

    im_names_desc = tqdm(range(data_loader.length()))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1,
             pt2) = det_processor.read()
            if orig_img is None:
                break
            if boxes is None or boxes.nelement() == 0:
                writer.save(None, None, None, None, None, orig_img,
                            im_name.split('/')[-1])
                continue

            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation

            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) *
                                                batchSize, datalen)].cuda()
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)

            hm = hm.cpu().data
            import ipdb
            ipdb.set_trace()
            writer.save(boxes, scores, hm, pt1, pt2, orig_img,
                        im_name.split('/')[-1])

            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)

        if args.profile:
            # TQDM
            im_names_desc.set_description(
                'det time: {dt:.4f} | pose time: {pt:.4f} | post processing: {pn:.4f}'
                .format(dt=np.mean(runtime_profile['dt']),
                        pt=np.mean(runtime_profile['pt']),
                        pn=np.mean(runtime_profile['pn'])))

    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print(
            '===========================> Rendering remaining images in the queue...'
        )
        print(
            '===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).'
        )
    while (writer.running()):
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
Code example #35
def run(request, api=False):
    """View iOS Files."""
    try:
        logger.info('View iOS Source File')
        exp = 'Error Description'
        file_format = None
        if api:
            fil = request.POST['file']
            md5_hash = request.POST['hash']
            mode = request.POST['type']
            viewsource_form = ViewSourceIOSApiForm(request.POST)
        else:
            fil = request.GET['file']
            md5_hash = request.GET['md5']
            mode = request.GET['type']
            viewsource_form = ViewSourceIOSForm(request.GET)
        typ = set_ext_api(fil)
        if not viewsource_form.is_valid():
            err = FormUtil.errors_message(viewsource_form)
            if api:
                return err
            return print_n_send_error_response(request, err, False, exp)
        base = Path(settings.UPLD_DIR) / md5_hash
        if mode == 'ipa':
            src1 = base / 'payload'
            src2 = base / 'Payload'
            if src1.exists():
                src = src1
            elif src2.exists():
                src = src2
            else:
                raise Exception('MobSF cannot find Payload directory')
        elif mode == 'ios':
            src = base
        sfile = src / fil
        sfile = sfile.as_posix()
        if not is_safe_path(src, sfile):
            msg = 'Path Traversal Detected!'
            if api:
                return {'error': 'Path Traversal Detected!'}
            return print_n_send_error_response(request, msg, False, exp)
        dat = ''
        sql_dump = {}
        if typ == 'm':
            file_format = 'cpp'
            with io.open(sfile, mode='r', encoding='utf8',
                         errors='ignore') as flip:
                dat = flip.read()
        elif typ == 'xml':
            file_format = 'xml'
            with io.open(sfile, mode='r', encoding='utf8',
                         errors='ignore') as flip:
                dat = flip.read()
        elif typ == 'plist':
            file_format = 'json'
            dat = biplist.readPlist(sfile)
            try:
                dat = json.dumps(dat, indent=4, sort_keys=True)
            except Exception:
                pass
        elif typ == 'db':
            file_format = 'asciidoc'
            sql_dump = read_sqlite(sfile)
        elif typ == 'txt' and fil == 'classdump.txt':
            file_format = 'cpp'
            app_dir = os.path.join(settings.UPLD_DIR, md5_hash + '/')
            cls_dump_file = os.path.join(app_dir, 'classdump.txt')
            if is_file_exists(cls_dump_file):
                with io.open(
                        cls_dump_file,  # lgtm [py/path-injection]
                        mode='r',
                        encoding='utf8',
                        errors='ignore') as flip:
                    dat = flip.read()
            else:
                dat = 'Class Dump result not Found'
        elif typ == 'txt':
            file_format = 'text'
            with io.open(sfile, mode='r', encoding='utf8',
                         errors='ignore') as flip:
                dat = flip.read()
        else:
            if api:
                return {'error': 'Invalid Parameters'}
            return HttpResponseRedirect('/error/')
        context = {
            'title': escape(ntpath.basename(fil)),
            'file': escape(ntpath.basename(fil)),
            'type': file_format,
            'data': dat,
            'sqlite': sql_dump,
            'version': settings.MOBSF_VER,
        }
        template = 'general/view.html'
        if api:
            return context
        return render(request, template, context)
    except Exception as exp:
        logger.exception('Error Viewing Source')
        msg = str(exp)
        exp = exp.__doc__
        if api:
            return print_n_send_error_response(request, msg, True, exp)
        return print_n_send_error_response(request, msg, False, exp)
Code example #36
speciesMappings = {}
speciesMappings['BLANK'] = 'empty'
speciesMappings[''] = 'unlabeled'


#%% Enumerate images, confirm filename uniqueness

imageFullPaths = glob.glob(os.path.join(imageBaseDir,r'**\*.JPG'),recursive=True)

print('Counted {} images'.format(len(imageFullPaths)))

filenamesOnly = set()

for p in imageFullPaths:
    
    fn = ntpath.basename(p)
    assert fn not in filenamesOnly
    filenamesOnly.add(fn)
    
print('Finished uniqueness checking')


#%% Update metadata filenames to include site and camera folders, check existence

filenamesToRows = {}

startTime = time.time()

newRows = []

# iRow = 0; row = metadataTable.iloc[iRow]
Code example #37
File: evtlogs.py Project: tupipa/volatility
    def parse_evt_info(self, name, buf, rawtime=False):

        loc = buf.find("LfLe")

        ## Skip the EVTLogHeader at offset 4. Here you can also parse
        ## and print the header values if you like.
        if loc == 4:
            loc = buf.find("LfLe", loc + 1)

        while loc != -1:

            ## This record's data (and potentially the data for records
            ## that follow it, so we'll be careful to chop it in the right
            ## places before future uses).
            rec = buf[loc - 4:]

            ## Use a buffer AS to instantiate the object
            bufferas = addrspace.BufferAddressSpace(self._config, data=rec)
            evtlog = obj.Object("EVTRecordStruct", offset=0, vm=bufferas)
            rec_size = bufferas.profile.get_obj_size("EVTRecordStruct")

            ## Calculate the SID string. If the SidLength is zero, the next
            ## field (list of strings) starts at StringOffset. If the SidLength
            ## is non-zero, use the data of length SidLength to determine the
            ## SID string and the next field starts at SidOffet.
            if evtlog.SidLength == 0:
                end = evtlog.StringOffset
                sid_string = "N/A"
            else:
                ## detect manged records based on invalid SID length
                if evtlog.SidLength > 68:
                    loc = buf.find("LfLe", loc + 1)
                    continue
                ## these should be appropriately sized SIDs
                end = evtlog.SidOffset
                sid_string = self.get_sid_string(rec[end:end +
                                                     evtlog.SidLength])

            computer_name = ""
            source = ""

            items = rec[rec_size:end].split("\x00\x00")
            source = utils.remove_unprintable(items[0])
            if len(items) > 1:
                computer_name = utils.remove_unprintable(items[1])

            strings = rec[evtlog.StringOffset:].split("\x00\x00",
                                                      evtlog.NumStrings)
            messages = []
            for s in range(min(len(strings), evtlog.NumStrings)):
                messages.append(utils.remove_unprintable(strings[s]))

            # We'll just say N/A if there are no messages, otherwise join them
            # together with semi-colons.
            if messages:
                msg = ";".join(messages)
                msg = msg.replace("|", "%7c")
            else:
                msg = "N/A"

            # Records with an invalid timestamp are ignored entirely
            if evtlog.TimeWritten != None:

                fields = [
                    str(evtlog.TimeWritten)
                    if not rawtime else evtlog.TimeWritten,
                    ntpath.basename(name), computer_name, sid_string, source,
                    str(evtlog.EventID),
                    str(evtlog.EventType), msg
                ]

                yield fields

            ## Scan to the next record signature
            loc = buf.find("LfLe", loc + 1)
Code example #38
def test_annulus_bn_msh():
    curr_file = os.path.dirname(
        os.path.realpath(__file__)) + "/output/" + fname + "/" + fname + ".msh"

    assert mesh_file_test(curr_file), "%s does not match the model answer" % (
        ntpath.basename(curr_file).rstrip())
Code example #39
File: sdkcomponent.py Project: zrzhd/qtsdk
 def path_leaf(self, path):
     head, tail = ntpath.split(path)
     return tail or ntpath.basename(head)
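The tail or ntpath.basename(head) fallback exists for paths that end in a separator: ntpath.split then returns an empty tail, and taking the basename of the head recovers the last component. A short standalone demonstration:

import ntpath

def path_leaf(path):
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)

print(path_leaf("C:\\tools\\qt\\bin"))     # bin
print(path_leaf("C:\\tools\\qt\\bin\\"))   # bin       (empty tail, fallback used)
print(path_leaf("installer/packages/"))    # packages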
Code example #40
    def handle(self, *args, **options):
        products = Product.objects.all()
        buyersguide_page = self.get_or_create_buyers_guide()

        for product in products:
            # 1. Create ProductPage out of this product
            product = product.specific  # Get the specific class

            # Always refresh the buyersguide_page to update treebeards pathing
            buyersguide_page.refresh_from_db()

            # Check if ProductPage exists. If it does, continue on.
            # This check will allow us to run this script more than once if needed
            if ProductPage.objects.filter(slug=product.slug).exists():
                self.debug_print(
                    f"Product '{product.slug}' already exists, skipping.")
                continue

            if isinstance(product, SoftwareProduct):
                new_product_page = SoftwareProductPage()
                specific_fields = [
                    'medical_privacy_compliant', 'easy_to_learn_and_use',
                    'handles_recordings_how', 'recording_alert',
                    'recording_alert_helptext',
                    'medical_privacy_compliant_helptext', 'host_controls',
                    'easy_to_learn_and_use_helptext'
                ]
            elif isinstance(product, GeneralProduct):
                new_product_page = GeneralProductPage()
                specific_fields = [
                    'camera_device', 'camera_app', 'microphone_device',
                    'microphone_app', 'location_device', 'location_app',
                    'personal_data_collected', 'biometric_data_collected',
                    'social_data_collected', 'how_can_you_control_your_data',
                    'data_control_policy_is_bad', 'track_record_choices',
                    'company_track_record', 'track_record_is_bad',
                    'track_record_details', 'offline_capable',
                    'offline_use_description', 'uses_ai',
                    'ai_uses_personal_data', 'ai_is_transparent', 'ai_helptext'
                ]
            self.debug_print(
                f"Treating '{product.slug}' as {new_product_page.__class__.__name__}"
            )

            # Apply the fields that are different or may cause issues if copied directly from one model to another
            new_product_page.slug_en = product.slug
            new_product_page.title = product.name
            new_product_page.title_en = product.name
            new_product_page.product_url = product.url
            new_product_page.cloudinary_image = product.cloudinary_image
            new_product_page.live = not product.draft  # If product is draft, it shall not be live.

            # These are the common fields between SoftwareProductPages and GeneralProductPages
            fields = specific_fields + [
                'slug', 'privacy_ding', 'adult_content', 'uses_wifi',
                'uses_bluetooth', 'review_date', 'company', 'blurb', 'price',
                'worst_case', 'signup_requires_email', 'signup_requires_phone',
                'signup_requires_third_party_account',
                'signup_requirement_explanation',
                'how_does_it_use_data_collected',
                'data_collection_policy_is_bad',
                'user_friendly_privacy_policy',
                'show_ding_for_minimum_security_standards',
                'meets_minimum_security_standards', 'uses_encryption',
                'uses_encryption_helptext', 'security_updates',
                'security_updates_helptext', 'strong_password',
                'strong_password_helptext', 'manage_vulnerabilities',
                'manage_vulnerabilities_helptext', 'privacy_policy',
                'privacy_policy_helptext', 'phone_number', 'live_chat',
                'email', 'twitter'
            ]

            self.debug_print("\tSetting fields:")
            for field in fields:
                # Loop through every field for this product and copy the value
                # from the Product model to the Page model.
                self.debug_print("\t\t", field, " as ",
                                 getattr(product, field))
                setattr(new_product_page, field, getattr(product, field))

            self.debug_print(f"Product has image? {bool(product.image)}")
            self.debug_print(
                f"Product has cloudinary image? {bool(product.cloudinary_image)}"
            )

            # Get the image file field, and convert it into a WagtailImage object
            if product.image:
                # Check if there is an image file. If there isn't one, don't try to copy the
                # FieldFile to a WagtailImage object.
                try:
                    image_file = product.image.file
                except FileNotFoundError:
                    image_file = None

                if image_file:
                    mime = MimeTypes()
                    mime_type = mime.guess_type(
                        product.image.file.name)  # -> ('image/jpeg', None)
                    if mime_type:
                        mime_type = mime_type[0].split('/')[1].upper()
                    else:
                        # Default to a JPEG mimetype.
                        mime_type = 'JPEG'
                    # Create an image out of the FileField.
                    pil_image = PILImage.open(product.image.file)
                    f = BytesIO()
                    pil_image.save(f, mime_type)
                    # Store the image as a WagtailImage object
                    new_image_name = ntpath.basename(product.image.file.name)
                    wagtail_image = WagtailImage.objects.create(
                        title=new_image_name,
                        file=ImageFile(f, name=new_image_name))
                    # Associate new_product_page.image with wagtail_image
                    new_product_page.image = wagtail_image

            # Add the new page as a child to BuyersGuidePage. This will add a
            # `path` to the new_product_page and place it in the Wagtail Tree
            # using Django Treebeard
            buyersguide_page.add_child(instance=new_product_page)

            # Save revision and/or publish so we can add Orderables to this page.
            new_product_page.save()
            new_product_page.save_revision()

            self.debug_print("\tCreated", new_product_page)

            # Loop through all the m2ms and create Orderable objects for this new page type
            # Add privacy policy links
            for privacy_link in product.privacy_policy_links.all():
                new_orderable = ProductPagePrivacyPolicyLink()
                new_orderable.page = new_product_page
                new_orderable.label = privacy_link.label
                new_orderable.url = privacy_link.url
                new_orderable.save()
                new_product_page.privacy_policy_links.add(new_orderable)
                self.debug_print("\tPrivacy Orderables added")
            # Add product categories
            for category in product.product_category.all():
                new_orderable = ProductPageCategory()
                new_orderable.product = new_product_page
                new_orderable.category = category
                new_orderable.save()
                new_product_page.product_categories.add(new_orderable)
                self.debug_print("\tCategory Orderables added")
            # Add updates
            for update in product.updates.all():
                new_orderable = ProductUpdates()
                new_orderable.page = new_product_page
                new_orderable.update = update
                new_orderable.save()
                new_product_page.updates.add(new_orderable)
                self.debug_print("\tUpdate Orderables added")

            # Attach a Votes object to each page if `Page.get_or_create_votes()` exists.
            if hasattr(new_product_page, 'get_or_create_votes'):
                new_product_page.get_or_create_votes()
                # Use .to_dict() to pull out the old aggregated votes
                product_dict = product.to_dict()
                votes = product_dict.get('votes', None)
                if votes:
                    votes = votes.get('creepiness').get('vote_breakdown')
                    self.debug_print(votes)
                    values = [x for (i, x) in sorted(votes.items())]
                    product_total = sum([
                        x * ((i + 1) * 20 - 10) for i, x in enumerate(values)
                    ])
                    self.debug_print(
                        f'\tOriginal votes: {values} (total score: {product_total})'
                    )
                else:
                    # Default vote "bin"
                    values = [0, 0, 0, 0, 0]
                    product_total = 0

                # These writes rely on `values`/`product_total` and on the votes
                # object created above, so they belong inside the hasattr() branch.
                new_product_page.votes.set_votes(values)
                new_product_page.creepiness_value = product_total
                new_product_page.save()
                self.debug_print(
                    f'\tNew product votes: {new_product_page.get_or_create_votes()}'
                )

            if not product.draft:
                new_product_page.live = True
                new_product_page.save_revision().publish()
            else:
                new_product_page.save_revision()

            # Always good to refresh from db when using Django Treebeard.
            buyersguide_page.refresh_from_db()

        time.sleep(1)

        # Once all the ProductPages are added, add related_products
        # By writing a secondary for loop we can avoid attaching a legacy_product
        # to each ProductPage because they'll have slugs in common.
        self.debug_print("\nFinal step: Adding related products\n")

        # Loop through every ProductPage we now have.
        for product_page in ProductPage.objects.all():
            # Fetch the PNI Product that this page was created from.
            try:
                product = Product.objects.get(slug=product_page.slug)
            except Product.DoesNotExist:
                self.debug_print(
                    f"Skipping {product_page} because a ProductPage.slug={product_page.slug} was not found"
                )  # noqa
                continue
            # Loop through all the Product.related_products
            for related_product in product.related_products.all():
                try:
                    # Find the related ProductPage based on the correct slug.
                    related_page = ProductPage.objects.get(
                        slug=related_product.slug)
                except ProductPage.DoesNotExist:
                    self.debug_print("Missing product page", product_page)
                    continue
                # Create a new Orderable for the Related Product. This provides
                # a higher quality editing experience for Wagtail editors/admins.
                new_related_product = RelatedProducts()
                new_related_product.page = product_page
                new_related_product.related_product = related_page
                new_related_product.save()
                product_page.related_product_pages.add(new_related_product)
                self.debug_print("\tAdded related product page:", related_page)
Code example #41
0
File: QIM_encode.py Project: raghu429/LiDAR_QIM
# All globals are initialized in QIM_helper.py

if __name__ == '__main__':

    # ROS node initialization
    rospy.init_node('QIM_encode', anonymous=True)

    # Traverse a given directory and work on each file
    # Directory of Kitti binary file

    data_directory = './QIM_data/test_data/'

    for filename in os.listdir(data_directory):
        if filename.endswith(".bin") and filename.startswith("000071"):
            # if filename.endswith(".bin"):
            working_file_name = ntpath.basename(
                os.path.join(data_directory, filename)).split('.')[0]
            print('file currently working on %s' % (working_file_name))

            qim_encoded_pointcloud = []
            #****************************************************************************************************************************
            # Read & preprocess point cloud
            #****************************************************************************************************************************

            points_list = load_pc_from_bin(
                os.path.join(data_directory, filename))
            pc_camera_angle_filtered = filter_camera_angle(points_list[:, :3])

            pc_groundplane_filtered = filter_groundplane(
                np.copy(pc_camera_angle_filtered), groundplane_level)

            working_file_name_pcd = working_file_name + '.npy'
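For reference, a tiny standalone sketch of the stem-extraction pattern used above (the second file name is made up); split('.')[0] drops everything after the first dot, whereas os.path.splitext() only strips the final extension:

import ntpath
import os

path = "./QIM_data/test_data/000071.bin"
print(ntpath.basename(path).split('.')[0])          # -> '000071'
print(os.path.splitext(ntpath.basename(path))[0])   # -> '000071'
print(ntpath.basename("scan.v2.bin").split('.')[0])          # -> 'scan'
print(os.path.splitext(ntpath.basename("scan.v2.bin"))[0])   # -> 'scan.v2'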
Code example #42
0
File: bucky.py Project: atabayev/asar
def send_template():
    try:
        # set up the SMTP server
        s = smtplib.SMTP(host='smtp.mail.ru', port=2525)
        s.starttls()
        s.login(mail_entry.get(), pass_entry.get())

        # Compare credentials by value ("is not" tests object identity, not equality).
        if MY_ADDRESS + " " + PASSWORD != mail_entry.get() + " " + pass_entry.get():
            file = open("smtp.txt", mode="a")

            log = str(datetime.now()) + " Login as:" + mail_entry.get()

            print(log)

            file.seek(0)  # <- This is the missing piece
            file.truncate()

            file.write(mail_entry.get() + " " + pass_entry.get() + "\n")

            file.close()

        is_hidden = var.get()
        print(is_hidden)
        # Check whether the checkbox is ticked
        if not var.get():
            counts = 0
            counter = 0
            # For each contact, send the email:

            for name, email, text in zip(names, emails, texts):
                # messagebox.showinfo("Contact", email)
                try:
                    if counts == int(count_entry.get()):
                        print("Please, wait, the timer is working...")
                        time.sleep(60 * int(interval_entry.get()))
                        counts = 0

                    counts += 1

                    # set up the SMTP server
                    s = smtplib.SMTP(host='smtp.mail.ru', port=2525)
                    s.starttls()
                    s.login(mail_entry.get(), pass_entry.get())

                    msg = MIMEMultipart()  # create a message

                    filename = attachment_text.cget(
                        "text")  # attachment_button.

                    if filename:
                        attachment = open(filename, 'rb')

                        part = MIMEBase('application', 'octet-stream')
                        part.set_payload((attachment).read())
                        encoders.encode_base64(part)
                        part.add_header(
                            'Content-Disposition', "attachment; filename= %s" %
                            ntpath.basename(filename))

                        msg.attach(part)

                        attachment.close()

                        del attachment

                    message_temp = MIMEText(
                        Template(template_text.get(
                            "1.0", END)).substitute(PERSON_NAME=name), 'plain')

                    msg.attach(message_temp)

                    counter += 1
                    log = str(datetime.now()) + " " + str(
                        counter) + " Sent to " + email + " Filename: " + ""

                    print(log)

                    file = open("logs.txt", mode="a", encoding="utf-8")

                    file.write(log + "\n")

                    file.close()

                    # setup the parameters of the message
                    msg['From'] = mail_entry.get()
                    msg['To'] = email
                    msg['Subject'] = theme_entry.get()

                    # add in the message body
                    # msg.attach(msg.as_string())

                    # send the message via the server set up earlier.
                    s.send_message(msg)
                    del msg

                except Exception as e1:
                    log = str(datetime.now()) + " Error#51:" + str(e1.args)
                    file = open("logs.txt", mode="a", encoding="utf-8")
                    file.write(log + "\n")
                    file.close()
                    print("Error#5: " + str(e1.args))
                    pass
            # Terminate the SMTP session and close the connection
            s.quit()

        else:
            counts = 0
            counter = 0
            # For each contact, send the email:

            names_str = ",".join(names)
            emails_str = ",".join(emails)

            # //texts_str in
            # //zip(, texts):

            try:
                if counts == int(count_entry.get()):
                    print("Please, wait, the timer is working...")
                    time.sleep(60 * int(interval_entry.get()))
                    counts = 0

                counts += 1

                # set up the SMTP server
                s = smtplib.SMTP(host='smtp.mail.ru', port=2525)
                s.starttls()
                s.login(mail_entry.get(), pass_entry.get())

                msg = MIMEMultipart()  # create a message

                filename = attachment_text.cget("text")  # attachment_button.
                if filename:
                    attachment = open(filename, 'rb')

                    part = MIMEBase('application', 'octet-stream')
                    part.set_payload((attachment).read())
                    encoders.encode_base64(part)
                    part.add_header(
                        'Content-Disposition',
                        "attachment; filename= %s" % ntpath.basename(filename))

                    msg.attach(part)

                    attachment.close()

                    del attachment

                message_temp = Template(template_text.get("1.0", END))

                if names[counts]:
                    message_temp = MIMEText(
                        Template(template_text.get(
                            "1.0", END)).substitute(PERSON_NAME=names[counts]),
                        'plain')

                msg.attach(message_temp)

                counter += 1
                log = str(datetime.now()) + " " + str(
                    counter) + " Sent to " + emails_str + " Filename: " + ""

                print(log)

                file = open("logs.txt", mode="a", encoding="utf-8")

                file.write(log + "\n")

                file.close()

                # setup the parameters of the message
                msg['From'] = mail_entry.get()
                msg['Bcc'] = emails_str
                msg['Subject'] = theme_entry.get()

                # add in the message body
                # msg.attach(msg.as_string())

                # send the message via the server set up earlier.
                s.send_message(msg)
                del msg

                # Terminate the SMTP session and close the connection
                s.quit()

            except Exception as e2:
                log = str(datetime.now()) + " Error#52:" + str(e2.args)
                file = open("logs.txt", mode="a", encoding="utf-8")
                file.write(log + "\n")
                file.close()
                print("Error#5: " + str(e2.args))
                pass

        print("Sent")
        messagebox.showinfo("Messages", "Sent!")

    except Exception as e3:
        log = str(datetime.now()) + " Error#6:" + str(e3.args)
        file = open("logs.txt", mode="a", encoding="utf-8")
        file.write(log + "\n")
        file.close()
        print("Error#6: " + str(e3.args))
Code example #43
0
File: trim_upload.py Project: dhewzulla/bcs3uploader
import os.path
import ntpath
import sys
import time
import bcs3util
import datetime

args = sys.argv[1:]
inputvideo_filepath = args[0]

timestamp = int(time.time())

inputvideo_basefilepath, inputvideo_extension = os.path.splitext(
    inputvideo_filepath)

inputvideo_filename = ntpath.basename(inputvideo_filepath)

inputvideo_filename_base, inputvideo_extension = os.path.splitext(
    inputvideo_filename)

# os.path.splitext() keeps the leading dot in the extension, so it is not re-added here.
inputvideo_trimmedfilepath = '/tmp/' + inputvideo_basefilepath + '_' + str(
    timestamp) + inputvideo_extension

inputvideo_title = inputvideo_filename_base

bcs3util.log("**************" +
             datetime.date.today().strftime("%I:%M%p on %B %d, %Y") + "******")

bcs3util.log("inputvideo_filepath:" + inputvideo_filepath)

bcs3util.log("inputvideo_extension:" + inputvideo_extension)
Code example #44
0
def feature_extraction_train_regression(folder_name,
                                        mid_window,
                                        mid_step,
                                        short_window,
                                        short_step,
                                        model_type,
                                        model_name,
                                        compute_beat=False):
    """
    This function is used as a wrapper to segment-based audio
    feature extraction and classifier training.
    ARGUMENTS:
        folder_name:        path of directory containing the WAV files
                         and Regression CSVs
        mid_window, mid_step:   mid-term window length and step
        short_window, short_step:   short-term window length and step
        model_type:        "svm" or "knn" or "randomforest"
        model_name:        name of the model to be saved
    RETURNS:
        None. Resulting regression model along with the respective
        model parameters are saved on files.
    """
    # STEP A: Feature Extraction:
    features, _, filenames = \
        aF.multiple_directory_feature_extraction([folder_name], mid_window,
                                                 mid_step, short_window,
                                                 short_step,
                                                 compute_beat=compute_beat)
    features = features[0]
    filenames = [ntpath.basename(f) for f in filenames[0]]
    f_final = []

    # Read CSVs:
    csv_files = glob.glob(folder_name + os.sep + "*.csv")
    regression_labels = []
    regression_names = []
    f_final = []
    for c in csv_files:
        cur_regression_labels = []
        f_temp = []
        # open the csv file that contains the current target value's annotations
        with open(c, 'rt') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            for row in csv_reader:
                if len(row) == 2:
                    # ... and if the current filename exists
                    # in the list of filenames
                    if row[0] in filenames:
                        index = filenames.index(row[0])
                        cur_regression_labels.append(float(row[1]))
                        f_temp.append(features[index, :])
                    else:
                        print("Warning: {} not found "
                              "in list of files.".format(row[0]))
                else:
                    print(
                        "Warning: Row with unknown format in regression file")

        f_final.append(np.array(f_temp))
        # cur_regression_labels is the list of values
        # for the current regression problem
        regression_labels.append(np.array(cur_regression_labels))
        # regression task name
        regression_names.append(ntpath.basename(c).replace(".csv", ""))
        if len(features) == 0:
            print("ERROR: No data found in any input folder!")
            return

    # TODO: ARFF WRITE????
    # STEP B: classifier Evaluation and Parameter Selection:
    if model_type == "svm" or model_type == "svm_rbf":
        model_params = np.array(
            [0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0])
    elif model_type == "randomforest":
        model_params = np.array([5, 10, 25, 50, 100])

    errors = []
    errors_base = []
    best_params = []

    for iRegression, r in enumerate(regression_names):
        # get optimal classifier parameter:
        print("Regression task " + r)
        bestParam, error, berror = evaluate_regression(
            f_final[iRegression], regression_labels[iRegression], 100,
            model_type, model_params)
        errors.append(error)
        errors_base.append(berror)
        best_params.append(bestParam)
        print("Selected params: {0:.5f}".format(bestParam))

        features_norm, mean, std = normalize_features([f_final[iRegression]])

        # STEP C: Save the model to file
        if model_type == "svm":
            classifier, _ = train_svm_regression(
                features_norm[0], regression_labels[iRegression], bestParam)
        if model_type == "svm_rbf":
            classifier, _ = train_svm_regression(
                features_norm[0],
                regression_labels[iRegression],
                bestParam,
                kernel='rbf')
        if model_type == "randomforest":
            classifier, _ = train_random_forest_regression(
                features_norm[0], regression_labels[iRegression], bestParam)

        if model_type == "svm" or model_type == "svm_rbf" \
                or model_type == "randomforest":
            with open(model_name + "_" + r, 'wb') as fid:
                cPickle.dump(classifier, fid)
            save_path = model_name + "_" + r + "MEANS"
            save_parameters(save_path, mean, std, mid_window, mid_step,
                            short_window, short_step, compute_beat)

    return errors, errors_base, best_params
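A hypothetical call to the wrapper above (folder and model paths are placeholders): it extracts segment features from the WAV files in the folder, trains one regressor per annotation CSV found next to them, and returns the cross-validation errors and selected parameters:

errors, errors_base, best_params = feature_extraction_train_regression(
    "data/emotion_wavs/",            # WAV files plus one CSV per regression target
    mid_window=1.0, mid_step=1.0,    # mid-term analysis window / step (seconds)
    short_window=0.05, short_step=0.05,
    model_type="svm_rbf",
    model_name="models/emotion")     # models saved as models/emotion_<target>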
Code example #45
0
# This excerpt assumes pathcoleta, now, subject, email, send_to_email and cc
# are defined earlier in the script; the imports below are what it relies on.
import smtplib
import ntpath
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

message = 'Ola! Segue o relatorio extraido do portal da transportadora em formato CSV. Att., Rodrigo Matos.'
file_location = pathcoleta + 'aquila_' + now.strftime('%d%m%y') + '.csv'
password = '******'

msg = MIMEMultipart()

msg['Subject'] = subject
msg['From'] = email
msg['To'] = ', '.join(send_to_email)
msg['Cc'] = ', '.join(cc)

body = message

msg.attach(MIMEText(body, 'plain'))

filename = ntpath.basename(file_location)
attachment = open(file_location, "rb")

part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)

msg.attach(part)

server = smtplib.SMTP('correio.mileniofarma.com.br', 587)
server.starttls()
server.login(email, password)
text = msg.as_string()
server.sendmail(email, send_to_email + cc, text)
server.quit()
Code example #46
0
File: common.py Project: zcg19/speakeasy
 def get_base_name(self):
     p = self.get_emu_path()
     img = ntpath.basename(p)
     bn = os.path.splitext(img)[0]
     return bn
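A quick, hypothetical illustration of the pattern in get_base_name(): ntpath.basename() extracts the image name from a Windows-style path even when the code runs on a POSIX host, and os.path.splitext() then drops the extension (the example path is not from the speakeasy source):

import ntpath
import os

emu_path = r"C:\Windows\System32\kernel32.dll"   # illustrative path
img = ntpath.basename(emu_path)                  # 'kernel32.dll'
bn = os.path.splitext(img)[0]                    # 'kernel32'
print(bn)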
Code example #47
0
def run():
    window_len = 1023
    
    plt.figure() # create a new figure
    w = np.hanning(window_len)
    plt.xlim(0,window_len)
    plt.xticks([0,window_len/2, window_len], ['-M','0','M'])
    plt.ylabel("Weighting w(n)")
    plt.xlabel("Sample")
    plt.plot(w)
    
    
    plt.savefig('/home/user/Desktop/hann_window.pdf', bbox_inches='tight')    

    plt.show()
    
    exit();

    fs = 22050 # sampling rate
    ws = 1024 # window size
    hs = 512 # hop size
    data_path = "/home/user/Desktop/masterarbeit/data/" # where the mp3-files are
    #filenames = [ 'RTL-h5', 'RTL-h17', 'Sat1-h16' ]
    filenames = [ 'RTL-h5', 'RTL-h17', 'ZDF-h17' ]
    
    total_len_sec = 3600.0
    total_len_min = total_len_sec / 60.0
        
    clip_size_seconds = 30.0
    yMax = 70.0
    
    plt.rcParams.update({'font.size': 14})
    
    f, axarr = plt.subplots(len(filenames), sharex=True)

    for file_num, filename in enumerate(filenames):
        loudness = np.loadtxt(data_path + "LoudnessTotal/" + filename + ".LoudnessTotal", delimiter=',')[ : ,1]
        annotation_file = data_path + "annotations_block/" + filename + ".label"


        # calculate mean
        clip_size = int(clip_size_seconds * (loudness.shape[0] / total_len_sec))
        print str(clip_size_seconds) + " seconds = " + str(clip_size) + " frames"
        loudness_shortend = loudness[:(loudness.shape[0] / clip_size) * clip_size] # remove elements at the end in order to get a shape that is a multiple of clip_size
        loudness_reshaped = loudness_shortend.reshape(-1, clip_size)
        loudness_clip = np.mean(loudness_reshaped, axis=1)
    
        axarr[file_num].plot(np.linspace(0, total_len_min, loudness_clip.shape[0]), loudness_clip, color='blue',  linewidth=2.0)
        axarr[file_num].plot(np.linspace(0, total_len_min, loudness.shape[0]), loudness,  color='blue', linewidth=0.2, alpha=0.10)
        axarr[file_num].set_ylim(0, yMax)
        axarr[file_num].set_title("Hour: " + ntpath.basename(filename))
        axarr[file_num].set_ylabel("Loudness")
        #axarr[file_num].plot([0, total_len_min], [np.mean(loudness), np.mean(loudness)], color='red')
        axarr[file_num].fill_between(np.linspace(0, total_len_min, loudness_clip.shape[0]), loudness_clip, np.mean(loudness))
    
    
        # read annotion file
        anno = read_commercial_annotations(annotation_file, commercial_id='2;')
        #print annotations
        for a in anno:
            a_start = a[0] * total_len_min / total_len_sec
            a_end= (a[0]+a[1]) * total_len_min / total_len_sec
            axarr[file_num].fill_between([a_start, a_end], 0, yMax, facecolor='gray', alpha=0.4)
    
    
    f.subplots_adjust(hspace=0.25)
    plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
    plt.xlabel("Time [min]")
    f.set_size_inches(13, 11)
    
    
    plt.savefig('/home/user/Desktop/loudness_during_commercial_block.png', bbox_inches='tight')    
    plt.show()

    
    exit()

    if stop:
        signal = np.frombuffer(audio_converter.decode_to_memory(filename, sample_rate=fs, skip=start, maxlen=stop-start),\
                dtype=np.float32)
    else: 
        signal = np.frombuffer(audio_converter.decode_to_memory(filename, sample_rate=fs), dtype=np.float32)

    magspec = abs(Spectrogram.spectrogram(signal, ws=ws, hs=hs))
    print "magpsec shape: %s"%(magspec.shape,)
    print "signal shape: %s"%(signal.shape,)

    # save magspec to /tmp/
    np.savez('/tmp/magspec.npz', magspec)
    
    # downsample
    signal_downsampled = signal[::16384]
    print "signal_downsampled shape: %s"%(signal_downsampled.shape,)

    # set ticks, ticklabels in seconds
    length = magspec.shape[1]
    
    length_sec = Spectrogram.frameidx2time(length, ws=ws, hs=hs, fs=fs)
    tickdist_seconds = 60 # one tick every n seconds
    tickdist_labels_in_minutes = 60 # for seconds use 1; for minutes 60
    numticks = length_sec/tickdist_seconds
    tick_per_dist = int(round(length / numticks))
    xtickrange = range(length)[::tick_per_dist]
    xticklabels = ["%d"%(round(Spectrogram.frameidx2time(i, ws=ws, hs=hs, fs=fs))/tickdist_labels_in_minutes) for i in xtickrange]

    #plt.subplot(211)
    #plt.plot(signal_downsampled)
    #plt.xticks(xtickrange, xticklabels, rotation=70, fontsize=8)
    #plt.title("signal")
    

    # energy
    clip_size = 512
    energy = np.sum(magspec, axis=0)
    print "energy shape: %s"%(energy.shape)
    energy_shortend = energy[:(energy.shape[0] / clip_size) * clip_size] # remove elements at the end in order to get a shape that is a multiple of clip_size
    print "energy_shortend shape: %s"%(energy_shortend.shape)
    energy_reshaped = energy_shortend.reshape(-1, clip_size)
    print "energy_reshaped shape: " + str(energy_reshaped.shape)
    energy_clip = np.mean(energy_reshaped, axis=1)
    print "energy_reshaped shape: " + str(energy_clip.shape)
    
    #TODO: use data of RTL-h8.LoudnessTotal

    #spectrogram_xscale = plt.xlim()  # just to scale it the same way the spectrograms were scaled
    plt.subplot(111)
    plt.plot(energy_clip)
    #plt.xlim(spectrogram_xscale)
    #plt.xticks(xtickrange, xticklabels, rotation=70, fontsize=8)
    plt.title("energy")
    
    plt.suptitle("File: " + ntpath.basename(filename) + "  |  Time: " + str(round(start/60,1)) + " - " + str(round((start+length_sec)/60, 1)) + " [min]")
    plt.show()
Code example #48
0
File: wrapper.py Project: sk-sahu/snakemake-wrappers
__author__ = "Felix Mölder"
__copyright__ = "Copyright 2020, Felix Mölder"
__email__ = "*****@*****.**"
__license__ = "MIT"

from snakemake.shell import shell
from pathlib import Path
import ntpath

reads = "-f {r1}".format(r1=snakemake.input.r1)
read_prefix = ntpath.basename(snakemake.input.r1).split("_R1")[0]
intermediate_output_r1 = snakemake.output.r1.strip(".gz")

if snakemake.input.get("r2", ""):
    seqmode = "pair"
    reads = "{reads} -r {r2}".format(reads=reads, r2=snakemake.input.r2)
    intermediate_output_r2 = snakemake.output.r2.strip(".gz")
else:
    seqmode = "single"

primers = snakemake.params.primers
outdir = Path(snakemake.output[0]).parent.resolve()
log = snakemake.log_fmt_shell(stdout=True, stderr=True)


ptrimmer_params = "-s {mode} {reads} -a {primers} -o {out}".format(
    mode=seqmode, reads=reads, primers=primers, out=outdir
)

process_r1 = "mv {outdir}/{prefix}_trim_R1.fq {out}".format(
    outdir=outdir, prefix=read_prefix, out=intermediate_output_r1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="/data/physionet_sleep",
                        help="File path to the CSV or NPY file that contains walking data.")
    parser.add_argument("--output_dir", type=str, default="/data/physionet_sleep/eeg_fpz_cz",
                        help="Directory where to save outputs.")
    parser.add_argument("--select_ch", type=str, default="EEG Fpz-Cz",
                        help="File path to the trained model used to estimate walking speeds.")
    args = parser.parse_args()

    print(args.output_dir)
    # Output dir
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    else:
        shutil.rmtree(args.output_dir)
        os.makedirs(args.output_dir)

    # Select channel
    select_ch = args.select_ch
    print(select_ch)
    # Read raw and annotation EDF files
    psg_fnames = glob.glob(os.path.join(args.data_dir, "*PSG.edf"))
    ann_fnames = glob.glob(os.path.join(args.data_dir, "*Hypnogram.edf"))
    psg_fnames.sort()
    ann_fnames.sort()
    psg_fnames = np.asarray(psg_fnames)
    print(psg_fnames)
    ann_fnames = np.asarray(ann_fnames)
    print("Hii1\n")
    print(ann_fnames)	
    for i in range(len(psg_fnames)):
        # if not "ST7171J0-PSG.edf" in psg_fnames[i]:
        #     continue

        print("hii2")
        raw = read_raw_edf(psg_fnames[i], preload=True, stim_channel=None)
        print(raw)
        print("hiii3")
        sampling_rate = raw.info['sfreq']
        raw_ch_df = raw.to_data_frame(scaling_time=100.0)[select_ch]
        raw_ch_df = raw_ch_df.to_frame()
        raw_ch_df.set_index(np.arange(len(raw_ch_df)))

        print("hii3")

        # Get raw header
        f = open(psg_fnames[i], 'r')
        reader_raw = dhedfreader.BaseEDFReader(f)
        reader_raw.read_header()
        h_raw = reader_raw.header
        f.close()
        raw_start_dt = datetime.strptime(h_raw['date_time'], "%Y-%m-%d %H:%M:%S")

        # Read annotation and its header
        f = open(ann_fnames[i], 'r')
        reader_ann = dhedfreader.BaseEDFReader(f)
        reader_ann.read_header()
        h_ann = reader_ann.header
        _, _, ann = zip(*reader_ann.records())
        f.close()
        ann_start_dt = datetime.strptime(h_ann['date_time'], "%Y-%m-%d %H:%M:%S")

        # Assert that raw and annotation files start at the same time
        assert raw_start_dt == ann_start_dt

        # Generate label and remove indices
        remove_idx = []    # indicies of the data that will be removed
        labels = []        # indicies of the data that have labels
        label_idx = []
        for a in ann[0]:
            onset_sec, duration_sec, ann_char = a
            ann_str = "".join(ann_char)
            label = ann2label[ann_str]
            if label != UNKNOWN:
                if duration_sec % EPOCH_SEC_SIZE != 0:
                    raise Exception("Something wrong")
                duration_epoch = int(duration_sec / EPOCH_SEC_SIZE)
                label_epoch = np.ones(duration_epoch, dtype=np.int) * label
                labels.append(label_epoch)
                idx = int(onset_sec * sampling_rate) + np.arange(duration_sec * sampling_rate, dtype=np.int)
                label_idx.append(idx)

                print "Include onset:{}, duration:{}, label:{} ({})".format(
                    onset_sec, duration_sec, label, ann_str
                )
            else:
                idx = int(onset_sec * sampling_rate) + np.arange(duration_sec * sampling_rate, dtype=np.int)
                remove_idx.append(idx)

                print "Remove onset:{}, duration:{}, label:{} ({})".format(
                    onset_sec, duration_sec, label, ann_str
                )
        labels = np.hstack(labels)
        
        print "before remove unwanted: {}".format(np.arange(len(raw_ch_df)).shape)
        if len(remove_idx) > 0:
            remove_idx = np.hstack(remove_idx)
            select_idx = np.setdiff1d(np.arange(len(raw_ch_df)), remove_idx)
        else:
            select_idx = np.arange(len(raw_ch_df))
        print "after remove unwanted: {}".format(select_idx.shape)

        # Select only the data with labels
        print "before intersect label: {}".format(select_idx.shape)
        label_idx = np.hstack(label_idx)
        select_idx = np.intersect1d(select_idx, label_idx)
        print "after intersect label: {}".format(select_idx.shape)

        # Remove extra index
        if len(label_idx) > len(select_idx):
            print "before remove extra labels: {}, {}".format(select_idx.shape, labels.shape)
            extra_idx = np.setdiff1d(label_idx, select_idx)
            # Trim the tail
            if np.all(extra_idx > select_idx[-1]):
                n_trims = len(select_idx) % int(EPOCH_SEC_SIZE * sampling_rate)
                n_label_trims = int(math.ceil(n_trims / (EPOCH_SEC_SIZE * sampling_rate)))
                select_idx = select_idx[:-n_trims]
                labels = labels[:-n_label_trims]
            print "after remove extra labels: {}, {}".format(select_idx.shape, labels.shape)

        # Remove movement and unknown stages if any
        raw_ch = raw_ch_df.values[select_idx]

        # Verify that we can split into 30-s epochs
        if len(raw_ch) % (EPOCH_SEC_SIZE * sampling_rate) != 0:
            raise Exception("Something wrong")
        n_epochs = len(raw_ch) / (EPOCH_SEC_SIZE * sampling_rate)

        # Get epochs and their corresponding labels
        x = np.asarray(np.split(raw_ch, n_epochs)).astype(np.float32)
        y = labels.astype(np.int32)

        assert len(x) == len(y)

        # Select on sleep periods
        w_edge_mins = 30
        nw_idx = np.where(y != stage_dict["W"])[0]
        start_idx = nw_idx[0] - (w_edge_mins * 2)
        end_idx = nw_idx[-1] + (w_edge_mins * 2)
        if start_idx < 0: start_idx = 0
        if end_idx >= len(y): end_idx = len(y) - 1
        select_idx = np.arange(start_idx, end_idx+1)
        print("Data before selection: {}, {}".format(x.shape, y.shape))
        x = x[select_idx]
        y = y[select_idx]
        print("Data after selection: {}, {}".format(x.shape, y.shape))

        # Save
        filename = ntpath.basename(psg_fnames[i]).replace("-PSG.edf", ".npz")
        save_dict = {
            "x": x, 
            "y": y, 
            "fs": sampling_rate,
            "ch_label": select_ch,
            "header_raw": h_raw,
            "header_annotation": h_ann,
        }
        np.savez(os.path.join(args.output_dir, filename), **save_dict)

        print "\n=======================================\n"
Code example #50
0
File: Gtk_MenuBar.py Project: pyq881120/springbok
    def file_popup_menu(self, filename):
        """Detect firewall type and parse the conf file"""
        def iter_next():
            # unblock file
            self.next_file = True

        Gtk_Main.Gtk_Main().statusbar.change_message("Import %s" % (filename))
        progressBar = gtk.ProgressBar(adjustment=None)
        progressBar.set_text("Parsing File")
        progressBar.set_fraction(0)

        vbox = gtk.VBox()
        vbox.pack_start(progressBar)

        button_radio = []
        for p in Parser.parser_list:
            tmp_radio = gtk.RadioButton(
                button_radio[0][0] if button_radio else None, p[1])
            button_radio.append((tmp_radio, p[0]))
            vbox.pack_start(tmp_radio)

        button_cancel = gtk.Button("Cancel")
        button_start = gtk.Button("Start")
        hbox = gtk.HBox()
        hbox.pack_start(button_cancel)
        hbox.pack_start(button_start)

        popup = gtk.Window()
        popup.set_title(ntpath.basename(filename))
        popup.connect("destroy", lambda x: iter_next())

        popup.set_modal(True)
        popup.set_transient_for(Gtk_Main.Gtk_Main().window)
        popup.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)

        vbox.pack_start(hbox)
        popup.add(vbox)

        popup.show_all()

        supposed_type = Parser.suppose_type(filename)
        for p in button_radio:
            if p[1] == supposed_type:
                p[0].set_active(True)

        def on_click(widget):
            parser_module = 'Parser.CiscoAsa.CiscoAsaYacc'
            for p in button_radio:
                if p[0].get_active():
                    parser_module = p[1]

            firewalls = Parser.parser(filename, parser_module, progressBar)
            for fw in firewalls:
                NetworkGraph.NetworkGraph().network_graph(fw)
                Gtk_Main.Gtk_Main().lateral_pane.firewalls.add_row(fw.hostname)
                Gtk_Main.Gtk_Main().lateral_pane.focus_firewall()
            Gtk_Main.Gtk_Main().draw()
            popup.destroy()
            self.tmp_fw_list += firewalls

        button_start.connect("clicked", on_click)
        button_cancel.connect("clicked", lambda x: popup.destroy())
Code example #51
0
def log_error(filename, line, column, error_code, msg):
    """Print the error to the command line."""
    click.echo(
        ntpath.basename(filename) + ":" + str(1 + line) + ":" +
        str(1 + column) + ": " + error_code + " " + msg)
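For example (hypothetical values), because of the +1 offsets above a zero-based position (4, 11) would be reported with one-based numbers:

log_error("C:/docs/api.rst", 4, 11, "D001", "trailing whitespace")
# echoes: api.rst:5:12: D001 trailing whitespace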
Code example #52
0
def upload():
    # Create original.csv with contents from request csv

    requestId = uuid.uuid4().hex
    inputPath = 'ServiceInput/' + requestId + '/'
    outputPath = 'ServiceOutput/' + requestId + '/'

    # Create separate input and output folders for every request
    try:
        os.mkdir(inputPath)
        os.mkdir(outputPath)
    except:
        app.logger.error("Couldn't create folders for requestid " + requestId)
        sys.exit(0)

    # Get the input file from the request
    with open(inputPath + 'original.csv', 'wb') as fp:
        fp.write(request.files['file'].read())

    # Get columns to work on if specified
    columns = request.form.get('columns')
    if columns:
        # columns param was specified
        columns = json.loads(columns)['names']
        columns = list(map(str, columns))
    else:
        # No columns param in request
        app.logger.info("No param columns. Running on all columns..")
        columns = []

    blackList = request.form.get('blackList')
    if blackList:
        blackList = json.loads(blackList)['QNodes']
        with open(outputPath + 'blacklist.json', 'w') as fp:
            json.dump(blackList, fp)
        blackList = "True"
    else:
        blackList = "False"

    # Get wikifyPercentage from request. Column will be in result only if wikifyPercentage of rows have been wikified
    # Default value 0.5
    wikifyPercentage = request.form.get('wikifyPercentage')
    if not wikifyPercentage:
        wikifyPercentage = "0"

    # Get output format from request Excel/ISWC
    # Default value Excel
    formatType = request.form.get('format')
    if not formatType:
        formatType = "Excel"

    retType = request.form.get('retType')
    if not retType:
        retType = 'CSV'

    header = request.form.get('header')
    if not header:
        header = "True"

    K = request.form.get('K')
    if not K:
        K = "0"

    # Run the pipeline
    approach = request.form.get('approach')

    phase = request.form.get('phase')
    if not phase:
        phase = 'train'
    if phase == 'test':
        columnClass = request.form.get('columnClass')
        if not columnClass: phase = 'train'
        else:
            columnClass = json.loads(columnClass)['names']
            columnClass = list(map(str, columnClass))
            if len(columns) != len(columnClass):
                phase = 'train'
    if phase == 'test':
        #get column name: test columnClass mapping
        classMap = dict(zip(columns, columnClass))
        json.dump(classMap, open(outputPath + 'columnClass.json', 'w'))

    subprocess.run([
        sys.executable, "-u", "Starter.py", requestId, "original.csv", header
    ] + columns,
                   check=True)
    if not approach:
        subprocess.run(
            [sys.executable, "-u", "WikifierService.py", requestId, phase])
        subprocess.run([sys.executable, "-u", "picker.py", requestId])
    elif approach == 'tfidf':
        confidence = request.form.get('confidence')
        if not confidence:
            confidence = "0"
        use_wikidata_class = request.form.get('use_wikidata_class')
        if not use_wikidata_class:
            use_wikidata_class = "True"
        use_dbpedia_class = request.form.get('use_dbpedia_class')
        if not use_dbpedia_class:
            use_dbpedia_class = "True"
        use_wikidata_props = request.form.get('use_wikidata_props')
        if not use_wikidata_props:
            use_wikidata_props = "True"
        use_tf = request.form.get('use_tf')
        if not use_tf:
            use_tf = "pedro"
        use_df = request.form.get('use_df')
        if not use_df:
            use_df = "jay"
        subprocess.run([
            sys.executable, "-u", "tfidf.py", requestId, confidence,
            use_wikidata_class, use_dbpedia_class, use_wikidata_props, use_tf,
            use_df
        ])
        #print(["tfidf.py", requestId, confidence, use_wikidata_class, use_dbpedia_class, use_wikidata_props, use_tf, use_df])
    subprocess.run([
        sys.executable, "-u", "ServiceOutput.py", requestId, wikifyPercentage,
        formatType, header, K, blackList,
        str(retType == 'JSON')
    ])

    if retType == 'CSV':
        stringIO = io.StringIO()
        writeCSV = csv.writer(stringIO)
        with open(outputPath + 'original.csv') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            writeCSV.writerows(readCSV)
        data = stringIO.getvalue()
    elif retType == 'JSON':
        with open(outputPath + 'original.json') as jsonfile:
            data = json.loads(json.load(jsonfile))

    if not approach:
        processedColumns = glob.glob(outputPath + 'original_*_chosenclass.csv')
        chosenClass = {}
        for col in processedColumns:
            with open(col) as fp:
                chosenClass[re.match(
                    "original_(.*?)_chosenclass.csv",
                    ntpath.basename(col)).group(1)] = fp.readline().strip(
                        '\n').split(',')[-1]
        output = {"data": data, "class": chosenClass}
    else:
        output = {"data": data}
    return json.dumps(output)
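A standalone sketch (with a made-up request id and column name) of the regular expression used above to recover the column name from each per-column output file:

import ntpath
import re

col = "ServiceOutput/abc123/original_Country_chosenclass.csv"
column_name = re.match(r"original_(.*?)_chosenclass.csv",
                       ntpath.basename(col)).group(1)
print(column_name)  # -> 'Country'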
Code example #53
0
def main_worker(args):
    global best_prec1, dtype
    acc = -1
    loss = -1
    best_prec1 = 0
    dtype = torch_dtypes.get(args.dtype)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)

    args.distributed = args.local_rank >= 0 or args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_init,
                                world_size=args.world_size,
                                rank=args.local_rank)
        args.local_rank = dist.get_rank()
        args.world_size = dist.get_world_size()
        if args.dist_backend == 'mpi':
            # If using MPI, select all visible devices
            args.device_ids = list(range(torch.cuda.device_count()))
        else:
            args.device_ids = [args.local_rank]

    if not os.path.exists(save_path) and not (args.distributed
                                              and args.local_rank > 0):
        os.makedirs(save_path)

    setup_logging(os.path.join(save_path, 'log.txt'),
                  resume=args.resume != '',
                  dummy=args.distributed and args.local_rank > 0)

    results_path = os.path.join(save_path, 'results')
    results = ResultsLog(results_path,
                         title='Training Results - %s' % args.save)

    logging.info("saving to %s", save_path)
    logging.debug("run arguments: %s", args)
    logging.info("creating model %s", args.model)

    if 'cuda' in args.device and torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
        torch.cuda.set_device(args.device_ids[0])
        cudnn.benchmark = True
    else:
        args.device_ids = None

    # create model
    model = models.__dict__[args.model]
    dataset_type = 'imagenet' if args.dataset == 'imagenet_calib' else args.dataset
    model_config = {'dataset': dataset_type}

    if args.model_config != '':
        if isinstance(args.model_config, dict):
            for k, v in args.model_config.items():
                if k not in model_config.keys():
                    model_config[k] = v
        else:
            args_dict = literal_eval(args.model_config)
            for k, v in args_dict.items():
                model_config[k] = v
    if (args.absorb_bn or args.load_from_vision
            or args.pretrained) and not args.batch_norn_tuning:
        if args.load_from_vision:
            import torchvision
            exec_lfv_str = 'torchvision.models.' + args.load_from_vision + '(pretrained=True)'
            model = eval(exec_lfv_str)
            if 'pytcv' in args.model:
                from pytorchcv.model_provider import get_model as ptcv_get_model
                exec_lfv_str = 'ptcv_get_model("' + args.load_from_vision + '", pretrained=True)'
                model_pytcv = eval(exec_lfv_str)
                model = convert_pytcv_model(model, model_pytcv)
        else:
            if not os.path.isfile(args.absorb_bn):
                parser.error('invalid checkpoint: {}'.format(args.evaluate))
            model = model(**model_config)
            checkpoint = torch.load(args.absorb_bn,
                                    map_location=lambda storage, loc: storage)
            checkpoint = checkpoint[
                'state_dict'] if 'state_dict' in checkpoint.keys(
                ) else checkpoint
            model.load_state_dict(checkpoint, strict=False)
        if 'batch_norm' in model_config and not model_config['batch_norm']:
            logging.info('Creating absorb_bn state dict')
            search_absorbe_bn(model)
            filename_ab = args.absorb_bn + '.absorb_bn' if args.absorb_bn else save_path + '/' + args.model + '.absorb_bn'
            torch.save(model.state_dict(), filename_ab)
        else:
            filename_bn = save_path + '/' + args.model + '.with_bn'
            torch.save(model.state_dict(), filename_bn)
        if (args.load_from_vision
                or args.absorb_bn) and not args.evaluate_init_configuration:
            return

    if 'inception' in args.model:
        model = model(init_weights=False, **model_config)
    else:
        model = model(**model_config)
    logging.info("created model with configuration: %s", model_config)

    num_parameters = sum([l.nelement() for l in model.parameters()])
    logging.info("number of parameters: %d", num_parameters)

    # optionally resume from a checkpoint
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            parser.error('invalid checkpoint: {}'.format(args.evaluate))
        checkpoint = torch.load(args.evaluate, map_location="cpu")
        # Overrride configuration with checkpoint info
        args.model = checkpoint.get('model', args.model)
        args.model_config = checkpoint.get('config', args.model_config)
        if not model_config['batch_norm']:
            search_absorbe_fake_bn(model)
        # load checkpoint
        if 'state_dict' in checkpoint.keys():
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s'", args.evaluate)
        else:
            model.load_state_dict(checkpoint, strict=False)
            logging.info("loaded checkpoint '%s'", args.evaluate)

    if args.resume:
        checkpoint_file = args.resume
        if os.path.isdir(checkpoint_file):
            results.load(os.path.join(checkpoint_file, 'results.csv'))
            checkpoint_file = os.path.join(checkpoint_file,
                                           'model_best.pth.tar')
        if os.path.isfile(checkpoint_file):
            logging.info("loading checkpoint '%s'", args.resume)
            checkpoint = torch.load(checkpoint_file)
            if args.start_epoch < 0:  # not explicitly set
                args.start_epoch = checkpoint[
                    'epoch'] - 1 if 'epoch' in checkpoint.keys() else 0
            best_prec1 = checkpoint[
                'best_prec1'] if 'best_prec1' in checkpoint.keys() else -1
            sd = checkpoint['state_dict'] if 'state_dict' in checkpoint.keys(
            ) else checkpoint
            model.load_state_dict(sd, strict=False)
            logging.info("loaded checkpoint '%s' (epoch %s)", checkpoint_file,
                         args.start_epoch)
        else:
            logging.error("no checkpoint found at '%s'", args.resume)

    # define loss function (criterion) and optimizer
    loss_params = {}
    if args.label_smoothing > 0:
        loss_params['smooth_eps'] = args.label_smoothing
    criterion = getattr(model, 'criterion', CrossEntropyLoss)(**loss_params)
    if args.kld_loss:
        criterion = nn.KLDivLoss(reduction='mean')
    criterion.to(args.device, dtype)
    model.to(args.device, dtype)

    # Batch-norm should always be done in float
    if 'half' in args.dtype:
        FilterModules(model, module=is_bn).to(dtype=torch.float)

    # optimizer configuration
    optim_regime = getattr(model, 'regime', [{
        'epoch': 0,
        'optimizer': args.optimizer,
        'lr': args.lr,
        'momentum': args.momentum,
        'weight_decay': args.weight_decay
    }])
    if args.fine_tune or args.prune:
        if not args.resume: args.start_epoch = 0
        if args.update_only_th:
            #optim_regime = [
            #    {'epoch': 0, 'optimizer': 'Adam', 'lr': 1e-4}]
            optim_regime = [{
                'epoch': 0,
                'optimizer': 'SGD',
                'lr': 1e-1
            }, {
                'epoch': 10,
                'lr': 1e-2
            }, {
                'epoch': 15,
                'lr': 1e-3
            }]
        else:
            optim_regime = [{
                'epoch': 0,
                'optimizer': 'SGD',
                'lr': 1e-4,
                'momentum': 0.9
            }, {
                'epoch': 2,
                'lr': 1e-5,
                'momentum': 0.9
            }, {
                'epoch': 10,
                'lr': 1e-6,
                'momentum': 0.9
            }]
    optimizer = optim_regime if isinstance(optim_regime, OptimRegime) \
        else OptimRegime(model, optim_regime, use_float_copy='half' in args.dtype)

    # Training Data loading code

    train_data = DataRegime(getattr(model, 'data_regime', None),
                            defaults={
                                'datasets_path': args.datasets_dir,
                                'name': args.dataset,
                                'split': 'train',
                                'augment': False,
                                'input_size': args.input_size,
                                'batch_size': args.batch_size,
                                'shuffle': not args.seq_adaquant,
                                'num_workers': args.workers,
                                'pin_memory': True,
                                'drop_last': True,
                                'distributed': args.distributed,
                                'duplicates': args.duplicates,
                                'autoaugment': args.autoaugment,
                                'cutout': {
                                    'holes': 1,
                                    'length': 16
                                } if args.cutout else None,
                                'inception_prep': 'inception' in args.model
                            })
    if args.names_sp_layers is None and args.layers_precision_dict is None:
        args.names_sp_layers = [
            key[:-7] for key in model.state_dict().keys()
            if 'weight' in key and 'running' not in key and (
                'conv' in key or 'downsample.0' in key or 'fc' in key)
        ]
        if args.keep_first_last:
            args.names_sp_layers = [
                name for name in args.names_sp_layers if name != 'conv1'
                and name != 'fc' and name != 'Conv2d_1a_3x3.conv'
            ]
        args.names_sp_layers = [
            k for k in args.names_sp_layers if 'downsample' not in k
        ] if args.ignore_downsample else args.names_sp_layers
        if args.num_sp_layers == 0 and not args.keep_first_last:
            args.names_sp_layers = []

    if args.layers_precision_dict is not None:
        print(args.layers_precision_dict)

    prunner = None
    trainer = Trainer(model,
                      prunner,
                      criterion,
                      optimizer,
                      device_ids=args.device_ids,
                      device=args.device,
                      dtype=dtype,
                      distributed=args.distributed,
                      local_rank=args.local_rank,
                      mixup=args.mixup,
                      loss_scale=args.loss_scale,
                      grad_clip=args.grad_clip,
                      print_freq=args.print_freq,
                      adapt_grad_norm=args.adapt_grad_norm,
                      epoch=args.start_epoch,
                      update_only_th=args.update_only_th,
                      optimize_rounding=args.optimize_rounding)

    # Evaluation Data loading code
    args.eval_batch_size = args.eval_batch_size if args.eval_batch_size > 0 else args.batch_size
    dataset_type = 'imagenet' if args.dataset == 'imagenet_calib' else args.dataset
    val_data = DataRegime(getattr(model, 'data_eval_regime', None),
                          defaults={
                              'datasets_path': args.datasets_dir,
                              'name': dataset_type,
                              'split': 'val',
                              'augment': False,
                              'input_size': args.input_size,
                              'batch_size': args.eval_batch_size,
                              'shuffle': True,
                              'num_workers': args.workers,
                              'pin_memory': True,
                              'drop_last': False
                          })

    if args.evaluate or args.resume:
        from utils.layer_sensativity import search_replace_layer, extract_save_quant_state_dict, search_replace_layer_from_dict
        if args.layers_precision_dict is not None:
            model = search_replace_layer_from_dict(
                model, ast.literal_eval(args.layers_precision_dict))
        else:
            model = search_replace_layer(model,
                                         args.names_sp_layers,
                                         num_bits_activation=args.nbits_act,
                                         num_bits_weight=args.nbits_weight)

    cached_input_output = {}
    quant_keys = [
        '.weight', '.bias', '.equ_scale', '.quantize_input.running_zero_point',
        '.quantize_input.running_range', '.quantize_weight.running_zero_point',
        '.quantize_weight.running_range',
        '.quantize_input1.running_zero_point', '.quantize_input1.running_range',
        '.quantize_input2.running_zero_point', '.quantize_input2.running_range'
    ]
    if args.adaquant:

        def Qhook(name, module, input, output):
            if module not in cached_qinput:
                cached_qinput[module] = []
            # Meanwhile store data in the RAM.
            cached_qinput[module].append(input[0].detach().cpu())
            # print(name)

        def hook(name, module, input, output):
            if module not in cached_input_output:
                cached_input_output[module] = []
            # Meanwhile store data in the RAM.
            cached_input_output[module].append(
                (input[0].detach().cpu(), output.detach().cpu()))
            # print(name)

        from models.modules.quantize import QConv2d, QLinear
        handlers = []
        count = 0
        for name, m in model.named_modules():
            if isinstance(m, QConv2d) or isinstance(m, QLinear):
                #if isinstance(m, QConv2d) or isinstance(m, QLinear):
                # if isinstance(m, QConv2d):
                m.quantize = False
                if count < 1000:
                    # if (isinstance(m, QConv2d) and m.groups == 1) or isinstance(m, QLinear):
                    handlers.append(
                        m.register_forward_hook(partial(hook, name)))
                    count += 1

        # Store input/output for all quantizable layers
        trainer.validate(train_data.get_loader())
        print("Input/outputs cached")

        for handler in handlers:
            handler.remove()

        for m in model.modules():
            if isinstance(m, QConv2d) or isinstance(m, QLinear):
                m.quantize = True

        mse_df = pd.DataFrame(
            index=np.arange(len(cached_input_output)),
            columns=['name', 'bit', 'shape', 'mse_before', 'mse_after'])
        print_freq = 100
        for i, layer in enumerate(cached_input_output):
            if i > 0 and args.seq_adaquant:
                count = 0
                cached_qinput = {}
                for name, m in model.named_modules():
                    if layer.name == name:
                        if count < 1000:
                            handler = m.register_forward_hook(
                                partial(Qhook, name))
                            count += 1
                # Store input/output for all quantizable layers
                trainer.validate(train_data.get_loader())
                print("cashed quant Input%s" % layer.name)
                cached_input_output[layer][0] = (
                    cached_qinput[layer][0], cached_input_output[layer][0][1])
                handler.remove()
            print("\nOptimize {}:{} for {} bit of shape {}".format(
                i, layer.name, layer.num_bits, layer.weight.shape))
            mse_before, mse_after, snr_before, snr_after, kurt_in, kurt_w = \
                optimize_layer(layer, cached_input_output[layer], args.optimize_weights, batch_size=args.batch_size, model_name=args.model)
            print("\nMSE before optimization: {}".format(mse_before))
            print("MSE after optimization:  {}".format(mse_after))
            mse_df.loc[i, 'name'] = layer.name
            mse_df.loc[i, 'bit'] = layer.num_bits
            mse_df.loc[i, 'shape'] = str(layer.weight.shape)
            mse_df.loc[i, 'mse_before'] = mse_before
            mse_df.loc[i, 'mse_after'] = mse_after
            mse_df.loc[i, 'snr_before'] = snr_before
            mse_df.loc[i, 'snr_after'] = snr_after
            mse_df.loc[i, 'kurt_in'] = kurt_in
            mse_df.loc[i, 'kurt_w'] = kurt_w

        mse_csv = args.evaluate + '.mse.csv'
        mse_df.to_csv(mse_csv)

        filename = args.evaluate + '.adaquant'
        torch.save(model.state_dict(), filename)

        train_data = None
        cached_input_output = None
        val_results = trainer.validate(val_data.get_loader())
        logging.info(val_results)

        if args.res_log is not None:
            if not os.path.exists(args.res_log):
                df = pd.DataFrame()
            else:
                df = pd.read_csv(args.res_log, index_col=0)

            ckp = ntpath.basename(args.evaluate)
            if args.cmp is not None:
                ckp += '_{}'.format(args.cmp)
            adaquant_type = 'adaquant_seq' if args.seq_adaquant else 'adaquant_parallel'
            df.loc[ckp, 'acc_' + adaquant_type] = val_results['prec1']
            df.to_csv(args.res_log)
            # print(df)

    elif args.per_layer:
        # Store input/output for all quantizable layers
        calib_all_8_results = trainer.validate(train_data.get_loader())
        print('########## All 8bit results ###########', calib_all_8_results)
        int8_opt_model_state_dict = torch.load(args.int8_opt_model_path)
        int4_opt_model_state_dict = torch.load(args.int4_opt_model_path)

        per_layer_results = {}
        args.names_sp_layers = [
            key[:-7] for key in model.state_dict().keys()
            if 'weight' in key and 'running' not in key and 'quantize' not in
            key and ('conv' in key or 'downsample.0' in key or 'fc' in key)
        ]
        for layer_idx, layer in enumerate(args.names_sp_layers):
            model.load_state_dict(int8_opt_model_state_dict, strict=False)
            model = search_replace_layer(model, [layer],
                                         num_bits_activation=args.nbits_act,
                                         num_bits_weight=args.nbits_weight)
            layer_keys = [
                key for key in int8_opt_model_state_dict
                for qpkey in quant_keys if layer + qpkey == key
            ]
            for key in layer_keys:
                model.state_dict()[key].copy_(int4_opt_model_state_dict[key])
            calib_results = trainer.validate(train_data.get_loader())
            model = search_replace_layer(model, [layer],
                                         num_bits_activation=8,
                                         num_bits_weight=8)
            print('finished %d out of %d' %
                  (layer_idx, len(args.names_sp_layers)))
            logging.info(layer)
            logging.info(calib_results)
            per_layer_results[layer] = {
                'base precision': 8,
                'replaced precision': args.nbits_act,
                'replaced layer': layer,
                'accuracy': calib_results['prec1'],
                'loss': calib_results['loss'],
                'Parameters Size [Elements]': model.state_dict()[layer + '.weight'].numel(),
                'MACs': '-'
            }

        torch.save(
            per_layer_results, args.evaluate + '.per_layer_accuracy.A' +
            str(args.nbits_act) + '.W' + str(args.nbits_weight))
        all_8_dict = {
            'base precision': 8,
            'replaced precision': args.nbits_act,
            'replaced layer': '-',
            'accuracy': calib_all_8_results['prec1'],
            'loss': calib_all_8_results['loss'],
            'Parameters Size [Elements]': '-',
            'MACs': '-'
        }
        columns = [key for key in all_8_dict]
        with open(
                args.evaluate + '.per_layer_accuracy.A' + str(args.nbits_act) +
                '.W' + str(args.nbits_weight) + '.csv', "w") as f:
            f.write(",".join(columns) + "\n")
            col = [str(all_8_dict[c]) for c in all_8_dict.keys()]
            f.write(",".join(col) + "\n")
            for layer in per_layer_results:
                r = per_layer_results[layer]
                col = [str(r[c]) for c in r.keys()]
                f.write(",".join(col) + "\n")
    elif args.mixed_builder:
        if isinstance(args.names_sp_layers, list):
            print('loading int8 model" ', args.int8_opt_model_path)
            int8_opt_model_state_dict = torch.load(args.int8_opt_model_path)
            print('loading int4 model" ', args.int4_opt_model_path)
            int4_opt_model_state_dict = torch.load(args.int4_opt_model_path)

            model.load_state_dict(int8_opt_model_state_dict, strict=False)
            model = search_replace_layer(model,
                                         args.names_sp_layers,
                                         num_bits_activation=args.nbits_act,
                                         num_bits_weight=args.nbits_weight)
            for layer_idx, layer in enumerate(args.names_sp_layers):
                layer_keys = [
                    key for key in int8_opt_model_state_dict
                    for qpkey in quant_keys if layer + qpkey == key
                ]
                for key in layer_keys:
                    model.state_dict()[key].copy_(
                        int4_opt_model_state_dict[key])
                print('switched layer %s to 4 bit' % (layer))
        elif isinstance(args.names_sp_layers, dict):
            quant_models = {}
            base_precision = args.precisions[0]
            for m, prec in zip(args.opt_model_paths, args.precisions):
                print('For precision={}, loading {}'.format(prec, m))
                quant_models[prec] = torch.load(m)
            model.load_state_dict(quant_models[base_precision], strict=False)
            for layer_name, nbits_list in args.names_sp_layers.items():
                model = search_replace_layer(model, [layer_name],
                                             num_bits_activation=nbits_list[0],
                                             num_bits_weight=nbits_list[0])
                layer_keys = [
                    key for key in quant_models[base_precision]
                    for qpkey in quant_keys if layer_name + qpkey == key
                ]
                for key in layer_keys:
                    model.state_dict()[key].copy_(
                        quant_models[nbits_list[0]][key])
                print('switched layer {} to {} bit'.format(
                    layer_name, nbits_list[0]))
        if os.environ.get('DEBUG') == 'True':
            from utils.layer_sensativity import check_quantized_model
            fp_names = check_quantized_model(trainer.model)
            if len(fp_names) > 0:
                logging.info('Found FP32 layers in the model:')
                logging.info(fp_names)
        if args.eval_on_train:
            mixedIP_results = trainer.validate(train_data.get_loader())
        else:
            mixedIP_results = trainer.validate(val_data.get_loader())
        torch.save(
            {
                'state_dict': model.state_dict(),
                'config-ip': args.names_sp_layers
            }, args.evaluate + '.mixed-ip-results.' + args.suffix)
        logging.info(mixedIP_results)
        acc = mixedIP_results['prec1']
        loss = mixedIP_results['loss']
    elif args.batch_norn_tuning:
        from utils.layer_sensativity import search_replace_layer, extract_save_quant_state_dict, search_replace_layer_from_dict
        from models.modules.quantize import QConv2d
        if args.layers_precision_dict is not None:
            model = search_replace_layer_from_dict(
                model, literal_eval(args.layers_precision_dict))
        else:
            model = search_replace_layer(model,
                                         args.names_sp_layers,
                                         num_bits_activation=args.nbits_act,
                                         num_bits_weight=args.nbits_weight)

        # Resolve the torchvision constructor by name instead of eval();
        # assumes args.load_from_vision names a callable directly under torchvision.models.
        model_orig = getattr(torchvision.models, args.load_from_vision)(pretrained=True)
        model_orig.to(args.device, dtype)
        search_copy_bn_params(model_orig)

        layers_orig = dict([(n, m) for n, m in model_orig.named_modules()
                            if isinstance(m, nn.Conv2d)])
        layers_q = dict([(n, m) for n, m in model.named_modules()
                         if isinstance(m, QConv2d)])
        for l in layers_orig:
            conv_orig = layers_orig[l]
            conv_q = layers_q[l]
            conv_q.register_parameter('gamma',
                                      nn.Parameter(conv_orig.gamma.clone()))
            conv_q.register_parameter('beta',
                                      nn.Parameter(conv_orig.beta.clone()))

        del model_orig

        search_add_bn(model)

        print("Run BN tuning")
        for tt in range(args.tuning_iter):
            print(tt)
            trainer.cal_bn_stats(train_data.get_loader())

        search_absorbe_tuning_bn(model)

        filename = args.evaluate + '.bn_tuning'
        print("Save model to: {}".format(filename))
        torch.save(model.state_dict(), filename)

        val_results = trainer.validate(val_data.get_loader())
        logging.info(val_results)

        if args.res_log is not None:
            if not os.path.exists(args.res_log):
                df = pd.DataFrame()
            else:
                df = pd.read_csv(args.res_log, index_col=0)

            ckp = ntpath.basename(args.evaluate)
            df.loc[ckp, 'acc_bn_tuning'] = val_results['prec1']
            df.loc[ckp, 'loss_bn_tuning'] = val_results['loss']
            df.to_csv(args.res_log)
            # print(df)

    elif args.bias_tuning:
        for epoch in range(args.epochs):
            trainer.epoch = epoch
            train_data.set_epoch(epoch)
            val_data.set_epoch(epoch)
            logging.info('\nStarting Epoch: {0}\n'.format(epoch + 1))
            # train for one epoch
            repeat_train = 20 if args.update_only_th else 1
            for tt in range(repeat_train):
                print(tt)
                train_results = trainer.train(
                    train_data.get_loader(),
                    duplicates=train_data.get('duplicates'),
                    chunk_batch=args.chunk_batch)
                logging.info(train_results)

        val_results = trainer.validate(val_data.get_loader())
        logging.info(val_results)
        if args.res_log is not None:
            if not os.path.exists(args.res_log):
                df = pd.DataFrame()
            else:
                df = pd.read_csv(args.res_log, index_col=0)

            ckp = ntpath.basename(args.evaluate)
            if 'bn_tuning' in ckp:
                ckp = ckp.replace('.bn_tuning', '')
            df.loc[ckp, 'acc_bias_tuning'] = val_results['prec1']
            df.to_csv(args.res_log)
        # import pdb; pdb.set_trace()
    else:
        #print('Please Choose one of the following ....')
        if model_config['measure']:
            results = trainer.validate(train_data.get_loader(), rec=args.rec)
            # results = trainer.validate(val_data.get_loader())
            # print(results)
        else:
            if args.evaluate_init_configuration:
                results = trainer.validate(val_data.get_loader())
                if args.res_log is not None:
                    if not os.path.exists(args.res_log):
                        df = pd.DataFrame()
                    else:
                        df = pd.read_csv(args.res_log, index_col=0)

                    ckp = ntpath.basename(args.evaluate)
                    if args.cmp is not None:
                        ckp += '_{}'.format(args.cmp)
                    df.loc[ckp, 'acc_base'] = results['prec1']
                    df.to_csv(args.res_log)

        if args.extract_bias_mean:
            file_name = 'bias_mean_measure' if model_config[
                'measure'] else 'bias_mean_quant'
            torch.save(trainer.bias_mean, file_name)
        if model_config['measure']:
            filename = args.evaluate + '.measure'
            if 'perC' in args.model_config: filename += '_perC'
            torch.save(model.state_dict(), filename)
            logging.info(results)
        else:
            if args.evaluate_init_configuration:
                logging.info(results)
    # NOTE: acc and loss are only assigned in the mixed_builder branch above;
    # the other branches reach this return without defining them.
    return acc, loss
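The AdaQuant branch above caches every quantizable layer's (input, output) pair with PyTorch forward hooks before optimizing the layers one by one. A minimal, self-contained sketch of that caching pattern, using a toy model (the model and tensor shapes here are illustrative, not taken from the snippet):

import torch
import torch.nn as nn
from functools import partial

toy = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 8, 3, padding=1))
cached = {}  # module -> list of (input, output) pairs kept on the CPU

def cache_hook(name, module, inp, out):
    # Store detached CPU copies so the calibration data outlives the forward pass.
    cached.setdefault(module, []).append((inp[0].detach().cpu(), out.detach().cpu()))

handles = [m.register_forward_hook(partial(cache_hook, n))
           for n, m in toy.named_modules() if isinstance(m, nn.Conv2d)]
toy(torch.randn(2, 3, 32, 32))  # one calibration batch fills the cache
for h in handles:
    h.remove()  # remove the hooks once the calibration pairs are collected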
コード例 #54
0
def get_copyright_errors(lic, file_path, is_config_file):
    """Returns Errors for errors in the FreeRTOS copyright that is possibly inside of the
    input license, lic. Otherwise an empty list is returned.
    """
    errors = []
    # Flag if the version and copyright are found in the license.
    version_found = False
    copyright_found = False
    for line in lic.split('\n'):

        # Config files do not need a version.
        if COPYRIGHT_NAME in line and is_config_file:
            version_found = True
            continue

        # Portable layer files need a version.
        if COPYRIGHT_NAME in line and 'V' in line:
            version_found = True

            # Get the version error for the portable layer code
            if FIRST_VERSION not in line:
                errors.append(
                    Error(
                        type='error',
                        info=file_path +
                        ' should be the first version of the file (V1.0.0).'))

            # Get the portable layer name error
            start = line.find(COPYRIGHT_NAME) + len(
                COPYRIGHT_NAME) + 1  # +1 for space
            end = line.find('for') - 1  # -1 for space
            name = line[start:end]
            file_name = basename(file_path)
            if name != PORTABLE_LAYER_NAMES[file_name]:
                errors.append(
                    Error(
                        type='error',
                        info='\"' + name +
                        '\" is not the valid portable layer name. It should be changed to: '
                        + PORTABLE_LAYER_NAMES[file_name]))
            continue

        # Get an error if the copyright line is not found for all types of files.
        if 'Copyright (C)' in line and 'Amazon.com, Inc. or its affiliates.  All Rights Reserved.' in line:
            copyright_found = True
            year = str(datetime.now().year)
            if year not in line:
                errors.append(
                    Error(type='error',
                          info=file_path +
                          ' is a new file and its year should be ' + year))

    # If the version line was not found flag errors.
    if not version_found:
        if basename(file_path) == 'FreeRTOSIPConfig.h' or basename(
                file_path) == 'FreeRTOSConfig.h':
            pass  # Skip version warnings for FreeRTOS config files.
        elif is_config_file:
            errors.append(
                Error(
                    type='error',
                    info=file_path +
                    ' is missing required version line: \"FreeRTOS VX.Y.Z\"'))
        else:
            errors.append(
                Error(type='error',
                      info=file_path +
                      'is missing: \"FreeRTOS ... for ... V1.0.0\"'))
    # If the copyright was not found flag errors.
    if not copyright_found:
        errors.append(
            Error(
                type='error',
                info=file_path + ' is missing: \"Copyright (C)' +
                str(datetime.now().year) +
                ' Amazon.com, Inc. or its affiliates.  All Rights Reserved.\"')
        )
    return errors
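A minimal usage sketch of the checker above. The Error namedtuple and the module-level constants (COPYRIGHT_NAME, FIRST_VERSION, PORTABLE_LAYER_NAMES) are defined elsewhere in the real project; the stand-in values below are hypothetical.

from collections import namedtuple
from datetime import datetime
from os.path import basename

# Hypothetical stand-ins for names defined elsewhere in the checker module.
Error = namedtuple('Error', ['type', 'info'])
COPYRIGHT_NAME = 'FreeRTOS'
FIRST_VERSION = 'V1.0.0'
PORTABLE_LAYER_NAMES = {'demo_config.h': 'FreeRTOS'}

sample_license = (
    'FreeRTOS V1.0.0\n'
    'Copyright (C) {} Amazon.com, Inc. or its affiliates.  All Rights Reserved.\n'
).format(datetime.now().year)

# Prints nothing when the header is well-formed; otherwise each Error is listed.
for err in get_copyright_errors(sample_license, 'demo_config.h', is_config_file=True):
    print(err.type, err.info)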
コード例 #55
0
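            # (excerpt) innermost loops of spatialConvolution(imageInput, kernel):
            # a and b are the kernel half-widths, and r, initialized in the omitted
            # outer loops, accumulates the convolution sum for output pixel (x, y).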
            for s in range(a, -a - 1, -1):
                for t in range(b, -b - 1, -1):
                    r += kernel[s + a, t + b] * imageInput[x - s, y - t]
            imageOutput[x, y] = int(r)

    return imageOutput

# imports required by this fragment
import argparse
import time
import ntpath

import cv2
import numpy as np

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
                help="input image path to process")
args = vars(ap.parse_args())

# read original image from input image path
inputImagePath = args["input"]
imageFileName = ntpath.basename(inputImagePath)
imageOriginal = cv2.imread(inputImagePath)

# display original image 
cv2.imshow('original', imageOriginal)

# convert BGR (OpenCV's default channel order) to grayscale
imageGrayscale = cv2.cvtColor(imageOriginal, cv2.COLOR_BGR2GRAY)

# display grayscale image
cv2.imshow('grayscale', imageGrayscale)

# spatial Convolution processing
startTime = time.time()
w = np.ones((3, 3), np.uint8)
imageSpatialConvolution = spatialConvolution(imageGrayscale, w)
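For reference, the same 3x3 box filtering can be obtained with OpenCV's built-in cv2.filter2D; a minimal sketch assuming the imageGrayscale and kernel w defined above (results can differ from the hand-rolled loop at the image borders and wherever the unnormalized sum exceeds 8 bits):

# Built-in filtering with the same all-ones kernel; for a symmetric kernel,
# correlation (what filter2D computes) equals convolution.
imageFiltered = cv2.filter2D(imageGrayscale, -1, w.astype(np.float32))
cv2.imshow('filter2D reference', imageFiltered)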
コード例 #56
0
ファイル: e2tvrecon2d.py プロジェクト: okwh/eman2
def main():
	parser = EMArgumentParser(usage=get_usage())
	parser.add_argument("--tiltseries", default=None, help="The input projections. Project should usually have the xform.projection header attribute, which is used for slice insertion")
	parser.add_argument("--imgnum", default=None, type=int, help="The image number which will be read from the stack when reconstructing an image from a user specified tiltseries.")
	parser.add_argument("--testdata", default=None, help="A 2D image to project a number of times (specified by --nslices and --tiltrange) and then reconstructed via compressed sensing.")
	parser.add_argument("--tlt", default=None, type=str, help="An imod tlt file containing alignment angles. If specified slices will be inserted using these angles in the IMOD convention")
	parser.add_argument("--nslices", default=120, type=int, help="Specify the number slices into which an image will be projected. Only applicable when using the --testdata option.")
	parser.add_argument("--tiltrange", default='60.0', type=str, help="Specify the range of degrees over which data was collected. This defaults to 60 degrees, resulting in the generation of projections from -60.0 to 60.0 degrees.")
	parser.add_argument("--output", default="recon.hdf", help="Output reconstructed tomogram file name.")
	parser.add_argument("--noise",action="store_true",default=False, help="If true, noise will be added to the image before reconstruction.")
	parser.add_argument("--noisiness",default=0.1, type=float, help="Multiply noise by a specified factor. The default value is 0.1")
	parser.add_argument("--path",type=str,default='recon',help="Directory in which results will be stored.")
	parser.add_argument("--niters", default=100, type=int, help="Specify the number of iterative reconstructions to complete before returning the final reconstructed volume.")
	parser.add_argument("--beta", default=0.2, type=float, help="Specify the total-variation regularization/penalization weight parameter 'beta'. The default value is 0.2. Note that this parameter must be greater than 0, but values much greater than 1 will produce cartoon-like reconstructions as a result of the total variation denoising procedure.")
	parser.add_argument("--subpix", default=1, type=int, help="Specify the number of linear subdivisions used to compute the projection of one image pixel onto a detector pixel. Note that this parameter must be a positive integer.")
	parser.add_argument("--fsc",action="store_true",default=False, help="Generate a fourier shell correlation plot comparing the input and output data.")
	parser.add_argument("--norm",default=None, type=str, help="Choose between 'regular', 'anisotropic', and 'l0' TV norms. The default is 'regular'.")
	parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID", default=-1)
	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="verbose level [0-9], higner number means higher level of verboseness")
	(options, args) = parser.parse_args()
	
	if options.output:
		outfile = options.output

	if options.tiltseries:
		infile = options.tiltseries
	elif options.testdata:
		infile = options.testdata
		nslices = options.nslices
		tiltrange = options.tiltrange
	else:
		print("ERROR: You must speficy either --testdata OR --tiltseries.")
		exit(1)
	
	if options.imgnum:
		imgnum = int(options.imgnum)
	else: 
		if options.testdata:
			if EMUtil.get_image_count(options.testdata) != 1:
				print("Using the 0th image by default.")
		imgnum = 0
	
	if options.norm:
		if options.norm == 'regular':
			print("This instance will utilize the regular (default) TV norm")
		elif options.norm == 'anisotropic':
			print("This instance will utilize an anisotropic TV norm")
		elif options.norm == 'l0':
			print("This instance will utilize an l0 TV norm")
		else:
			print("The option --norm was specified improperly. Please specify either anisotropic, l0, or regular. Regular is specified by default.")
			exit(1)
	
	if options.tlt != None:
		fangles = np.asarray([ float( i ) for i in open( options.tlt , "r" ) ])
		tiltangles = fangles.tolist()
		nslices = len( tiltangles )
	elif ( options.nslices and options.tiltrange ):
		tiltrange = float(options.tiltrange)
		nslices = int(options.nslices)
		print("Using tiltrange from -%s, %s degrees consisting of %i slices."%(options.tiltrange, options.tiltrange, options.nslices))
		# generate evenly spaced tilt angles over the symmetric range
		tiltangles = np.linspace(tiltrange, -1.*tiltrange, nslices).tolist()
	elif options.testdata:
		print("You must specify --nslices AND --tiltrange when using --testdata.")
		exit(1)
	else:
		print("You must specify --tlt when using --tiltseries")
		exit(1)
	
	
	if options.niters:
		niters = int(options.niters)
	
	if options.beta:
		beta = float(options.beta)
		if beta < 0:
			print("Parameter beta (--beta) must be greater than 0.")
			exit(1)
	
	if options.subpix:
		subpix = int(options.subpix)
	else: 
		subpix = 1
	
	noisiness = 0.0
	if options.noise:
		if options.noisiness != 0.0:
			noisiness = options.noisiness
	
	if options.output:
		outfile = options.output
	
	if options.verbose > 1: print("e2tvrecon.py")
	logger=E2init(sys.argv,options.ppid)
	
	# Create new output directory for this instance
	options = makepath( options, options.path)
	rootpath = os.getcwd()
	options.path = rootpath + "/" + options.path
	
	# Link original data file to output directory
	if options.verbose > 7: print("Linking input data to instance directory...")
	
	if options.testdata != None:
		pathname = os.path.dirname(os.path.abspath( options.testdata ))
		filename = ntpath.basename( options.testdata )
		linkfrom = pathname + "/" + filename
		linkto = options.path + "/" + filename
		os.symlink( linkfrom, linkto )
	
	if options.tiltseries != None:
		pathname = os.path.dirname(os.path.abspath( options.tiltseries ))
		filename = ntpath.basename( options.tiltseries )
		linkfrom = pathname + "/" + filename
		linkto = options.path + "/" + filename
		os.symlink( linkfrom, linkto )
	
	if options.tlt != None:
		pathname = os.path.dirname(os.path.abspath( options.tlt ))
		filename = ntpath.basename( options.tlt )
		linkfrom = pathname + "/" + filename
		linkto = options.path + "/" + filename
		os.symlink( linkfrom, linkto )
	
	# Get image/projection data
	data, dim = get_data( options, nslices, noisiness, imgnum )
	xlen = dim[0]
	
	# Projection operator and projections data
	if options.verbose > 2: print("Building Projection Operator...")
	projection_operator = build_projection_operator( tiltangles, xlen, nslices, None, subpix, 0, None )
	
	if options.tiltseries:
		projections = data.ravel()[:, np.newaxis]
	else:
		projections = projection_operator * data.ravel()[:, np.newaxis]
	
	if options.verbose > 9: print("Writing Projections to Disk... ")
	outpath = options.path + "/" + "projections.hdf"
	for i in range( nslices ):
		from_numpy(projections[i*xlen:(i+1)*xlen]).write_image( outpath, i )
	
	# Reconstruction
	if options.verbose > 2: print("Starting Reconstruction...")
	t1 = time.time()
	recon, energies = fista_tv( options, tiltangles, projections, beta, niters, projection_operator )
	t2 = time.time()
	if options.verbose > 3: print("Reconstruction completed in %s s"%(str(t2-t1)))
	
	# Store reconstruction in instance outfile directory
	outpath = options.path + "/" + outfile
	from_numpy( recon[-1] ).write_image( outpath )
	
	if options.fsc != False:
		if options.verbose > 3: print("Generating an FSC plot...")
		fscpath = options.path + "/" + "fsc.txt"
		datapath = options.testdata
		os.popen("e2proc3d.py %s %s --calcfsc %s"%( outpath, fscpath, datapath ))
	
	E2end(logger)
	if options.verbose > 1: print("Exiting")
	return
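The tilt angles used above are laid out symmetrically from +tiltrange down to -tiltrange; a quick illustration with made-up values (nslices=5, tiltrange=60):

import numpy as np

tiltrange, nslices = 60.0, 5
tiltangles = np.linspace(tiltrange, -1. * tiltrange, nslices).tolist()
print(tiltangles)  # [60.0, 30.0, 0.0, -30.0, -60.0]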
コード例 #57
0
ファイル: paridf.py プロジェクト: marcelosalles/parIDF
# "y" means Yes and is the default answer; "n" means No
if run.lower() == "y" or run.lower() == "":
    energyplusOutput = config['path']['destination'] + '/Output'
    if (os.path.isdir(energyplusOutput)):
        shutil.rmtree(energyplusOutput)
    globName = config['path']['destination'] + '/' + config['path'][
        'filename'] + '*.idf'
    weatherFilename = config['path']['weatherFilename']
    globFiles = glob.glob(globName)
    for outputName in globFiles:
        call(["runenergyplus", outputName, weatherFilename])
    print("*** The IDF files simulations have been successfully completed ***")

    # energyplus -i custom.idd -w weather.epw input.idf
    # Example: energyplus -w weather.epw -r input.idf
    #-x, --expandobjects Run ExpandObjects prior to simulation

    for extension in ['err', 'csv']:
        extensionFolder = energyplusOutput + '/' + extension
        if not os.path.exists(extensionFolder):
            os.makedirs(extensionFolder)
        globName = energyplusOutput + '/' + config['path'][
            'filename'] + '*.' + extension
        for file in glob.glob(globName):
            os.rename(file, extensionFolder + '/' + ntpath.basename(file))

if run.lower() == "n":
    print(
        "*** The IDF files haven't been executed by EnergyPlus, but they are saved in the destination folder ***"
    )
コード例 #58
0
ファイル: PRS_run.py プロジェクト: thaocad/PRS-on-SPARK
    #sc = spark.sparkContext
    sc.setLogLevel("WARN")
    log4jLogger = sc._jvm.org.apache.log4j
    LOGGER = log4jLogger.LogManager.getLogger(__name__)
    LOGGER.warn("Start Reading Files")
    LOGGER.warn("Using these genoytpe files: ")

    for filename in genoFileNames[:min(24, len(genoFileNames))]:
        LOGGER.warn(filename)
    if len(genoFileNames) > 24:
        LOGGER.warn("and more...")

    LOGGER.warn("total of {} files".format(str(len(genoFileNames))))
    # 1. Load files
    genodata=sc.textFile(genoFileNamePattern)
    LOGGER.warn("Using the GWAS file: {}".format(ntpath.basename(gwasFiles)))
    gwastable=spark.read.option("header",GWAS_has_header).option("delimiter",GWAS_delim).csv(gwasFiles).cache()
    print("Showing top 5 rows of GWAS file")
    gwastable.show(5)

    # 1.1 Filter GWAS and prepare odds ratio

    maxThreshold=max(thresholds)
    gwasOddsMapMax=filterGWASByP_DF(GWASdf=gwastable, pcolumn=gwas_p, idcolumn=gwas_id, oddscolumn=gwas_or, pHigh=maxThreshold, logOdds=log_or)
    gwasOddsMapMaxCA=sc.broadcast(gwasOddsMapMax).value  # Broadcast the map

    # ### 2. Initial processing

    # at this step, the genotypes are already filtered to keep only the ones in 'gwasOddsMapMax'
    bpMap={"A":"T", "T":"A", "C":"G", "G":"C"}
    tic=time()
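The bpMap dictionary above maps each base to its complement, which is typically used to handle strand flips when matching genotype alleles against the GWAS table. A tiny illustration (the allele strings are made up):

bpMap = {"A": "T", "T": "A", "C": "G", "G": "C"}

def flip_strand(alleles):
    # Replace every base with its complement, e.g. "AG" -> "TC".
    return "".join(bpMap[base] for base in alleles)

print(flip_strand("A"), flip_strand("AG"))  # T TC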
コード例 #59
0
def path_leaf(path):
    head, tail = ntpath.split(str(path))
    return tail or ntpath.basename(head)
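A quick illustration of path_leaf: it returns the final path component and, thanks to the or-fallback, still works when the path ends in a separator (the example paths are made up):

print(path_leaf("C:\\data\\results\\run1.csv"))  # run1.csv
print(path_leaf("/tmp/output/metrics.json"))     # metrics.json
print(path_leaf("/tmp/output/"))                 # output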
コード例 #60
0
# Set Parquet compression to "uncompressed" (the default codec is snappy)
sqlcontext.setConf("spark.sql.parquet.compression.codec", "uncompressed")
#Arguments from user
inputfile = sys.argv[1]
outputfile = sys.argv[2]
RDD_popwords = sc.wholeTextFiles(
    "/bigd29/output_hw2/medium/p1/1", use_unicode=False).map(lambda (
        file, popularWords): popularWords.split("\n")).flatMap(lambda x: x)
List_popwords = RDD_popwords.collect()

#Reading the file
fileData = sc.wholeTextFiles(inputfile, use_unicode=False)

stopwordsList = set(stopwords.words('english'))
fileData_base = fileData.map(lambda (filename, content):
                             (ntpath.basename(filename), content))

#Removing special characters
#Pre-process the words and convert to lower case
fileData_preprocess = fileData_base.map(lambda (file, content): (
    file, re.sub('[^a-z| |0-9-]', '',
                 content.strip().lower())))
#Convert the file to lowercase
fileData_split = fileData_preprocess.map(
    lambda (file, content): (file, content, len(content.split(" "))))
#Finding the data for popular words
file_popwords = fileData_split.map(lambda (file, content, length): [(
    (file, word, length), 1) for word in content.split(
        " ") if word in List_popwords and len(word) > 0])
#Converting to a RDD
fileData_RDD = file_popwords.flatMap(lambda met: met)