Example #1
import os
import zipfile

def backupToZip(folder):
	# Backup the entire contents of "folder" into a ZIP file. 

	folder = os.path.abspath(folder)	# make sure folder is absolute

	# Figure out the filename this code should use based
	# on what files already exist. 
	number = 1
	while True:
		zipFilename = os.path.basename(folder) + '_' + str(number) + '.zip'
		if not os.path.exists(zipFilename):
			break
		number = number + 1
		

	# Create zip file
	print('Creating %s...' % (zipFilename))
	backupZip = zipfile.ZipFile(zipFilename, 'w')


	# Walk the entire folder tree and compress the files in each folder.
	for foldername, subfolders, filenames in os.walk(folder):
		print('Adding files in %s...' % (foldername))
		# Add the current folder to the ZIP file. 
		backupZip.write(foldername)

		#Add all the files in this folder to the ZIP file. 
		for filename in filenames:
			newBase = os.path.basename(folder) + '_'
			if filename.startswith(newBase) and filename.endswith('.zip'):
				continue 	# don't backup the backup ZIP files
			backupZip.write(os.path.join(foldername,filename))

	backupZip.close()	
	print('Done')
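
A minimal usage sketch (the folder name 'spam' is hypothetical); each call creates a fresh numbered archive in the current working directory:

backupToZip('spam')   # creates spam_1.zip, then spam_2.zip on the next run, and so on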
Example #2
    def generateMetagenesList(self, ROOT_DIRECTORY, clusterNumber, omicList=None):
        """
        This function obtains the metagenes for each pathway in KEGG based on the input values.

        @param ROOT_DIRECTORY: path to the application root, used to locate the R scripts
        @param clusterNumber: mapping of omic name to the desired number of clusters ("dynamic" when absent)
        @param omicList: optional list of omic names to process; defaults to all gene-based omics
        @returns self
        """
        # STEP 1. EXTRACT THE COMPRESSED FILE WITH THE MAPPING FILES
        ZipFile(self.getOutputDir() + "/mapping_results_" + self.getJobID() + ".zip").extractall(path=self.getTemporalDir())

        # STEP 2. GENERATE THE DATA FOR EACH OMIC DATA TYPE
        filtered_omics = self.geneBasedInputOmics

        if omicList:
            filtered_omics = [inputOmic for inputOmic in self.geneBasedInputOmics if inputOmic.get("omicName") in omicList]

        for inputOmic in filtered_omics:
            try:
                # STEP 2.1 EXECUTE THE R SCRIPT
                logging.info("GENERATING METAGENES INFORMATION...CALLING")
                inputFile = self.getTemporalDir() +  "/" + inputOmic.get("omicName") + '_matched.txt'
                # Select number of clusters, default to dynamic
                kClusters = str(dict(clusterNumber).get(inputOmic.get("omicName"), "dynamic"))
                check_call([
                    ROOT_DIRECTORY + "common/bioscripts/generateMetaGenes.R",
                    '--specie="' + self.getOrganism() +'"',
                    '--input_file="' + inputFile  + '"',
                    '--output_prefix="'+ inputOmic.get("omicName") + '"',
                    '--data_dir="'+ self.getTemporalDir() + '"',
                    '--kegg_dir="'+ KEGG_DATA_DIR + '"',
                    '--sources_dir="' + ROOT_DIRECTORY + '/common/bioscripts/"',
                    '--kclusters="' + kClusters + '"' if kClusters.isdigit() else ''], stderr=STDOUT)
                # STEP 2.2 PROCESS THE RESULTING FILE

                # Reset all pathway metagenes for the omic; map() is lazy in
                # Python 3, so iterate explicitly to make the calls happen
                for pathway in self.matchedPathways.values():
                    pathway.resetMetagenes(inputOmic.get("omicName"))

                with open(self.getTemporalDir() + "/" + inputOmic.get("omicName") + "_metagenes.tab", 'r') as inputDataFile:
                    for line in csv_reader(inputDataFile, delimiter="\t"):
                        if line[0] in self.matchedPathways:
                            self.matchedPathways.get(line[0]).addMetagenes(inputOmic.get("omicName"), {"metagene": line[1], "cluster": line[2], "values": line[3:]})
            except CalledProcessError as ex:
                logging.error("STEP2 - Error while generating metagenes information for " + inputOmic.get("omicName") + ": " + str(ex))

        os_system("mv " + self.getTemporalDir() +  "/" + "*.png " +  self.getOutputDir())
        return self
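
The method relies on aliased imports from its parent module; a plausible preamble, inferred from the call sites rather than confirmed by the source, would be:

import logging
from csv import reader as csv_reader
from os import system as os_system
from subprocess import check_call, CalledProcessError, STDOUT
from zipfile import ZipFile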
Example #3
def Archive_Controller(arg):
    cwd = os.getcwd()

    CSV_Write()

    # Create archive directory for current time
    Current_time = datetime.datetime.now()
    directory = '/home/pi/datalogger/Archive/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Create archive
    zf_name = directory + str(Current_time) + '_Archive.zip'
    #zf = zipfile.ZipFile(zf_name,'w')

    # Write files to zip archive and compress
    try:
        with zipfile.ZipFile(zf_name, 'w') as zf:
            for folder, subfolders, files in os.walk(cwd):
                for file in files:
                    if file.endswith(('.log', '.db', '.csv')):
                        # Store each file under its path relative to cwd;
                        # passing a fixed arcname would make every entry
                        # overwrite the previous one
                        file_path = os.path.join(folder, file)
                        zf.write(file_path,
                                 os.path.relpath(file_path, cwd),
                                 compress_type=zipfile.ZIP_DEFLATED)
            #zf.write('UPS_DB.sql', zipfile.ZIP_STORED)# Write sql database to zip file
            #zf.write('UPS_Messages.log', zipfile.ZIP_STORED)# Write log to zip file
            #zf.write('UPS_DB.csv', zipfile.ZIP_STORED)# Write csv file to zip file

    except Exception:
        logger.error('Could not write files to zip archive')

    try:
        os.remove('UPS_Messages.log')  # Delete log file
        os.remove('UPS_DB.csv')  # Delete csv file
    except OSError:
        logger.error('Could not delete log and csv files')

    try:
        conn = sqlite3.connect('UPS_DB.db')
        c = conn.cursor()
        c.execute("DELETE FROM UPS_DB WHERE Date <= date('now','-1 day')"
                  )  # Delete database rows older than one day
        conn.commit()  # Commit so the deletion is actually persisted
        conn.close()
    except sqlite3.Error:
        logger.error('Could not update SQL database')
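
A plausible preamble for this snippet (standard-library names inferred from the call sites; CSV_Write and the logger come from the surrounding datalogger script):

import datetime
import logging
import os
import sqlite3
import zipfile

logger = logging.getLogger(__name__)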
Example #4
import zipfile
import os

# Define the folder path to scan
carpeta = r"c:\Users\juansebastian\Documents\Tareas"  # raw string: "\U" would otherwise be a syntax error
# Create a ZipFile object with the path where the archive will be saved
zip_file = zipfile.ZipFile('compress.zip', 'w')

# Define the file extensions to compress
matchList = ['.png', '.txt', '.wmv', '.py']

# Iterate over the files in the folder
for archivo in os.listdir(carpeta):
	for ext in matchList:
		# If the extension matches, add the file to the zip
		if archivo.endswith(ext):
			zip_file.write(os.path.join(carpeta, archivo))

# Close the file to save it
zip_file.close()
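
A more idiomatic sketch of the same loop, using a context manager so the archive is closed even if a write fails (same hypothetical folder path as above):

with zipfile.ZipFile('compress.zip', 'w') as zip_file:
	for archivo in os.listdir(carpeta):
		if archivo.endswith(tuple(matchList)):
			zip_file.write(os.path.join(carpeta, archivo))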
Example #5
def main():
    # Declare globals (needed for writing, not for reading)
    global timeStart

    # Get user input
    try:
        inputArray = getInputJSON()
        inputKey = inputArray[0]
        inputData = json.loads(inputArray[1])
        inputValue = inputData.get(inputKey)
    except KeyboardInterrupt:
        print("")
        exit(0)


    # Input confirmation
    conf = getConformation(f'\nThe key "{inputKey}" will be set to '+str(inputValue).replace("'",'"')+'. This will result in:\n'+str(inputData).replace("'",'"')+'\n\nAre you sure this is what you want?')

    if not conf:
        print('Exiting...')
        exit(0)
    print("\n")

    # Start timer
    timeStart = time.time()


    # Get zips in current and lower dirs
    allZips = searchDirFor('./', '', '.zip')
    lenAllZips = len(allZips)
    displayArray(allZips, f'Found {lenAllZips} zip'+ ('s' if lenAllZips != 1 else ''))


    # Gets temporary directory
    tempDir = './temp'
    while os.path.isdir(tempDir):
        print(f'{timeStamp(timeStart)} Directory "{tempDir}/" already exists, looking for new temp directory')
        tempDir += str(random.randint(0,9))


    # Processes all the zip files if found
    failed = [['Operation', 'File', 'Error type', 'Error value']]

    if lenAllZips > 0:
        print(f'{timeStamp(timeStart)} Using temporary directory: "{tempDir}/"')

        for i in range(lenAllZips):
            # Get temporary current directory
            tempCurrentDir = os.path.join(tempDir, getFileName(allZips[i]))

            # Extract currently processing zip file
            print(f'\n{timeStamp(timeStart)} Extracting zip {i+1}: "{allZips[i]}" -> "{tempCurrentDir}/"', end=printEnd())
            with ZipFile(allZips[i], 'r') as zf:
                try:
                    zf.extractall(tempCurrentDir)
                except Exception as e:
                    failed.append(["extracting", allZips[i], type(e).__name__, str(e)])
            print('done!')

            # Get JSON files of extracted zip
            allJsons = searchDirFor(tempCurrentDir, '', '.json')
            lenAllJsons = len(allJsons)
            displayArray(allJsons, f'Found {lenAllJsons} json'+ ('s' if lenAllJsons != 1 else ''))

            if lenAllJsons > 0:
                # Look through all JSONs and replace the specified value
                didJsonsChange = 0
                for j in range(lenAllJsons):
                    try:
                        hasJsonChanged = jsonChangeValue(allJsons[j], inputKey, inputValue)
                        if hasJsonChanged == 1:
                            didJsonsChange = 1
                    except Exception as e:
                        print("failed!")
                        failed.append(["changing json", allJsons[j], type(e).__name__, str(e)])

                # Rezip extracted zip if something changed
                if didJsonsChange == 1:
                    print(f'{timeStamp(timeStart)} Writing: "{tempCurrentDir}/*" -> "{allZips[i]}"', end=printEnd())
                    with ZipFile(allZips[i], 'w') as zf:
                        try:
                            for j in os.listdir(tempCurrentDir):
                                zf.write(os.path.join(tempCurrentDir, j), arcname=j)
                        except Exception as e:
                            failed.append(["zipping", os.path.join(tempCurrentDir, j), type(e).__name__, str(e)])
                    print('done!')

            # Remove temp dir for zip
            print(f'{timeStamp(timeStart)} Removing: "{tempCurrentDir}"', end=printEnd())
            shutil.rmtree(tempCurrentDir)
            print('done!')


        # Remove temp dir fully
        print(f'\n{timeStamp(timeStart)} Removing the temp directory: "{tempDir}"', end=printEnd())
        try:
            os.rmdir(tempDir)
            print('done!')
        except OSError as e:
            if e.errno == errno.ENOTEMPTY:
                # Directory not empty: remove it together with the remaining tree
                shutil.rmtree(tempDir)
            else:
                print('failed!')
                failed.append(["removing", tempDir, type(e).__name__, str(e)])

    print('\n'*2+'-='*5, 'SCRIPT FINISHED', '=-'*5+'\n')
    lenFailed = len(failed)

    if lenFailed > 1:
        temp = 'S' if lenFailed > 2 else ''  # lenFailed includes the header row
        print(f'WITH ERROR{temp}'+'=-'*4)
        for i in range(lenFailed-1):
            print(f'\tGot {failed[i+1][2]} error "{failed[i+1][3]}" while {failed[i+1][0]} file "{failed[i+1][1]}"\n')

    print(f'\nExecution of script took: {round(time.time()-timeStart, 6)} seconds\n')
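
The script depends on helpers defined elsewhere (getInputJSON, getConformation, searchDirFor, displayArray, timeStamp, printEnd, getFileName, jsonChangeValue); a plausible standard-library preamble for the names used above, inferred rather than confirmed by the source:

import errno
import json
import os
import random
import shutil
import time
from zipfile import ZipFile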
Example #6
# This snippet starts partway through a transfer-learning script: `layers`,
# `Model`, `RMSprop`, and the earlier layers feeding `x` (including
# `pre_trained_model`) are assumed to be defined above, e.g. via
# `from tensorflow.keras import layers, Model` and
# `from tensorflow.keras.optimizers import RMSprop`.
x = layers.Dropout(0.2)(x)
# Output layer
x = layers.Dense(1, activation='sigmoid')(x)

model = Model(pre_trained_model.input, x)  # Build the model

model.compile(optimizer=RMSprop(lr=0.0001),
              loss='binary_crossentropy',
              metrics=['acc'])  # Set the optimizer, loss function, and evaluation metric

from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile

local_zip = '/tmp/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')  # extract to /tmp so base_dir below resolves
zip_ref.close()

# Data files
base_dir = '/tmp/cats_and_dogs_filtered'

train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')  # Training set: cats directory
train_dogs_dir = os.path.join(train_dir, 'dogs')  # Training set: dogs directory
validation_cats_dir = os.path.join(validation_dir, 'cats')  # Dev set: cats directory
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # Dev set: dogs directory

train_cat_fnames = os.listdir(train_cats_dir)
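
The ImageDataGenerator import above is otherwise unused in the snippet; a plausible continuation (directory names come from the code, while generator parameters such as target size and batch size are assumptions) would be:

train_datagen = ImageDataGenerator(rescale=1.0/255)
validation_datagen = ImageDataGenerator(rescale=1.0/255)

train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(
    validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')

history = model.fit(train_generator, validation_data=validation_generator, epochs=15)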