# ------------------------------------------------------------------
# Bootstrap for the repair pass: open the MySQL connection and, when
# bucket upload is enabled, one object-store connection and bucket per
# configured storage site; finally count the registered file parts.
# ------------------------------------------------------------------
print("Estabelecendo conexoes iniciais")
print("-" * 80)
print("Conectando ao banco de dados...")
db = mysql.connector.connect(
    user=CONFIG['DB']['USER'],
    password=CONFIG['DB']['PASS'],
    host=CONFIG['DB']['ENDPOINT'],
    database=CONFIG['DB']['DATABASE'],
)

if CONFIG['DOBUCKETUPLOAD']:
    # Parallel lists: s3Upload[n] is the connection that owns upBucket[n].
    s3Upload = []
    upBucket = []
    print("Estabelecendo conexao com os provedores de armazenamento...")
    for i in range(CONFIG['storage']['numsites']):
        site = CONFIG['storage'][i]
        print(" %s... " % (site['PROVIDER']))
        connection = getObjectStoreConnection(site, debug=False)
        bucket = createAndGetObjectStoreBucket(site, connection, debug=False)
        s3Upload.append(connection)
        upBucket.append(bucket)

print("")
print("-" * 80)
print("Reparando os arquivos")
print("-" * 80)

# How many parts are registered in the database before the repair starts.
cursor = db.cursor()
query = "SELECT COUNT(sha1parte) from partes;"
cursor.execute(query)
rs = cursor.fetchall()
cursor.close()
# NOTE(review): the leading ")" closes a call that begins above this
# visible chunk -- confirm against the full file before reformatting.
# Bootstrap for the request-serving process: when DOBUCKETUPLOAD is set,
# opens one object-store connection + bucket per configured storage site
# (kept in the parallel lists s3Upload / upBucket), then starts the app
# server (presumably Flask/Werkzeug) on 0.0.0.0:8008.
# NOTE(review): use_reloader=True re-executes this module whenever code
# changes, repeating all of the connection setup -- confirm intended
# outside of development.
) print("") print("-" * 80) print("Estabelecendo conexoes iniciais") print("-" * 80) if (CONFIG['DOBUCKETUPLOAD']): s3Upload = [] upBucket = [] print("Estabelecendo conexao com os provedores de armazenamento...") for i in range(CONFIG['storage']['numsites']): print(" %s... " % (CONFIG['storage'][i]['PROVIDER'])) tmpS3Connection = getObjectStoreConnection(CONFIG['storage'][i], debug=False) tmpUpBucket = createAndGetObjectStoreBucket(CONFIG['storage'][i], tmpS3Connection, debug=False) s3Upload.append(tmpS3Connection) upBucket.append(tmpUpBucket) print("") print("-" * 80) print("Atendendo pedidos") print("-" * 80) app.run(host="0.0.0.0", port=8008, use_reloader=True) print("")
# NOTE(review): this chunk starts mid-loop -- `chunk`, `hashergeral`,
# `fd_input`, `ifshare_propriedades`, `arquivo_saida` and `arquivo_raw`
# are all bound above the visible region.
# Per-chunk hashing and upload: each chunk of `chunksize` bytes gets its
# own SHA-1 (stored under 'hashchunk<N>') while `hashergeral` accumulates
# the whole-file digest ('hashgeral'). The metadata is then written to
# the .ifshare sidecar file and both the raw file and the sidecar are
# multipart-uploaded to the "Incoming" bucket.
# NOTE(review): fd_output is opened in text mode ("w") and closed
# manually; a `with` block would be safer -- left as-is here.
hasher = hashlib.sha1() buf = fd_input.read(ifshare_propriedades['chunksize']) hashergeral.update(buf) hasher.update(buf) ifshare_propriedades['hashchunk' + str(chunk)] = hasher.hexdigest() ifshare_propriedades['hashgeral'] = hashergeral.hexdigest() fd_input.close() showMetadata(ifshare_propriedades) fd_output = open(arquivo_saida, "w") writeMetadata(fd_output, ifshare_propriedades) fd_output.close() s3connection = getObjectStoreConnection(CONFIG['Incoming'], debug=False) bucket = createAndGetObjectStoreBucket(CONFIG['Incoming'], s3connection, debug=False) uploader = MultipartUploader(arquivo_raw, ifshare_propriedades['tamanho'], ifshare_propriedades['arquivo'], bucket) uploader.put() uploader = MultipartUploader(arquivo_raw + ".ifshare", os.path.getsize(arquivo_raw + ".ifshare"), ifshare_propriedades['arquivo'] + ".ifshare", bucket, message="Subindo metadados") uploader.put()
# Teardown: deletes the incoming SQS queue (if it exists), empties and
# deletes the incoming S3 bucket one key at a time, then iterates the
# configured storage sites to remove their chunk buckets.
# NOTE(review): truncated here -- the body of the final
# `if bucket != None:` continues past this visible chunk.
# NOTE(review): `!= None` should be `is not None` per PEP 8; kept
# byte-identical in this documentation-only pass.
print("") print("-" * 80) print("Limpando as coisas") print("-" * 80) print("Removendo a fila de arquivos a processar...") sqsConnection = boto.sqs.connect_to_region( CONFIG['Incoming']['REGION'], aws_access_key_id=CONFIG['Incoming']['ACCESS_KEY'], aws_secret_access_key=CONFIG['Incoming']['SECRET_KEY']) myQueue = sqsConnection.lookup(CONFIG['Incoming']['QUEUE']) if myQueue != None: sqsConnection.delete_queue(myQueue) sqsConnection.close() print("Removendo o bucket de arquivos a processar...") s3Incoming = getObjectStoreConnection(CONFIG['Incoming']) bucket = s3Incoming.lookup(CONFIG['Incoming']['BUCKET']) if bucket != None: for k in bucket.list(): print(".", end='') k.delete() print("") s3Incoming.delete_bucket(CONFIG['Incoming']['BUCKET']) s3Incoming.close() print("Removendo os chunks e buckets nos provedores de armazenamento...") for i in range(CONFIG['storage']['numsites']): tmpS3Connection = getObjectStoreConnection(CONFIG['storage'][i], debug=False) bucket = tmpS3Connection.lookup(str(CONFIG['storage'][i]['BUCKET'])) if bucket != None: