def post(self):

		# Get the file key submitted from the web page
		keyInput = self.request.get('filekey')
		if keyInput == '':
			self.response.out.write("File Key cannot be empty.")
			return None

		# Return a query object that represents all entities
		fileKeys = FileKey.all()

		# Find the given key
		fileKeys.filter('__key__ =', db.Key.from_path('FileKey', keyInput, parent=fileKeyList()))

		# Delete the file and output onto web page
		if fileKeys.count() == 0:
			self.response.out.write('Key: {} does not exist.'.format(keyInput))
		else:
			fk = db.get(db.Key.from_path('FileKey', keyInput, parent=fileKeyList()))

			# File stored in Memcache
			if fk.fileLocation == 'memcache':
				memcache.delete(fk.key().id_or_name())
				self.response.out.write('Deleted from Memcache')
			else:
				files.delete(BUCKETPATH + '/' + str(fk.key().id_or_name()))
				self.response.out.write('Deleted from Google Cloud Storage')
			db.delete(db.Key.from_path('FileKey', keyInput, parent = fileKeyList()))
			self.response.out.write('<br />Key: {} removed.'.format(keyInput))
Example #2
    def delete_gs_files(self, backup_info, kind_files):
        """delete files in cloud storage"""
        all_files = []
        for kind_file in kind_files:
            all_files += kind_file.files

        ma = re.match(r'^(.*)\.backup_info$', backup_info.gs_handle)
        if ma:
            prefix = ma.group(1)
        else:
            logging.error('gs file name does not match expected pattern')
            raise Exception('gs file name does not match expected pattern')

        for kind in backup_info.kinds:
            all_files.append(prefix + '.' + kind + '.backup_info')

        all_files.append(backup_info.gs_handle)

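        # files.delete() accepts multiple filenames per call, so issue the
        # deletions in batches of 100.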
        delete_files = []
        for file_name in all_files:
            delete_files.append(file_name)
            if len(delete_files) == 100:
                files.delete(*delete_files)
                delete_files = []
        if delete_files:
            files.delete(*delete_files)
Example #3
def borrarImg_cloud(img):
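	# Remove the file from Cloud Storage and drop its serving URL, then delete the datastore entity.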
	if img.blobkeygs:
		filenom = '/gs/' + BUCKET + '/' + str(img.key.parent().id()) + '/' + img.nombre
		#try:
		files.delete(filenom)
		images.delete_serving_url(img.blobkeygs)
		#except:
		#	pass
	img.key.delete()
Example #4
  def run(self, temp_files):
    for shard in temp_files:
      for filename in shard:

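        # files.delete() can fail transiently; retry up to 10 times and
        # otherwise give up silently.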
        for _ in range(10):
          try:
            files.delete(filename)
            break
          except:
            pass
Example #5
    def run(self, temp_files):
        for shard in temp_files:
            for filename in shard:

                for _ in range(10):
                    try:
                        files.delete(filename)
                        break
                    except:
                        pass
Example #6
 def delete_file_or_list(self, filename_or_list):
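   # Accept either a single filename or a (possibly nested) list of filenames.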
   if isinstance(filename_or_list, list):
     for filename in filename_or_list:
       self.delete_file_or_list(filename)
   else:
     filename = filename_or_list
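     # Retry the delete up to 10 times, ignoring any errors.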
     for _ in range(10):
       try:
         files.delete(filename)
         break
       except:
         pass
Example #7
 def delete_file_or_list(self, filename_or_list):
   if isinstance(filename_or_list, list):
     for filename in filename_or_list:
       self.delete_file_or_list(filename)
   else:
     filename = filename_or_list
     for _ in range(10):
       try:
         files.delete(filename)
         break
       except:
         pass
Example #8
 def post(self):
   filekeys = FileKey.all()
   self.response.out.write("<b>Removed all</b>:</br>")
   for filekey in filekeys:
     self.response.out.write(filekey.key().id_or_name())
     if filekey.filelocation == "memcache":
       memcache.delete(filekey.key().id_or_name())
     else:
       files.delete(BUCKET_PATH+"/"+str(filekey.key().id_or_name()))
     self.response.out.write('<br>')
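   # Second pass: delete the FileKey entities themselves from the datastore.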
   for filekey in filekeys:
     db.delete(filekey.key())
Example #9
 def post(self):
   fkeystr = self.request.get("filekey")
   filekeys = FileKey.all()
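   # Build the datastore key for the submitted name and check that it exists.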
   thekey = db.Key.from_path("FileKey", fkeystr, parent=filelist_key())
   filekeys.filter('__key__ =', thekey)
   if filekeys.count() == 0:
     self.response.out.write("Key(%s) does NOT exists." % fkeystr)
   else:
     f = db.get(thekey)
     if f.filelocation == "memcache":
       memcache.delete(f.key().id_or_name())
       self.response.out.write("Deleted from Memcache</br>")
     else:
       files.delete(BUCKET_PATH+"/"+f.key().id_or_name())
       self.response.out.write("Deleted from Google Cloud Storage</br>")
     db.delete(thekey)
     self.response.out.write("Key(%s) removed." % fkeystr)
Example #10
 def post(self):
     fkstring = self.request.get("filekey")
     removeKey = db.Key.from_path("FileKey", fkstring, parent=filelist_key())
     allFileKeys = FileKey.all()
     allFileKeys.filter("__key__ =", removeKey)
     if allFileKeys.count() != 0:
         f = db.get(removeKey)
         if f.filelocation == "memcache":
             memcache.delete(f.key().id_or_name())
             self.response.out.write("<br>Deleted %s from MEMCACHE</br>" % fkstring)
         else:
             files.delete(BUCKET_PATH + "/" + f.key().id_or_name())
             self.response.out.write("<br>Deleted %s from GOOGLE CLOUD STORAGE</br>" % fkstring)
         db.delete(removeKey)
         self.response.out.write("""<br><br><b><a href="/">RETURN TO HOME</a></b>""")
     else:
         self.response.out.write("ERROR!!! The file key '%s' is not in the list." % fkstring)
         self.response.out.write("""<br><br><b><a href="/">RETURN TO HOME</a></b>""")
Example #11
 def post(self):
   fkstring = self.request.get("filekey")
   removeKey = db.Key.from_path("FileKey", fkstring, parent=filelist_key())
   allFileKeys = FileKey.all()
   allFileKeys.filter('__key__ =', removeKey)
   if allFileKeys.count() != 0:
     f = db.get(removeKey)
     if f.filelocation == "memcache":
       memcache.delete(f.key().id_or_name())
       self.response.out.write("<br>Deleted %s from MEMCACHE</br>" % fkstring)
     else:
       files.delete(BUCKET_PATH+"/"+f.key().id_or_name())
       self.response.out.write("<br>Deleted %s from GOOGLE CLOUD STORAGE</br>" % fkstring)
     db.delete(removeKey)
     self.response.out.write("""<br><br><b><a href="/">RETURN TO HOME</a></b>""")
   else:
     self.response.out.write("ERROR!!! The file key '%s' is not in the list." % fkstring)
     self.response.out.write("""<br><br><b><a href="/">RETURN TO HOME</a></b>""")
Example #12
	def post(self):
		
		# Return a query object that represents all entities
		fileKeys = FileKey.all()

		self.response.out.write('<b>Removed All:</b>')

		# Delete and output onto the web page
		for fileKey in fileKeys:
			self.response.out.write('<br />' + str(fileKey.key().id_or_name()))
			
			# Delete keys in Memcache
			if fileKey.fileLocation == 'memcache':
				memcache.delete(fileKey.key().id_or_name())
			# Delete keys in Google Cloud Storage
			else:
				files.delete(BUCKETPATH + '/' + str(fileKey.key().id_or_name()))

		# Delete keys
		for fileKey in fileKeys:
			db.delete(fileKey.key())
Example #13
def delete_object(obj):
    """Deletes an object from cloud storage."""
    files.delete(_appengine_object_path(obj))
Example #14
File: gcs_storage.py Project: uri247/frl
 def delete(self, name):
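      # The Files API addresses Cloud Storage objects by /gs/<bucket>/<object> paths.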
     full_name = '/gs/%s/%s' % (self.bucket_name, name)
     files.delete(full_name)
Example #15
    def _clean_mapreduce(cls, max_age):
        """Separated as internal function to permit tests to pass max_age."""
        num_cleaned = 0

        # If job has a start time before this, it has been running too long.
        min_start_time_datetime = datetime.datetime.utcnow() - max_age
        min_start_time_millis = int(
            (min_start_time_datetime -
             datetime.datetime(1970, 1, 1)).total_seconds() * 1000)

        # Iterate over all namespaces in the installation
        for course_context in sites.get_all_courses():
            with Namespace(course_context.get_namespace_name()):

                # Index map/reduce jobs in this namespace by pipeline ID.
                jobs_by_pipeline_id = {}
                for job_class in data_sources.Registry.get_generator_classes():
                    if issubclass(job_class, jobs.MapReduceJob):
                        job = job_class(course_context)
                        pipe_id = jobs.MapReduceJob.get_root_pipeline_id(
                            job.load())
                        jobs_by_pipeline_id[pipe_id] = job

                # Clean up pipelines
                for state in pipeline.get_root_list()['pipelines']:
                    pipeline_id = state['pipelineId']
                    job_definitely_terminated = (
                        state['status'] == 'done'
                        or state['status'] == 'aborted'
                        or state['currentAttempt'] > state['maxAttempts'])
                    have_start_time = 'startTimeMs' in state
                    job_started_too_long_ago = (
                        have_start_time
                        and state['startTimeMs'] < min_start_time_millis)

                    if (job_started_too_long_ago or
                        (not have_start_time and job_definitely_terminated)):
                        # At this point, the map/reduce pipeline is
                        # either in a terminal state, or has taken so long
                        # that there's no realistic possibility that there
                        # might be a race condition between this and the
                        # job actually completing.
                        if pipeline_id in jobs_by_pipeline_id:
                            jobs_by_pipeline_id[pipeline_id].mark_cleaned_up()

                        p = pipeline.Pipeline.from_id(pipeline_id)
                        if p:
                            # Pipeline cleanup, oddly, does not clean up the
                            # relevant blobstore items.  They have a TODO,
                            # but it has not been addressed as of Sep 2014.
                            # pylint: disable=protected-access
                            root_key = db.Key.from_path(
                                pipeline_models._PipelineRecord.kind(),
                                pipeline_id)
                            for path in cls._collect_blobstore_paths(root_key):
                                files.delete(path)

                            # This only enqueues a deferred cleanup item, so
                            # transactionality with marking the job cleaned is
                            # not terribly important.
                            p.cleanup()
                        num_cleaned += 1
        return num_cleaned
Example #16
    def _clean_mapreduce(cls, max_age):
        """Separated as internal function to permit tests to pass max_age."""
        num_cleaned = 0

        # If job has a start time before this, it has been running too long.
        min_start_time_datetime = datetime.datetime.utcnow() - max_age
        min_start_time_millis = int(
            (min_start_time_datetime - datetime.datetime(1970, 1, 1))
            .total_seconds() * 1000)

        # Iterate over all namespaces in the installation
        for course_context in sites.get_all_courses():
            with Namespace(course_context.get_namespace_name()):

                # Index map/reduce jobs in this namespace by pipeline ID.
                jobs_by_pipeline_id = {}
                for job_class in data_sources.Registry.get_generator_classes():
                    if issubclass(job_class, jobs.MapReduceJob):
                        job = job_class(course_context)
                        pipe_id = jobs.MapReduceJob.get_root_pipeline_id(
                            job.load())
                        jobs_by_pipeline_id[pipe_id] = job

                # Clean up pipelines
                for state in pipeline.get_root_list()['pipelines']:
                    pipeline_id = state['pipelineId']
                    job_definitely_terminated = (
                        state['status'] == 'done' or
                        state['status'] == 'aborted' or
                        state['currentAttempt'] > state['maxAttempts'])
                    have_start_time = 'startTimeMs' in state
                    job_started_too_long_ago = (
                        have_start_time and
                        state['startTimeMs'] < min_start_time_millis)

                    if (job_started_too_long_ago or
                        (not have_start_time and job_definitely_terminated)):
                        # At this point, the map/reduce pipeline is
                        # either in a terminal state, or has taken so long
                        # that there's no realistic possibility that there
                        # might be a race condition between this and the
                        # job actually completing.
                        if pipeline_id in jobs_by_pipeline_id:
                            jobs_by_pipeline_id[pipeline_id].mark_cleaned_up()

                        p = pipeline.Pipeline.from_id(pipeline_id)
                        if p:
                            # Pipeline cleanup, oddly, does not clean up the
                            # relevant blobstore items.  They have a TODO,
                            # but it has not been addressed as of Sep 2014.
                            # pylint: disable=protected-access
                            root_key = db.Key.from_path(
                                pipeline_models._PipelineRecord.kind(),
                                pipeline_id)
                            for path in cls._collect_blobstore_paths(root_key):
                                files.delete(path)

                            # This only enqueues a deferred cleanup item, so
                            # transactionality with marking the job cleaned is
                            # not terribly important.
                            p.cleanup()
                        num_cleaned += 1
        return num_cleaned
Example #17
def remove(key):
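    """Delete key's object from the bucket and, if enabled, evict it from memcache."""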
    path = BUCKET + "/" + key
    files.delete(path)
    if MEMCACHED_ENABLED:
        memcache.delete(key)
Example #18
def delete_object(obj):
    """Deletes an object from cloud storage."""
    files.delete(_appengine_object_path(obj))