def handle(self, *args, **options):
    """Initiate the recovery workflow for a DPN object stored in the federation.

    Expects the dpn_object_id as the first positional argument. Validates
    that the local node is entitled to request recovery (it must be the
    object's first node or one of its replicating nodes), broadcasts a
    RecoveryInitQuery, records the attempt as a Workflow row, and schedules
    the follow-up task that chooses a node and performs the recovery.

    Raises:
        CommandError: if no dpn_object_id is given, if no registry entry
            exists for it, or if this node is neither the first node nor
            a replicating node for the object.
    """
    if not args:
        raise CommandError(
            "A dpn_object_id is required to execute this task.")

    dpn_obj_id = args[0]
    node = Node.objects.get(name=DPN_NODE_NAME)
    try:
        entry = RegistryEntry.objects.get(dpn_object_id=dpn_obj_id)
        if node.name != entry.first_node_name and node not in entry.replicating_nodes.all():
            raise CommandError(
                "Current node is not listed either as first node or replicating node.")

        correlation_id = str(uuid4())
        headers = dict(correlation_id=correlation_id)
        body = {
            "protocol": DPN_XFER_OPTIONS,
            "dpn_object_id": dpn_obj_id
        }
        msg = RecoveryInitQuery(headers, body)

        # save initial workflow
        action = Workflow(
            correlation_id=correlation_id,
            dpn_object_id=dpn_obj_id,
            action=RECOVERY,
            node=DPN_NODE_NAME,
            step=INIT_QUERY
        )

        # handle errors on sending message to network
        try:
            msg.send(DPN_BROADCAST_KEY)
            action.state = SUCCESS
        except Exception as err:
            action.state = FAILED
            # store a string, not the exception object, so the note
            # persists cleanly in the workflow's text field
            action.note = str(err)

        # save action in DB
        action.save()

        # now queue the task responsible to handle the recovery;
        # wait two TTL periods so responding nodes have time to answer
        ttl = DPN_MSG_TTL.get('recovery-init-query', DPN_TTL)
        delay = ttl * 2
        choose_node_and_recover.apply_async(
            (correlation_id, dpn_obj_id, action),
            countdown=delay
        )
    except RegistryEntry.DoesNotExist:
        raise CommandError(
            "The dpn_object_id that you provided does not exist.")
def on_created(self, event):
    """Watchdog hook fired when a new file appears in the watched directory.

    Polls the file size every 5 seconds until two consecutive reads match
    (the transfer is assumed complete), then starts ingestion unless the
    bag is at or above DPN_MAX_SIZE. Directory events are ignored.
    """
    if event.is_directory:
        return

    bag_error = False
    filename = base = os.path.basename(event.src_path)
    initial_filesize = os.path.getsize(event.src_path)
    # watchdog may hand us a bytes path on some platforms
    if isinstance(filename, bytes):
        filename = filename.decode("utf-8")
    logger.info("New bag detected: %s. Let's wait 5 seconds and check size again..." % base)
    while True:
        # NOTE: how long should we wait to get the final size of the bag??
        # discuss this with the team
        # wait 5 seconds to check bag size again
        time.sleep(5)
        try:
            filesize_now = os.path.getsize(event.src_path)
            # if initial filesize is equal to the filesize now
            # we can start the bag ingestion
            if initial_filesize == filesize_now:
                filesize = filesize_now
                break
            else:
                initial_filesize = filesize_now
                # use the module logger, not print, so the message
                # lands in the same log stream as the rest of the handler
                logger.info("Bag is not ready, check again in 5 seconds...")
        except Exception as err:
            # file vanished or became unreadable mid-transfer
            bag_error = err
            break

    if bag_error:
        logger.error("Error processing the new bag %s. Msg -> %s" % (base, bag_error))
    elif filesize < DPN_MAX_SIZE:
        logger.info("Bag looks good. Starting ingestion of %s..." % base)
        # start ingestion and link task to choose nodes
        filename_id = os.path.splitext(filename)[0]
        delay = DPN_MSG_TTL.get("replication-init-query", DPN_TTL)
        initiate_ingest.apply_async(
            (filename_id, filesize),
            link=choose_and_send_location.subtask((), countdown=delay)
        )
        # execute choose_and_send_location task DPN_TTL seconds after
        # ReplicationInitQuery has been sent to broadcast queue
        # using countdown parameter of celery task to do that
    else:
        logger.info("Bag %s is too big to be replicated. Not ingested!" % base)
def handle(self, *args, **options):
    """Broadcast a registry date-range sync request, then schedule conflict resolution.

    Validates the start/end dates from the command options, sends a
    RegistryDateRangeSync message to the broadcast queue, and queues the
    solve_registry_conflicts task to run after the message TTL expires.

    Raises:
        CommandError: if the start date is not prior to the end date.
    """
    window_start = self.validate_date(options['startdate'])
    window_end = self.validate_date(options['enddate'])
    if window_start > window_end:
        raise CommandError("Start date must be prior to End Date")

    message = RegistryDateRangeSync(
        {
            'correlation_id': str(uuid4()),
            'sequence': 0
        },
        {
            'date_range': [dpn_strftime(window_start), dpn_strftime(window_end)]
        }
    )
    message.send(DPN_BROADCAST_KEY)

    # give other nodes one TTL period to reply before reconciling
    wait = DPN_MSG_TTL.get("registry-daterange-sync-request", DPN_TTL)
    solve_registry_conflicts.apply_async(countdown=wait)
def _get_ttl(self):
    """Return the message TTL configured for this directive, falling back to the default."""
    configured_ttl = DPN_MSG_TTL.get(self.directive, DPN_TTL)
    return configured_ttl