def create_manifest(name, data_type, description):
    """Write a Minecraft pack manifest, zip it and rename it to .mcpack.

    :param name: pack name, also used as the output file's base name.
    :param data_type: module type written into the manifest (used for
        behaviour/resource packs).
    :param description: fragment inserted into the manifest's "header"
        object. NOTE(review): it is inserted raw, without a
        "description" key — callers appear to supply the key/formatting
        themselves; confirm.
    """
    file = "manifest.json"
    with open(file, "w") as f:
        # Contents of the manifest for behaviour/resource packs.
        # Doubled braces are literal JSON braces; the single {} slots are
        # filled by .format() with description, name and two fresh UUIDs.
        contents = '{{\n\t"format_version": 1,\n\t"header": {{{}\
\n\t\t"name": "{}",\n\t\t"uuid": "{}",\n\t\t"version": [ 0, 0, 1 ]\
\n\t}},\n\t"modules": [\n\t\t{{\n\t\t\t"type": "{}",\n\t\t\t"uuid": "{}",\
\n\t\t\t"version": [ 0, 0, 1 ]\n\t\t}}\n\t]\n}}'
        f.write(contents.format(description, name, new_uuid(), data_type,
                                new_uuid()))
    # Creates a zipped folder and copies manifest.json inside
    make_archive(name, "zip", getcwd(), file)
    # Removes the old manifest.json copy
    remove(file)
    # Renames the zip file to .mcpack; if a pack with this name already
    # exists, append the first free number instead of overwriting it.
    if os_path.exists(name + ".mcpack"):
        num = 1
        while os_path.exists(name + str(num) + ".mcpack"):
            num += 1
        rename(name + ".zip", name + str(num) + ".mcpack")
    else:
        rename(name + ".zip", name + ".mcpack")
    # Prints a success message
    print("New {} pack manifest successfully created!\n".format(data_type))
def test_session_removal_is_complete(self):
    """Removing an expired session must delete its reservation and its
    reserved slots while leaving the allocation itself untouched."""
    sc = Scheduler(new_uuid())
    start, end = datetime(2013, 9, 27, 9, 0), datetime(2013, 9, 27, 10)
    sc.allocate(dates=(start, end))

    session_id = new_uuid()

    token = sc.reserve(
        reservation_email, (start, end), session_id=session_id
    )
    # Before approval: a reservation exists but no slot is held yet.
    self.assertEqual(db.Session.query(db.Reservation).count(), 1)
    self.assertEqual(db.Session.query(db.Allocation).count(), 1)
    self.assertEqual(db.Session.query(db.ReservedSlot).count(), 0)

    sc.approve_reservation(token)

    # Approval materialises the reserved slot.
    self.assertEqual(db.Session.query(db.Reservation).count(), 1)
    self.assertEqual(db.Session.query(db.Allocation).count(), 1)
    self.assertEqual(db.Session.query(db.ReservedSlot).count(), 1)

    # Pretend 15 minutes have passed so the session counts as expired.
    db.remove_expired_reservation_sessions(
        utils.utcnow() + timedelta(seconds=15*60)
    )

    # Reservation and slot are gone; the allocation remains.
    self.assertEqual(db.Session.query(db.Reservation).count(), 0)
    self.assertEqual(db.Session.query(db.Allocation).count(), 1)
    self.assertEqual(db.Session.query(db.ReservedSlot).count(), 0)
def test_add_reserved_slot(scheduler):
    """The same slot may not be reserved twice (database integrity)."""
    allocation = Allocation(raster=15, resource=scheduler.resource)
    allocation.start = datetime(2011, 1, 1, 15, tzinfo=utc)
    allocation.end = datetime(2011, 1, 1, 15, 59, tzinfo=utc)
    allocation.group = new_uuid().hex
    allocation.mirror_of = scheduler.resource

    reservation_token = new_uuid()

    def build_slot():
        # Two slots built this way collide on the same span/allocation.
        s = ReservedSlot(resource=allocation.resource)
        s.start = allocation.start
        s.end = allocation.end
        s.allocation = allocation
        s.reservation = reservation_token
        return s

    first = build_slot()
    duplicate = build_slot()

    scheduler.session.add(allocation)
    scheduler.session.add(first)
    scheduler.session.add(duplicate)

    # Flushing the doubly-used slot must trip the integrity check.
    with pytest.raises(IntegrityError):
        scheduler.session.flush()
def new_test_scheduler(dsn, context=None, name=None):
    """Create a scheduler bound to a (possibly freshly created) context.

    Missing context/name values are filled in with random hex strings so
    each test gets an isolated scheduler by default.
    """
    if not context:
        context = new_uuid().hex
    if not name:
        name = new_uuid().hex

    registered = registry.register_context(context, replace=True)
    registered.set_setting('dsn', dsn)

    return new_scheduler(
        context=registered, name=name, timezone='Europe/Zurich'
    )
def cache_version(self):
    """Return a cache-busting version string for this instance.

    The random component is regenerated once the cache is older than
    ``cache_lifetime`` seconds (or on first use); the url and lifetime
    are appended so distinct endpoints never share a version string.
    """
    now = datetime.utcnow()
    fetched = self._last_fetch if self._last_fetch else now
    age_seconds = (now - fetched).total_seconds()

    if self._cache_version is None or age_seconds > self.cache_lifetime:
        self._cache_version = new_uuid().hex

    return self._cache_version + self.url + str(self.cache_lifetime)
def __init__(self, name="Default workflow", uuid=None, curr_obj=0,
             workflow_object=None, user_id=0, module_name="Unknown"):
    """Bind this engine to a Workflow database record.

    A given ``workflow_object`` wins outright; otherwise ``uuid`` is
    looked up in the database, and a brand new record (with a fresh
    uuid) is created when nothing is found.
    """
    self.db_obj = None
    if isinstance(workflow_object, Workflow):
        self.db_obj = workflow_object
    else:
        # If uuid is defined we try to get the db object from DB.
        if uuid is not None:
            self.db_obj = \
                Workflow.query.filter(Workflow.uuid == uuid).first()
        else:
            uuid = new_uuid()
        if self.db_obj is None:
            # Nothing found (or no uuid requested): create a new record.
            self.db_obj = Workflow(name=name, user_id=user_id,
                                   current_object=curr_obj,
                                   module_name=module_name, uuid=uuid)
            self._create_db_obj()
    super(BibWorkflowEngine, self).__init__()
    self.add_log()
def test_whole_day(): allocation = Allocation( raster=15, resource=new_uuid(), timezone='Europe/Zurich' ) # the whole-day is relative to the allocation's timezone allocation.start = datetime(2013, 1, 1, 23, 0, tzinfo=utc) allocation.end = datetime(2013, 1, 2, 23, 0, tzinfo=utc) assert allocation.whole_day allocation.start = datetime(2013, 1, 1, 23, 0, tzinfo=utc) allocation.end = datetime(2013, 1, 2, 22, 59, 59, 999999, tzinfo=utc) assert allocation.whole_day allocation.start = datetime(2013, 1, 1, 23, 0, tzinfo=utc) allocation.end = datetime(2013, 1, 2, 22, 59, 59, 999999, tzinfo=utc) assert allocation.whole_day allocation.start = datetime(2013, 1, 1, 15, 0, tzinfo=utc) allocation.end = datetime(2013, 1, 1, 0, 0, tzinfo=utc) with pytest.raises(AssertionError): allocation.whole_day
def test_group_reserve(self):
    """Reserving a group targets every allocation in the group; with a
    quota of 3 the same group reservation succeeds three times over."""
    sc = Scheduler(new_uuid())

    dates = [
        (datetime(2013, 4, 6, 12, 0), datetime(2013, 4, 6, 16, 0)),
        (datetime(2013, 4, 7, 12, 0), datetime(2013, 4, 7, 16, 0))
    ]

    allocations = sc.allocate(
        dates, grouped=True, approve_manually=True, quota=3
    )

    self.assertEqual(len(allocations), 2)

    group = allocations[0].group

    # reserve the same thing three times, which should yield equal results
    def reserve():
        token = sc.reserve(u'*****@*****.**', group=group)
        reservation = sc.reservation_by_token(token).one()
        targets = reservation._target_allocations().all()
        # Both allocations of the group are targeted before approval...
        self.assertEqual(len(targets), 2)

        sc.approve_reservation(token)

        targets = reservation._target_allocations().all()
        # ...and still after approval.
        self.assertEqual(len(targets), 2)

    reserve()
    reserve()
    reserve()
def test_fragmentation(self):
    """With quota > 1 the master allocation is used first; freeing it
    makes it preferred again, and an allocation with reservations
    cannot be removed."""
    sc = Scheduler(new_uuid())

    start = datetime(2011, 1, 1, 15, 0)
    end = datetime(2011, 1, 1, 16, 0)
    daterange = (start, end)

    allocation = sc.allocate(daterange, quota=3)[0]
    reservation = sc.reserve(reservation_email, daterange)
    slots = sc.approve_reservation(reservation)

    # The first reservation gets slots on the master resource
    # (the comprehension is non-empty, hence truthy).
    self.assertTrue([True for s in slots if s.resource == sc.uuid])

    slots = sc.approve_reservation(
        sc.reserve(reservation_email, daterange)
    )
    # The second reservation must land on a mirror, so the comprehension
    # over master-owned slots is empty (hence falsy).
    self.assertFalse([False for s in slots if s.resource == sc.uuid])

    sc.remove_reservation(reservation)

    slots = sc.approve_reservation(
        sc.reserve(reservation_email, daterange)
    )
    # With the master freed, the next reservation takes it again.
    self.assertTrue([True for s in slots if s.resource == sc.uuid])

    # Allocations backing live reservations may not be removed.
    self.assertRaises(
        AffectedReservationError, sc.remove_allocation, allocation.id
    )
def test_allocation_partition(self):
    """availability_partitions() yields (percentage, reserved) spans
    covering the whole allocation."""
    sc = Scheduler(new_uuid())
    allocations = sc.allocate(
        (
            datetime(2011, 1, 1, 8, 0),
            datetime(2011, 1, 1, 10, 0)
        ),
        partly_available=True
    )

    allocation = allocations[0]
    partitions = allocation.availability_partitions()
    # Untouched allocation: a single span covering 100%, not reserved.
    self.assertEqual(len(partitions), 1)
    self.assertEqual(partitions[0][0], 100.0)
    self.assertEqual(partitions[0][1], False)

    # Reserve half an hour in the middle of the two hour allocation.
    start, end = datetime(2011, 1, 1, 8, 30), datetime(2011, 1, 1, 9, 00)
    token = sc.reserve(reservation_email, (start, end))
    sc.approve_reservation(token)

    partitions = allocation.availability_partitions()
    # Free 8:00-8:30 (25%), reserved 8:30-9:00 (25%), free 9:00-10:00 (50%).
    self.assertEqual(len(partitions), 3)
    self.assertEqual(partitions[0][0], 25.00)
    self.assertEqual(partitions[0][1], False)
    self.assertEqual(partitions[1][0], 25.00)
    self.assertEqual(partitions[1][1], True)
    self.assertEqual(partitions[2][0], 50.00)
    self.assertEqual(partitions[2][1], False)
def test_partly(self):
    """A non-partly-available allocation is all-or-nothing: it exposes a
    single slot and any reservation blocks the entire span."""
    sc = Scheduler(new_uuid())
    allocations = sc.allocate(
        (
            datetime(2011, 1, 1, 8, 0),
            datetime(2011, 1, 1, 18, 0)
        ),
        partly_available=False,
        approve_manually=False
    )

    self.assertEqual(1, len(allocations))
    allocation = allocations[0]

    # Not partly available -> exactly one slot spanning the whole range.
    self.assertEqual(1, len(list(allocation.all_slots())))
    self.assertEqual(1, len(list(allocation.free_slots())))

    slot = list(allocation.all_slots())[0]
    self.assertEqual(slot[0], allocation.start)
    self.assertEqual(slot[1], allocation.end)

    slot = list(allocation.free_slots())[0]
    self.assertEqual(slot[0], allocation.start)
    self.assertEqual(slot[1], allocation.end)

    # Reserve just the last two hours of the allocation...
    token = sc.reserve(
        reservation_email,
        (datetime(2011, 1, 1, 16, 0), datetime(2011, 1, 1, 18, 0))
    )
    sc.approve_reservation(token)

    # ...which blocks even the non-overlapping morning hours.
    self.assertRaises(
        AlreadyReservedError, sc.reserve, reservation_email,
        (datetime(2011, 1, 1, 8, 0), datetime(2011, 1, 1, 9, 0))
    )
def set_uuid(self, uuid=None):
    """Assign ``uuid`` to this object, generating a fresh one if omitted."""
    self.uuid = new_uuid() if uuid is None else uuid
def test_quota_waitinglist(self):
    """With quota=2 only the first two reservations can be approved;
    later ones remain stuck on the waiting list."""
    sc = Scheduler(new_uuid())
    start = datetime(2012, 3, 4, 2, 0)
    end = datetime(2012, 3, 4, 3, 0)
    dates = (start, end)

    # in this example the waiting list will kick in only after
    # the quota has been filled
    allocation = sc.allocate(dates, quota=2, approve_manually=True)[0]
    self.assertEqual(allocation.waitinglist_length, 0)

    t1 = sc.reserve(reservation_email, dates)
    t2 = sc.reserve(reservation_email, dates)

    # Unapproved reservations count towards the waiting list.
    self.assertEqual(allocation.waitinglist_length, 2)

    sc.approve_reservation(t1)
    sc.approve_reservation(t2)

    self.assertEqual(allocation.waitinglist_length, 0)

    t3 = sc.reserve(reservation_email, dates)
    t4 = sc.reserve(reservation_email, dates)

    self.assertEqual(allocation.waitinglist_length, 2)

    # The quota is exhausted, so these can no longer be approved.
    self.assertRaises(AlreadyReservedError, sc.approve_reservation, t3)
    self.assertRaises(AlreadyReservedError, sc.approve_reservation, t4)
def test_no_waitinglist(self):
    """Auto-approved allocations keep no waiting list: once reserved,
    further attempts fail until the reservation is removed."""
    sc = Scheduler(new_uuid())

    start = datetime(2012, 4, 6, 22, 0)
    end = datetime(2012, 4, 6, 23, 0)
    dates = (start, end)

    allocation = sc.allocate(dates, approve_manually=False)[0]

    self.assertEqual(allocation.waitinglist_length, 0)

    # the first reservation kinda gets us in a waiting list, though
    # this time there can be only one spot in the list as long as there's
    # no reservation
    token = sc.reserve(reservation_email, dates)
    self.assertTrue(sc.reservation_by_token(token).one().autoapprovable)
    sc.approve_reservation(token)

    # it is now that we should have a problem reserving
    self.assertRaises(
        AlreadyReservedError, sc.reserve, reservation_email, dates
    )
    self.assertEqual(allocation.waitinglist_length, 0)

    # until we delete the existing reservation
    sc.remove_reservation(token)
    sc.reserve(reservation_email, dates)
def __init__(self, name=None, uuid=None, curr_obj=0,
             workflow_object=None, id_user=0, module_name="Unknown",
             **kwargs):
    """Instantiate a new BibWorkflowEngine object.

    This object is needed to run a workflow and control the workflow,
    like at which step of the workflow execution is currently at, as
    well as control object manipulation inside the workflow.

    You can pass several parameters to personalize your engine,
    but most of the time you will not need to create this object
    yourself as the :py:mod:`.api` is there to do it for you.

    :param name: name of workflow to run.
    :type name: str

    :param uuid: pass a uuid to an existing workflow.
    :type uuid: str

    :param curr_obj: internal id of current object being processed.
    :type curr_obj: int

    :param workflow_object: existing instance of a Workflow object.
    :type workflow_object: Workflow

    :param id_user: id of user to associate with workflow
    :type id_user: int

    :param module_name: label used to query groups of workflows.
    :type module_name: str
    """
    super(BibWorkflowEngine, self).__init__()
    self.db_obj = None
    if isinstance(workflow_object, Workflow):
        self.db_obj = workflow_object
    else:
        # If uuid is defined we try to get the db object from DB.
        if uuid is not None:
            self.db_obj = \
                Workflow.get(Workflow.uuid == uuid).first()
        else:
            uuid = new_uuid()
        if self.db_obj is None:
            # Nothing found (or no uuid given): create and persist a new
            # Workflow record with status NEW.
            self.db_obj = Workflow(name=name, id_user=id_user,
                                   current_object=curr_obj,
                                   module_name=module_name, uuid=uuid)
            self.save(status=WorkflowStatus.NEW)
    # Attach a database-backed log handler once per workflow uuid.
    if self.db_obj.uuid not in self.log.name:
        db_handler_obj = BibWorkflowLogHandler(BibWorkflowEngineLog, "uuid")
        self.log = get_logger(logger_name="workflow.%s" % self.db_obj.uuid,
                              db_handler_obj=db_handler_obj, obj=self)
    self.set_workflow_by_name(self.db_obj.name)
    self.set_extra_data_params(**kwargs)
def secret(self):
    """Return a fresh random secret used for signatures.

    Because the secret is never stored anywhere, every signed value
    becomes invalid whenever the secret changes — e.g. logged in users
    are logged out when the application restarts.
    """
    token = new_uuid()
    return token.hex
def test_workflow_engine_instantiation(self):
    """Check the proper init of the Workflow and BibWorkflowEngine."""
    from invenio.modules.workflows.models import Workflow
    from invenio.modules.workflows.engine import BibWorkflowEngine
    from uuid import uuid1 as new_uuid

    workflow = Workflow(
        name='test_workflow',
        uuid=new_uuid(),
        id_user=0,
        module_name="Unknown",
    )
    engine = BibWorkflowEngine(name=workflow.name, uuid=workflow.uuid)

    # The engine must adopt the name of the workflow it wraps.
    self.assertEqual(workflow.name, engine.name)
def setUp(self):
    """Setup tests."""
    from invenio.modules.workflows.models import BibWorkflowObject, \
        Workflow
    from uuid import uuid1 as new_uuid
    # One workflow with a single attached object, persisted per test.
    self.workflow = Workflow(name='test_workflow', uuid=new_uuid(),
                             id_user=0, module_name="Unknown")
    self.bibworkflowobject = BibWorkflowObject(workflow=self.workflow)
    self.create_objects([self.workflow, self.bibworkflowobject])
def setUp(self):
    """Setup tests."""
    from invenio_workflows.models import BibWorkflowObject, \
        Workflow
    from uuid import uuid1 as new_uuid
    # One workflow with a single attached object, persisted per test.
    self.workflow = Workflow(name='demo_workflow', uuid=new_uuid(),
                             id_user=0, module_name="Unknown")
    self.bibworkflowobject = BibWorkflowObject(workflow=self.workflow)
    self.create_objects([self.workflow, self.bibworkflowobject])
def __init__(self, model=None, name=None, id_user=None, **extra_data):
    """Handle special case of instantiation of engine.

    Super's ``__init__`` clears ``extra_data``, which we override to be
    ``model.extra_data``; the keyword arguments are therefore accepted
    here but handled by the model.
    """
    if not model:
        # No model given: create and persist a fresh Workflow record.
        fresh = Workflow(name=name, id_user=id_user, uuid=new_uuid())
        fresh.save(WorkflowStatus.NEW)
        model = fresh

    self.model = model
    super(WorkflowEngine, self).__init__()
    self.set_workflow_by_name(self.model.name)
def test_workflow_engine_instantiation(self):
    """Check the proper init of the Workflow and BibWorkflowEngine."""
    from invenio_workflows.models import Workflow
    from invenio_workflows.engine import BibWorkflowEngine
    from uuid import uuid1 as new_uuid
    # Build a bare Workflow record and wrap it in an engine.
    test_workflow = Workflow(name='demo_workflow', uuid=new_uuid(),
                             id_user=0, module_name="Unknown",
                             )
    test_workflow_engine = BibWorkflowEngine(name=test_workflow.name,
                                             uuid=test_workflow.uuid)
    # The engine reports the name of the workflow it was created from.
    self.assertEqual(test_workflow.name, test_workflow_engine.name)
def test_add_allocation(scheduler):
    """A single allocation can be persisted and queried back."""
    span = Allocation(raster=15, resource=scheduler.resource)
    span.start = datetime(2011, 1, 1, 15, tzinfo=utc)
    span.end = datetime(2011, 1, 1, 15, 59, tzinfo=utc)
    span.group = new_uuid().hex
    span.mirror_of = scheduler.resource

    scheduler.session.add(span)
    scheduler.commit()

    # Exactly the one allocation we added must be in the database.
    assert scheduler.session.query(Allocation).count() == 1
def add_toc_heading(self, heading, text, style=None, toc_level=0):
    """Append a heading paragraph, registering it with the TOC when one
    exists and a toc_level was given."""
    wants_toc = toc_level is not None and hasattr(self, 'toc')

    if wants_toc:
        # Embed a named anchor so the TOC entry can link to the heading.
        anchor = new_uuid().hex
        text = u'{}<a name="{}"/>'.format(text, anchor)

    paragraph = MarkupParagraph(text, style)
    self.story.append(paragraph)

    if wants_toc:
        paragraph.toc_level = toc_level
        paragraph.bookmark = anchor
def test_session_expiration(self):
    """Session expiry is decided by the newer of 'created'/'modified',
    with an exclusive expiration_date cutoff."""
    sc = Scheduler(new_uuid())

    session_id = new_uuid()

    start, end = datetime(2013, 5, 1, 13, 0), datetime(2013, 5, 1, 14)
    sc.allocate(dates=(start, end), approve_manually=True)
    sc.reserve(u'*****@*****.**', (start, end), session_id=session_id)

    created = utils.utcnow()

    # Pin 'created' and clear 'modified' so expiry depends on 'created'.
    db.Session.query(db.Reservation).filter(
        db.Reservation.session_id == session_id
    ).update({'created': created, 'modified': None})

    # The cutoff is exclusive: exactly 'created' is not yet expired...
    expired = db.find_expired_reservation_sessions(expiration_date=created)
    self.assertEqual(len(expired), 0)

    # ...but one microsecond past it is.
    expired = db.find_expired_reservation_sessions(
        expiration_date=created + timedelta(microseconds=1)
    )
    self.assertEqual(len(expired), 1)

    # With 'modified' set later, that timestamp governs expiry instead.
    db.Session.query(db.Reservation).filter(
        db.Reservation.session_id == session_id
    ).update({
        'created': created,
        'modified': created + timedelta(microseconds=1)
    })

    expired = db.find_expired_reservation_sessions(
        expiration_date=created + timedelta(microseconds=1)
    )
    self.assertEqual(len(expired), 0)

    expired = db.find_expired_reservation_sessions(
        expiration_date=created + timedelta(microseconds=2)
    )
    self.assertEqual(len(expired), 1)
def test_allocation_overlap(self):
    """Overlapping allocations are rejected — across calls on the same
    scheduler and within a single allocate() call."""
    sc1 = Scheduler(new_uuid())
    sc2 = Scheduler(new_uuid())

    start = datetime(2011, 1, 1, 15, 0)
    end = datetime(2011, 1, 1, 16, 0)

    # Different schedulers may allocate the same span...
    sc1.allocate((start, end), raster=15)
    sc2.allocate((start, end), raster=15)

    # ...but the same scheduler may not do so twice.
    self.assertRaises(
        OverlappingAllocationError,
        sc1.allocate, (start, end), raster=15
    )

    # there's another way this could happen, which is illegal usage
    # of scheduler.allocate - we stop this befor it hits the database
    sc = Scheduler(new_uuid())

    # Identical ranges within one call are invalid...
    dates = [
        (datetime(2013, 1, 1, 12, 0), datetime(2013, 1, 1, 13, 0)),
        (datetime(2013, 1, 1, 12, 0), datetime(2013, 1, 1, 13, 0))
    ]

    self.assertRaises(InvalidAllocationError, sc.allocate, dates)

    # ...as are ranges sharing the 13:00 boundary...
    dates = [
        (datetime(2013, 1, 1, 12, 0), datetime(2013, 1, 1, 13, 0)),
        (datetime(2013, 1, 1, 13, 0), datetime(2013, 1, 1, 14, 0))
    ]

    self.assertRaises(InvalidAllocationError, sc.allocate, dates)

    # ...while a 15 minute gap keeps them valid.
    dates = [
        (datetime(2013, 1, 1, 12, 0), datetime(2013, 1, 1, 13, 0)),
        (datetime(2013, 1, 1, 13, 15), datetime(2013, 1, 1, 14, 0))
    ]

    sc.allocate(dates)
async def send_pack(context, data_type, name, description):
    """Build a .mcpack from a generated manifest and DM it to the user.

    :param context: discord command context of the invoking message.
    :param data_type: module type written into the manifest.
    :param name: pack name, also the output file's base name.
    :param description: tuple of words collected from trailing arguments.
    """
    # The description is a tuple of strings, from the end of the arguments
    description = " ".join(description)
    file = "manifest.json"
    with open(file, "w") as f:
        # Contents of the manifest for behaviour/resource packs.
        # Doubled braces are literal JSON braces; the single {} slots are
        # filled below with description, name and two fresh UUIDs.
        contents = '{{\n\t"format_version": 1,\n\t"header": {{{}\
\n\t\t"name": "{}",\n\t\t"uuid": "{}",\n\t\t"version": [ 0, 0, 1 ]\n\t}},\
\n\t"modules": [\n\t\t{{\n\t\t\t"type": "{}",\n\t\t\t"uuid": "{}",\n\t\t\t\
"version": [ 0, 0, 1 ]\n\t\t}}\n\t]\n}}'
        f.write(contents.format(description, name, new_uuid(), data_type,
                                new_uuid()))
    # Creates a zipped folder and copies manifest.json inside
    make_archive(name, "zip", getcwd(), file)
    # Removes the old manifest.json copy
    remove(file)
    # Renames the zip file to .mcpack
    pack_name = name + ".mcpack"
    rename(name + ".zip", pack_name)
    # DMs the file straight to the user
    try:
        # 'file' is rebound here to the sent discord message object.
        file = await mcpacker.send_file(context.message.author, pack_name)
        print(str(file))
        await mcpacker.add_reaction(file, "\u274c")
    # If the user does not accept DMs from people within the server
    # The file cannot be DMed and this message is displayed
    except discord.errors.Forbidden:
        await no_dm(context.message.author.mention)
    # Local pack file is no longer needed once the DM attempt is done.
    # NOTE(review): when the DM was forbidden, 'file' is still the path
    # string, so the wait/delete below operate on the wrong object —
    # confirm whether this path should return early instead.
    remove(name + ".mcpack")
    # Deletes the file when the :x: emoji is added as a reaction
    await mcpacker.wait_for_reaction(emoji="\u274c",
                                     user=context.message.author)
    await mcpacker.delete_message(file)
def __init__(self, model=None, name=None, id_user=None, **extra_data):
    """Special handling of instantiation of engine.

    Super's ``__init__`` clears ``extra_data``, which we override to be
    ``model.extra_data``; the keyword arguments are accepted here but
    carried by the model.
    """
    if model:
        self.model = model
    else:
        # Create and persist a brand new Workflow record.
        record = Workflow(name=name, id_user=id_user, uuid=new_uuid())
        record.save(WorkflowStatus.NEW)
        self.model = record

    super(WorkflowEngine, self).__init__()
    self.set_workflow_by_name(self.model.name)
def test_userlimits(self):
    """Reservations of 24 hours or more must be rejected.

    The UI only offers a day plus two times, so users cannot actually
    request such spans today — but if start/end dates are ever exposed
    this guard stops anyone from reserving, say, a whole year with a
    single form.
    """
    begin = datetime(2011, 1, 1, 15, 0)
    finish = begin + timedelta(days=1)

    sc = Scheduler(new_uuid())

    self.assertRaises(
        ReservationTooLong, sc.reserve, reservation_email, (begin, finish)
    )
def _upload_image(request):
    """Persist the uploaded 'image' file as a PNG with a random name.

    :param request: incoming request carrying a file under the 'image' key.
    :return: the generated filename (relative to UPLOAD_DIR).
    :raises HttpException: 400 when no file was supplied.
    """
    assert request is not None

    # BUG FIX: use .get() so a *missing* 'image' key also yields our 400
    # response — the original indexed request.files['image'] directly,
    # which raised a KeyError before the explicit check could run.
    image_data = request.files.get('image')
    if not image_data:
        raise HttpException(code=400, reason='File not specified')

    # A random name prevents collisions and ignores the client-supplied
    # filename entirely.
    filename = str(new_uuid()) + '.png'
    upload_path = os.path.join(app.config['UPLOAD_DIR'], filename)

    image = Image.open(image_data.stream)
    image.save(upload_path)

    return filename
def test_invalid_reservation(self):
    """Reserving a date range with no matching allocation must fail."""
    sc = Scheduler(new_uuid())

    # Allocate one spot in January...
    allocated = (datetime(2012, 1, 1, 15, 0), datetime(2012, 1, 1, 16, 0))
    # ...then try to reserve a spot in February that doesn't exist.
    requested = (datetime(2012, 2, 1, 15, 0), datetime(2012, 2, 1, 16, 0))

    sc.allocate(dates=allocated, approve_manually=True)

    self.assertRaises(
        InvalidReservationError, sc.reserve, reservation_email, requested
    )
def test_waitinglist(self):
    """Manually approved allocations keep a waiting list: only one
    reservation holds the span at a time, the rest queue up."""
    sc = Scheduler(new_uuid())
    start = datetime(2012, 2, 29, 15, 0)
    end = datetime(2012, 2, 29, 19, 0)
    dates = (start, end)

    # let's create an allocation with a waitinglist
    allocation = sc.allocate(dates, approve_manually=True)[0]
    self.assertEqual(allocation.waitinglist_length, 0)

    # reservation should work
    approval_token = sc.reserve(reservation_email, dates)
    self.assertFalse(
        sc.reservation_by_token(approval_token).one().autoapprovable
    )
    self.assertTrue(allocation.is_available(start, end))
    self.assertEqual(allocation.waitinglist_length, 1)

    # as well as it's approval
    sc.approve_reservation(approval_token)
    self.assertFalse(allocation.is_available(start, end))
    self.assertEqual(allocation.waitinglist_length, 0)

    # at this point we can only reserve, not approve
    waiting_token = sc.reserve(reservation_email, dates)
    self.assertRaises(
        AlreadyReservedError, sc.approve_reservation, waiting_token
    )
    self.assertEqual(allocation.waitinglist_length, 1)

    # try to illegally move the allocation now
    self.assertRaises(
        AffectedReservationError, sc.move_allocation,
        allocation.id, start + timedelta(days=1), end + timedelta(days=1)
    )

    # we may now get rid of the existing approved reservation
    sc.remove_reservation(approval_token)
    self.assertEqual(allocation.waitinglist_length, 1)

    # which should allow us to approve the reservation in the waiting list
    sc.approve_reservation(waiting_token)
    self.assertEqual(allocation.waitinglist_length, 0)
def test_no_bleed(self):
    """
    Ensures that two allocations close to each other are not mistaken
    when using scheduler.reserve. If they do then they bleed over, hence
    the name.
    """
    sc = Scheduler(new_uuid())

    earlier = (datetime(2011, 1, 1, 15, 0), datetime(2011, 1, 1, 16, 0))
    later = (datetime(2011, 1, 1, 16, 0), datetime(2011, 1, 1, 17, 0))

    first = sc.allocate(earlier)[0]
    second = sc.allocate(later)[0]

    # Adjacent allocations share a boundary but must not overlap.
    self.assertFalse(first.overlaps(*later))
    self.assertFalse(second.overlaps(*earlier))

    # expect no exceptions
    sc.reserve(reservation_email, later)
    sc.reserve(reservation_email, earlier)
def add_layout_row(self, cover):
    """Append a member-list row to the cover's layout and return the
    tile path ('seantis.cover.people.memberlist/<id>')."""
    # Start from the existing layout, or an empty one.
    existing = cover.cover_layout and json.loads(cover.cover_layout) or []

    tile_id = new_uuid().hex

    # Key order matches the stored layout convention.
    tile = {
        "tile-type": "seantis.cover.people.memberlist",
        "type": "tile",
        "id": tile_id,
    }
    column = {
        "data": {"layout-type": "column", "column-size": 16},
        "type": "group",
        "children": [tile],
        "roles": ["Manager"],
    }
    existing.append({"type": "row", "children": [column]})

    cover.cover_layout = json.dumps(existing)
    cover.reindexObject()

    return 'seantis.cover.people.memberlist/{}'.format(tile_id)
def url_upload(user_id, deposition_type, uuid, url, name=None, size=None):
    """Download ``url`` and attach it to the deposition's draft files.

    @param user_id: id of the owning user
    @param deposition_type: the deposition the file will be attached to
    @param uuid: the id of the deposition's workflow
    @param name: file name; defaults to the last path segment of ``url``
    @param size: file size in bytes; measured from disk when omitted
    @return: the unique on-disk filename, or the string "Error" when the
             download failed (kept as-is for backward compatibility).
    """
    try:
        data = urlopen(url).read()
    except URLError:
        return "Error"

    CFG_USER_WEBDEPOSIT_FOLDER = create_user_file_system(
        user_id, deposition_type, uuid)

    # BUG FIX: derive the default name *before* it is used — the original
    # computed `str(new_uuid()) + name` first, raising TypeError whenever
    # no name was passed (str + None).
    if name is None:
        name = url.split('/')[-1]

    unique_filename = str(new_uuid()) + name
    file_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER, unique_filename)

    # 'with' guarantees the handle is closed (the original leaked it).
    with open(file_path, 'wb') as f:
        f.write(data)

    if size is None:
        size = os.path.getsize(file_path)

    file_metadata = dict(name=name, file=file_path, size=size)
    draft_field_list_add(current_user.get_id(), uuid, "files",
                         file_metadata)

    return unique_filename
def test_waitinglist_group(self):
    """Waiting lists apply per allocation across an entire group."""
    from dateutil.rrule import rrule, DAILY, MO

    sc = Scheduler(new_uuid())

    # Five mondays, 15:00-16:00 each.
    days = list(rrule(
        DAILY, count=5, byweekday=(MO,), dtstart=datetime(2012, 1, 1)
    ))
    dates = []
    for d in days:
        dates.append(
            (
                datetime(d.year, d.month, d.day, 15, 0),
                datetime(d.year, d.month, d.day, 16, 0)
            )
        )

    allocations = sc.allocate(dates, grouped=True, approve_manually=True)
    self.assertEqual(len(allocations), 5)

    group = allocations[0].group

    # reserving groups is no different than single allocations
    maintoken = sc.reserve(reservation_email, group=group)
    self.assertFalse(
        sc.reservation_by_token(maintoken).one().autoapprovable
    )
    for allocation in allocations:
        self.assertEqual(allocation.waitinglist_length, 1)

    sc.approve_reservation(maintoken)

    # While the first reservation holds the group, further reservations
    # queue up and cannot be approved.
    token = sc.reserve(reservation_email, group=group)
    self.assertRaises(AlreadyReservedError, sc.approve_reservation, token)

    token = sc.reserve(reservation_email, group=group)
    self.assertRaises(AlreadyReservedError, sc.approve_reservation, token)

    # Removing the approved reservation frees the group for a waiter.
    sc.remove_reservation(maintoken)
    sc.approve_reservation(token)
def __init__(self, name=None, uuid=None, curr_obj=0,
             workflow_object=None, id_user=0, module_name="Unknown",
             **kwargs):
    """Bind the engine to an existing or newly created Workflow record.

    ``workflow_object`` wins if given; otherwise ``uuid`` is looked up
    in the database, and a fresh record (with a fresh uuid) is created
    when nothing is found.
    """
    super(BibWorkflowEngine, self).__init__()
    self.db_obj = None
    if isinstance(workflow_object, Workflow):
        self.db_obj = workflow_object
    else:
        # If uuid is defined we try to get the db object from DB.
        if uuid is not None:
            self.db_obj = \
                Workflow.get(Workflow.uuid == uuid).first()
        else:
            uuid = new_uuid()
        if self.db_obj is None:
            # Nothing found (or no uuid requested): create a new record.
            self.db_obj = Workflow(name=name, id_user=id_user,
                                   current_object=curr_obj,
                                   module_name=module_name, uuid=uuid)
            self._create_db_obj()
    self.set_workflow_by_name(name)
    self.set_extra_data_params(**kwargs)
def test_imaginary_mirrors(self):
    """With quota=3, mirrors are created lazily: they remain transient
    ('imaginary') until a reservation actually needs them."""
    sc = Scheduler(new_uuid())

    start = datetime(2011, 1, 1, 15, 0)
    end = datetime(2011, 1, 1, 16, 0)
    daterange = (start, end)

    allocation = sc.allocate(daterange, quota=3)[0]
    self.assertTrue(allocation.is_master)

    # No reservations yet: both mirrors are still transient.
    mirrors = sc.allocation_mirrors_by_master(allocation)
    imaginary = len([m for m in mirrors if m.is_transient])
    self.assertEqual(imaginary, 2)
    self.assertEqual(len(allocation.siblings()), 3)

    # Mirrors are never masters themselves.
    masters = len([m for m in mirrors if m.is_master])
    self.assertEqual(masters, 0)
    self.assertEqual(
        len([s for s in allocation.siblings(imaginary=False)]), 1
    )

    # The first reservation takes the master; mirrors stay imaginary.
    sc.approve_reservation(sc.reserve(reservation_email, daterange))
    mirrors = sc.allocation_mirrors_by_master(allocation)
    imaginary = len([m for m in mirrors if m.is_transient])
    self.assertEqual(imaginary, 2)

    # Each further reservation materialises one mirror.
    sc.approve_reservation(sc.reserve(reservation_email, daterange))
    mirrors = sc.allocation_mirrors_by_master(allocation)
    imaginary = len([m for m in mirrors if m.is_transient])
    self.assertEqual(imaginary, 1)

    sc.approve_reservation(sc.reserve(reservation_email, daterange))
    mirrors = sc.allocation_mirrors_by_master(allocation)
    imaginary = len([m for m in mirrors if m.is_transient])
    self.assertEqual(imaginary, 0)
    self.assertEqual(len(mirrors) + 1, len(allocation.siblings()))
def plupload(deposition_type, uuid):
    """
    The file is splitted in chunks on the client-side and it is merged
    again on the server-side.

    Request form parameters (plupload protocol):
        chunks: total number of chunks (absent for single-chunk uploads)
        chunk:  current zero-based chunk number
        name:   name of the file

    @return: the path of the uploaded file
    """
    if request.method == 'POST':
        try:
            chunks = request.form['chunks']
            chunk = request.form['chunk']
        except KeyError:
            # Single-chunk upload: 'chunk' deliberately stays unbound and
            # is detected via UnboundLocalError below.
            chunks = None

        name = request.form['name']
        current_chunk = request.files['file']

        try:
            filename = secure_filename(name) + "_" + chunk
        except UnboundLocalError:
            filename = secure_filename(name)

        CFG_USER_WEBDEPOSIT_FOLDER = create_user_file_system(
            current_user.get_id(), deposition_type, uuid)

        # Save the chunk
        current_chunk.save(os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                                        filename))

        unique_filename = ""

        if chunks is None:  # file is a single chunk
            unique_filename = str(new_uuid()) + filename
            old_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER, filename)
            file_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                                     unique_filename)
            os.rename(old_path, file_path)  # Rename the chunk
            size = os.path.getsize(file_path)
            file_metadata = dict(name=name, file=file_path, size=size)
            draft_field_list_add(current_user.get_id(), uuid,
                                 "files", file_metadata)
        elif int(chunk) == int(chunks) - 1:
            # All chunks have been uploaded: merge them in order.
            filename = secure_filename(name)
            chunk_files = list(iglob(
                os.path.join(CFG_USER_WEBDEPOSIT_FOLDER, filename + '_*')))
            # Sort files in numerical order
            chunk_files.sort(key=lambda x: int(x.split("_")[-1]))

            unique_filename = str(new_uuid()) + filename
            file_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                                     unique_filename)
            # BUG FIX: use context managers so the destination and every
            # source chunk handle are closed — the original leaked each
            # source handle and shadowed an outer variable in the loop.
            with open(file_path, 'wb') as destination:
                for chunk_path in chunk_files:
                    with open(chunk_path, 'rb') as source:
                        shutil.copyfileobj(source, destination)
                    os.remove(chunk_path)

            size = os.path.getsize(file_path)
            file_metadata = dict(name=name, file=file_path, size=size)
            draft_field_list_add(current_user.get_id(), uuid,
                                 "files", file_metadata)
        return unique_filename
def add(deposition_type, uuid):
    """
    Run the workflow and show the current form/output of the workflow.

    Loads the workflow associated with the uuid. If the current step of
    the workflow renders a form, it is loaded; if the workflow is
    finished or in case of error, it redirects to the deposition types
    page, flashing the associated message. Moreover, it handles a form's
    POST request for the fields and files, and validates the whole form
    after the submission.

    @param deposition_type: the type of the deposition to be run.
    @param uuid: the universal unique identifier for the workflow.
    """
    status = 0
    if deposition_type not in deposition_metadata:
        flash(_('Invalid deposition type `%s`.' % deposition_type), 'error')
        return redirect(url_for('.index_deposition_types'))
    elif uuid is None:
        # get the latest one. if there is no workflow created
        # lets create a new workflow with given deposition type
        workflow = get_latest_or_new_workflow(deposition_type)
        uuid = workflow.get_uuid()
        # Redirect to the canonical URL carrying the uuid.
        return redirect(
            url_for('.add', deposition_type=deposition_type, uuid=uuid))
    else:
        # get workflow with specific uuid
        workflow = get_workflow(deposition_type, uuid)
        if workflow is None:
            flash(_('Deposition with uuid `') + uuid + '` not found.',
                  'error')
            return redirect(url_for('.index_deposition_types'))

    # Remember the deposition the user is currently working on.
    cache.delete_many(
        str(current_user.get_id()) + ":current_deposition_type",
        str(current_user.get_id()) + ":current_uuid")
    cache.add(
        str(current_user.get_id()) + ":current_deposition_type",
        deposition_type)
    cache.add(str(current_user.get_id()) + ":current_uuid", uuid)

    current_app.config['breadcrumbs_map'][request.endpoint] = [
        (_('Home'), '')] + blueprint.breadcrumbs + \
        [(deposition_type, 'webdeposit.index',
          {'deposition_type': deposition_type}),
         (uuid, 'webdeposit.add',
          {'deposition_type': deposition_type, 'uuid': uuid})]

    if request.method == 'POST':
        # Save the files
        for uploaded_file in request.files.values():
            filename = secure_filename(uploaded_file.filename)
            if filename == "":
                continue

            CFG_USER_WEBDEPOSIT_FOLDER = create_user_file_system(
                current_user.get_id(), deposition_type, uuid)

            # Random prefix avoids collisions between equally named files.
            unique_filename = str(new_uuid()) + filename
            file_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                                     unique_filename)
            uploaded_file.save(file_path)
            size = os.path.getsize(file_path)
            file_metadata = dict(name=filename, file=file_path, size=size)
            draft_field_list_add(current_user.get_id(), uuid,
                                 "files", file_metadata)

        # Save form values (submit buttons are not form data).
        for (field_name, value) in request.form.items():
            if "submit" in field_name.lower():
                continue
            draft_field_set(current_user.get_id(), uuid, field_name, value)

        form = get_form(current_user.get_id(), uuid)

        # Validate form
        if not form.validate():
            # render the form with error messages
            # the `workflow.get_output` function returns also the template
            return render_template(
                **workflow.get_output(form_validation=True))

        # Set the latest form status to finished
        set_form_status(current_user.get_id(), uuid,
                        CFG_DRAFT_STATUS['finished'])

    workflow.run()
    status = workflow.get_status()

    if status != CFG_WORKFLOW_STATUS.FINISHED and \
            status != CFG_WORKFLOW_STATUS.ERROR:
        # render current step of the workflow
        # the `workflow.get_output` function returns also the template
        return render_template(**workflow.get_output())
    elif status == CFG_WORKFLOW_STATUS.FINISHED:
        flash(
            deposition_type +
            _(' deposition has been successfully finished.'), 'success')
        return redirect(url_for('.index_deposition_types'))
    elif status == CFG_WORKFLOW_STATUS.ERROR:
        # NOTE(review): the flashed message contains a literal '%s' that
        # is never substituted; left untouched to avoid changing the
        # translation key.
        flash(deposition_type + _(' deposition %s has returned error.'),
              'error')
        # BUG FIX: the original formatted two placeholders ('%s ... %d')
        # with a single argument ('% uuid'), raising TypeError instead of
        # logging the error.
        current_app.logger.error(
            'Deposition: %s has returned error.' % uuid)
        return redirect(url_for('.index_deposition_types'))
def deposit_files(user_id, deposition_type, uuid, preingest=False):
    """Attach files to a workflow.

    Upload a single file or a file in chunks.  Function must be called
    within a blueprint function that handles file uploading.

    Request post parameters:
        chunks: number of chunks
        chunk: current chunk number
        name: name of the file

    @param user_id: the user id
    @param deposition_type: the deposition the files will be attached
    @param uuid: the id of the deposition
    @param preingest: set to True if you want to store the file metadata in
                      the workflow before running the workflow, i.e. to
                      bind the files to the workflow and not in the last
                      form draft.
    @return: the path of the uploaded file
    """
    if request.method == 'POST':
        try:
            chunks = request.form['chunks']
            chunk = request.form['chunk']
        except KeyError:
            # Not a chunked upload.
            chunks = None

        current_chunk = request.files['file']
        try:
            name = request.form['name']
        except BadRequestKeyError:
            name = current_chunk.filename

        try:
            # Chunked upload: suffix the chunk index so the chunks can be
            # globbed and merged in numerical order later.
            filename = secure_filename(name) + "_" + chunk
        except UnboundLocalError:
            # `chunk` was never bound above -> single-file upload.
            filename = secure_filename(name)

        CFG_USER_WEBDEPOSIT_FOLDER = create_user_file_system(
            user_id, deposition_type, uuid)

        # Save the chunk
        current_chunk.save(
            os.path.join(CFG_USER_WEBDEPOSIT_FOLDER, filename))

        unique_filename = ""

        if chunks is None:  # file is a single chunk
            unique_filename = str(new_uuid()) + filename
            old_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER, filename)
            file_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                                     unique_filename)
            os.rename(old_path, file_path)  # Rename the chunk

            if current_chunk.content_length != 0:
                size = current_chunk.content_length
            else:
                size = os.path.getsize(file_path)

            content_type = current_chunk.content_type or ''
            file_metadata = dict(name=name, file=file_path,
                                 content_type=content_type, size=size)
            if preingest:
                preingest_form_data(user_id, uuid,
                                    {'files': file_metadata})
            else:
                draft_field_list_add(user_id, uuid, "files",
                                     file_metadata)
        elif int(chunk) == int(chunks) - 1:
            # All chunks have been uploaded: start merging the chunks.
            filename = secure_filename(name)
            chunk_files = list(iglob(
                os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                             filename + '_*')))

            # Sort files in numerical order
            chunk_files.sort(key=lambda x: int(x.split("_")[-1]))

            unique_filename = str(new_uuid()) + filename
            file_path = os.path.join(CFG_USER_WEBDEPOSIT_FOLDER,
                                     unique_filename)
            # Bugfix: the chunk files were previously opened without ever
            # being closed (and `destination` leaked on error); context
            # managers release every handle even if the copy fails.
            with open(file_path, 'wb') as destination:
                for chunk_path in chunk_files:
                    with open(chunk_path, 'rb') as source:
                        shutil.copyfileobj(source, destination)
                    os.remove(chunk_path)

            size = os.path.getsize(file_path)
            file_metadata = dict(name=name, file=file_path, size=size)
            if preingest:
                preingest_form_data(user_id, uuid,
                                    {'files': file_metadata},
                                    append=True)
            else:
                draft_field_list_add(user_id, uuid, "files",
                                     file_metadata)
        return unique_filename
def test_limit_timespan():
    # For an allocation that is not partly available, limit_timespan always
    # yields the full display span, whatever times are requested.
    fixed = Allocation(
        raster=15, resource=new_uuid(), partly_available=False,
        timezone='UTC'
    )
    fixed.start = datetime(2014, 1, 1, 8, 0, tzinfo=utc)
    fixed.end = datetime(2014, 1, 1, 9, 0, tzinfo=utc)

    full_span = (fixed.display_start(), fixed.display_end())
    assert fixed.limit_timespan(time(8, 0), time(9, 0)) == full_span
    assert fixed.limit_timespan(time(7, 0), time(10, 0)) == full_span

    # Partly available allocations behave in a more complex way.
    flexible = Allocation(
        raster=15, resource=new_uuid(), partly_available=True,
        timezone='UTC'
    )
    flexible.start = datetime(2014, 1, 1, 8, 0, tzinfo=utc)
    flexible.end = datetime(2014, 1, 1, 9, 0, tzinfo=utc)

    full_span = (flexible.display_start(), flexible.display_end())
    assert flexible.limit_timespan(time(8, 0), time(9, 0)) == full_span
    assert flexible.limit_timespan(time(7, 0), time(10, 0)) == full_span

    # The requested end is clamped to the allocation's end...
    assert flexible.limit_timespan(time(8, 30), time(10, 0)) == (
        datetime(2014, 1, 1, 8, 30, tzinfo=utc),
        datetime(2014, 1, 1, 9, 0, tzinfo=utc)
    )

    # ...and 8:40 is pushed to 8:45 (the raster is 15 minutes).
    assert flexible.limit_timespan(time(8, 30), time(8, 40)) == (
        datetime(2014, 1, 1, 8, 30, tzinfo=utc),
        datetime(2014, 1, 1, 8, 45, tzinfo=utc)
    )

    # An end time of 0:00 resolves to the allocation's own end.
    assert flexible.limit_timespan(time(8, 30), time(0, 0)) == (
        datetime(2014, 1, 1, 8, 30, tzinfo=utc),
        datetime(2014, 1, 1, 9, 0, tzinfo=utc)
    )

    # No problems should arise if whole-day allocations are used.
    flexible.start = datetime(2014, 1, 1, 0, 0, tzinfo=utc)
    flexible.end = datetime(2014, 1, 2, 0, 0, tzinfo=utc)
    assert flexible.whole_day

    assert flexible.limit_timespan(time(0, 0), time(23, 59)) == (
        flexible.display_start(), flexible.display_end()
    )
    assert flexible.limit_timespan(time(0, 0), time(0, 0)) == (
        datetime(2014, 1, 1, 0, 0, tzinfo=utc),
        datetime(2014, 1, 1, 0, 0, tzinfo=utc)
    )
    assert flexible.limit_timespan(time(8, 30), time(10, 0)) == (
        datetime(2014, 1, 1, 8, 30, tzinfo=utc),
        datetime(2014, 1, 1, 10, 0, tzinfo=utc)
    )
    assert flexible.limit_timespan(time(8, 30), time(8, 40)) == (
        datetime(2014, 1, 1, 8, 30, tzinfo=utc),
        datetime(2014, 1, 1, 8, 45, tzinfo=utc)
    )
    assert flexible.limit_timespan(time(8, 30), time(0, 0)) == (
        datetime(2014, 1, 1, 8, 30, tzinfo=utc),
        datetime(2014, 1, 2, 0, 0, tzinfo=utc)
    )
def reserve(self, email, dates=None, group=None, data=None,
            session_id=None, quota=1):
    """ First step of the reservation.

    Seantis.reservation uses a two-step reservation process. The first
    step is reserving what is either an open spot or a place on the
    waiting list.

    The second step is to actually write out the reserved slots, which
    is done by approving an existing reservation.

    Most checks are done in the reserve functions. The approval step
    only fails if there's no open spot.

    This function returns a reservation token which can be used to
    approve the reservation in approve_reservation.

    @param email: email address attached to the reservation (validated).
    @param dates: iterable of dates/pairs to reserve; mutually exclusive
                  with `group`.
    @param group: group identifier whose dates are looked up and reserved
                  as a whole; mutually exclusive with `dates`.
    @param data: opaque payload stored on the reservation record.
    @param session_id: browser-session id stored on the reservation.
    @param quota: number of spots requested (must be >= 1).
    """
    # Exactly one of `dates` / `group` must be given.
    assert (dates or group) and not (dates and group)

    validate_email(email)

    if group:
        # A group reservation covers all of the group's dates.
        dates = self.dates_by_group(group)

    dates = utils.pairs(dates)

    # First, the request is checked for saneness. If any requested
    # date cannot be reserved the request as a whole fails.
    for start, end in dates:

        # are the parameters valid?
        # A single reservation may not span a day or more...
        if abs((end - start).days) >= 1:
            raise ReservationTooLong

        # ...must run forward in time and last at least five minutes.
        if start > end or (end - start).seconds < 5 * 60:
            raise ReservationParametersInvalid

        # can all allocations be reserved?
        for allocation in self.allocations_in_range(start, end):

            # start and end are not rasterized, so we need this check
            if not allocation.overlaps(start, end):
                continue

            assert allocation.is_master

            # with manual approval the reservation ends up on the
            # waitinglist and does not yet need a spot
            if not allocation.approve_manually:
                if not self.find_spot(allocation, start, end):
                    raise AlreadyReservedError

                free = self.free_allocations_count(allocation, start, end)
                if free < quota:
                    raise AlreadyReservedError

            # A reservation_quota_limit of 0 means "no per-reservation
            # limit"; otherwise the requested quota may not exceed it.
            if allocation.reservation_quota_limit > 0:
                if allocation.reservation_quota_limit < quota:
                    raise QuotaOverLimit

            if allocation.quota < quota:
                raise QuotaImpossible

            if quota < 1:
                raise InvalidQuota

    # ok, we're good to go
    token = new_uuid()
    found = 0

    # groups are reserved by group-identifier - so all members of a group
    # or none of them. As such there's no start / end date which is defined
    # implicitly by the allocation
    if group:
        found = 1
        reservation = Reservation()
        reservation.token = token
        reservation.target = group
        reservation.status = u'pending'
        reservation.target_type = u'group'
        reservation.resource = self.uuid
        reservation.data = data
        reservation.session_id = session_id
        reservation.email = email
        reservation.quota = quota
        Session.add(reservation)
    else:
        groups = []

        # One pending Reservation row is written per overlapped master
        # allocation; all rows share the same token.
        for start, end in dates:
            for allocation in self.allocations_in_range(start, end):

                if not allocation.overlaps(start, end):
                    continue

                found += 1

                reservation = Reservation()
                reservation.token = token
                reservation.start, reservation.end = rasterize_span(
                    start, end, allocation.raster
                )
                reservation.target = allocation.group
                reservation.status = u'pending'
                reservation.target_type = u'allocation'
                reservation.resource = self.uuid
                reservation.data = data
                reservation.session_id = session_id
                reservation.email = email
                reservation.quota = quota

                Session.add(reservation)

                groups.append(allocation.group)

        # check if no group reservation is made with this request.
        # reserve by group in this case (or make this function
        # do that automatically)
        assert len(groups) == len(set(groups)), \
            'wrongly trying to reserve a group'

    if found:
        # NOTE: `reservation` here is the last Reservation created above.
        notify(ReservationMadeEvent(reservation, self.language))
    else:
        raise InvalidReservationError

    return token
def allocate(
    self, dates,
    raster=15,
    quota=None,
    partly_available=False,
    grouped=False,
    approve_manually=True,
    reservation_quota_limit=0,
    whole_day=False
):
    """Allocates a spot in the calendar.

    An allocation defines a timerange which can be reserved. No
    reservations can exist outside of existing allocations. In fact any
    reserved slot will link to an allocation.

    An allocation may be available as a whole (to reserve all or
    nothing). It may also be partly available which means reservations
    can be made for parts of the allocation.

    If an allocation is partly available a raster defines the
    granularity with which a reservation can be made (e.g. a raster of
    15min will ensure that reservations are at least 15 minutes long and
    start either at :00, :15, :30 or :45)

    The reason for the raster is mainly to ensure that different
    reservations trying to reserve overlapping times need the same keys
    in the reserved_slots table, ensuring integrity at the database
    level.

    Allocations may have a quota, which determines how many times an
    allocation may be reserved. Quotas are enabled using a
    master-mirrors relationship.

    The master is the first allocation to be created. The mirrors
    copies of that allocation. See Scheduler.__doc__

    @param dates: iterable of dates/pairs defining the allocated spans.
    @param raster: reservation granularity in minutes; only meaningful
                   when `partly_available` is True.
    @param quota: how many times each allocation may be reserved
                  (defaults to 1).
    @param partly_available: whether parts of the allocation can be
                             reserved.
    @param grouped: if True, all created allocations share one group id.
    @param approve_manually: whether reservations need manual approval.
    @param reservation_quota_limit: per-reservation quota cap (0 = none).
    @param whole_day: align each span to the enclosing day.
    @return: the list of created master Allocation objects.
    @raises InvalidAllocationError: if the requested spans overlap each
                                    other.
    @raises OverlappingAllocationError: if a span overlaps an existing
                                        master allocation.
    """
    dates = utils.pairs(dates)

    group = new_uuid()
    quota = quota or 1

    # If the allocation is not partly available the raster is set to the
    # lowest possible raster value.  (Replaces the fragile
    # `cond and a or b` idiom with a real conditional expression; the
    # two only disagree for raster=0, which is not a valid raster.)
    raster = raster if partly_available else MIN_RASTER_VALUE

    # The whole day option results in the dates being aligned to
    # the beginning of the day / end of it -> not timezone aware!
    if whole_day:
        for ix, (start, end) in enumerate(dates):
            dates[ix] = utils.align_range_to_day(start, end)

    # Ensure that the list of dates contains no overlaps inside
    for start, end in dates:
        if utils.count_overlaps(dates, start, end) > 1:
            raise InvalidAllocationError

    # Make sure that this span does not overlap another master
    for start, end in dates:
        start, end = rasterize_span(start, end, raster)

        existing = self.allocations_in_range(start, end).first()
        if existing:
            raise OverlappingAllocationError(start, end, existing)

    # Write the master allocations
    allocations = []
    for start, end in dates:
        allocation = Allocation(raster=raster)
        allocation.start = start
        allocation.end = end
        allocation.resource = self.uuid
        allocation.quota = quota
        allocation.mirror_of = self.uuid
        allocation.partly_available = partly_available
        allocation.approve_manually = approve_manually
        allocation.reservation_quota_limit = reservation_quota_limit

        # Grouped allocations share one id; otherwise each allocation
        # forms its own single-member group.
        allocation.group = group if grouped else new_uuid()

        allocations.append(allocation)

    Session.add_all(allocations)

    return allocations
def __init__(self, name=None, uuid=None, curr_obj=0,
             workflow_object=None, id_user=0, module_name="Unknown",
             **kwargs):
    """Instantiate a new BibWorkflowEngine object.

    This object is needed to run a workflow and control the workflow,
    like at which step of the workflow execution is currently at, as
    well as control object manipulation inside the workflow.

    You can pass several parameters to personalize your engine,
    but most of the time you will not need to create this object
    yourself as the :py:mod:`.api` is there to do it for you.

    :param name: name of workflow to run.
    :type name: str

    :param uuid: pass a uuid to an existing workflow.
    :type uuid: str

    :param curr_obj: internal id of current object being processed.
    :type curr_obj: int

    :param workflow_object: existing instance of a Workflow object.
    :type workflow_object: Workflow

    :param id_user: id of user to associate with workflow
    :type id_user: int

    :param module_name: label used to query groups of workflows.
    :type module_name: str
    """
    super(BibWorkflowEngine, self).__init__()
    self.db_obj = None
    if isinstance(workflow_object, Workflow):
        # An already-loaded database object was handed in; reuse it.
        self.db_obj = workflow_object
    else:
        # If uuid is defined we try to get the db object from DB.
        if uuid is not None:
            self.db_obj = \
                Workflow.get(Workflow.uuid == uuid).first()
        else:
            # No uuid given: mint a fresh one for the new workflow row.
            uuid = new_uuid()
    if self.db_obj is None:
        # Nothing was found (or nothing was passed in): create and
        # persist a brand-new Workflow row in status NEW.
        self.db_obj = Workflow(name=name, id_user=id_user,
                               current_object=curr_obj,
                               module_name=module_name, uuid=uuid)
        self.save(status=WorkflowStatus.NEW)
    # Attach a database-backed log handler once per workflow uuid; the
    # uuid-in-logger-name check avoids re-wrapping an existing logger.
    if text_type(self.db_obj.uuid) not in self.log.name:
        db_handler_obj = BibWorkflowLogHandler(BibWorkflowEngineLog, "uuid")
        self.log = get_logger(logger_name="workflow.%s" % self.db_obj.uuid,
                              db_handler_obj=db_handler_obj, obj=self)
    self.set_workflow_by_name(self.db_obj.name)
    self.set_extra_data_params(**kwargs)