Example #1
 def test_run_too_soon(self):
     job = Job(name='testsuite-job', username='******')
     job.status = Status.WAIT
     job.status = Status.RUN
     # Status.WAIT should be removed so the len should be 2 rather
     # than 3.
     assert len(job.status_history) == 2, 'history length should be 2'
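The comment documents an implied contract: assigning Status.RUN directly after Status.WAIT drops the WAIT entry from the history. A minimal sketch of a setter with that collapsing behavior, inferred from the test alone (the project's real Job class is not shown here):

from enum import Enum

class Status(Enum):
    INIT = 'init'
    WAIT = 'wait'
    RUN = 'run'

class Job:
    def __init__(self, name=None, username=None):
        self.name = name
        self.username = username
        self.status_history = [Status.INIT]

    @property
    def status(self):
        return self.status_history[-1]

    @status.setter
    def status(self, value):
        # Inferred rule: a RUN that immediately follows a WAIT replaces it,
        # which is why the test expects a history of length 2, not 3.
        if self.status_history and self.status_history[-1] == Status.WAIT \
                and value == Status.RUN:
            self.status_history.pop()
        self.status_history.append(value)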
Example #2
    def notify(self, evt):
        Job.notify(self, evt)
        log.debug("Notified of event: %s" % str(evt.__class__))
        if isinstance(evt, simple.ConnectCompleteEvent) and \
            evt.getSource() == self.connect_job:
            self.conn = evt.getConnection()
            self.run()
        elif isinstance(evt, simple.ConnectFailedEvent) and \
              evt.getSource() == self.connect_job:
            log.error("Failed to connect to agent")
            print "Failed to connect to agent"
            self.getAgent().setState(agent.STOPPING)

        elif isinstance(evt, agent.MessageReceivedEvent) and \
              isinstance(evt.getMessage(), message.Response):

            if isinstance(evt.getMessage(), agent.OkResponse) and \
               self.key == evt.getMessage().getRequestKey():
                print "Shutdown Acknowledged"
                self.getAgent().setState(agent.STOPPING)

            elif isinstance(evt.getMessage(), agent.DeniedResponse) and \
               self.key == evt.getMessage().getRequestKey():
                print "Shutdown Denied"
                self.getAgent().setState(agent.STOPPING)
Example #3
 def test_run_too_soon(self):
     job = Job('test')
     job.status = Status.WAIT
     job.status = Status.RUN
     # Status.WAIT should be removed so the len should be 2 rather
     # than 3.
     assert len(job.status_history) == 2, 'history length should be 2'
Example #4
def main():
	pygame.init()
	fpsClock = pygame.time.Clock()
	size = 1920, 1200
	background = pygame.image.load("go.jpg")
	screen = pygame.display.set_mode(size)
	screen.blit(background,(0,0))
	width = 0

	while 1:
		for event in pygame.event.get():
			if event.type == pygame.QUIT:
				sys.exit()
			elif event.type==pygame.KEYDOWN:
				if event.key==pygame.K_ESCAPE:
					sys.exit()
									
		pseudoJob = Job(0)
		myjob = pseudoJob.getData()
		
		for n in range(0, len(myjob)):
			drawing(width, myjob[n], screen)
			if (n != len(myjob) -1):
				width += (myjob[n].node * myjob[n].core) / 5
			else:
				width = 0
		print myjob[0].qtime
		pygame.display.flip()
		fpsClock.tick(30)
Example #5
 def _add_gc_job(self):
     job = Job()
     job.name = 'gc'
     job.job_func = self.gc
     job.set_frequency(10, 'minutes')
     self._add_job(job)
     self.instantiate(job)
Example #6
    def test_submit_job_to_ptburn_with_audio(self, label_patch, required_files,
                                             glob_patch):

        required_files.return_value = True
        label = Mock(sermon_title="title", date=datetime.date(2014,1,1),
           day="SUN, AM", minister="H.L. Sheppard")
        label.get_label_key.return_value = "010114PM"
        glob_patch.return_value = ["one.mp3", "two.mp3"]
        
        new_job = Job(1, self.connection)
        new_job.printonly = False
        new_job.label = label
        new_job.submit_job_to_ptburn("/audio/", "/labels/","/tmp/")

        self.assertTrue(os.path.exists("/tmp/JOB_1.jrq"))
        job_file = open("/tmp/JOB_1.jrq").readlines()
        self.assertEquals(len(job_file), 11)
        self.assertTrue("JobID = 1\n" in job_file)
        self.assertTrue("VolumeName = 010114PM\n" in job_file)
        self.assertTrue("AudioFile = one.mp3\n" in job_file)
        self.assertTrue("AudioFile = two.mp3\n" in job_file)
        self.assertTrue("CloseDisc = YES\n" in job_file)
        self.assertTrue("Copies = 1\n" in job_file)
        self.assertTrue("PrintLabel = /labels/cd-label.std\n" in job_file)
        self.assertTrue("MergeField = title\n" in job_file)
        self.assertTrue("MergeField = 01/01/2014\n" in job_file)
        self.assertTrue("MergeField = SUN, AM\n" in job_file)
        self.assertTrue("MergeField = H.L. Sheppard\n" in job_file)
Example #7
 def do_job(self, line):
     conn = self.conn()
     if conn:
         #print "Line = %s" % line
         r = cmd.Cmd.parseline(self, line)
         print "job = ", r
         if r[0] and r[1]:
             print "Sending JOB : %s" % r[0]
             conn.send('job')
             ## We should receive "Ok send your job"
             print "Srv => ", conn.recv()
             j = Job()
             j.name = r[0]
             j.cmd = r[1]
             conn.send(j)
             ## We should receive "Job receiving" or an error
             print "Srv => ", conn.recv()
             ## We should receive "Job finished"
             print "Srv => ", conn.recv()
             ## We should receive the job itself
             j = conn.recv()
             ## We should receive the closing prompt "OK see you soon"
             print "Srv => ", conn.recv()
             ## The connection should then terminate
             ## Display the result
             j.pr()
         else:
             print "Invalid job [%s] [%s]" % (r[0], r[1])
     else:
         print "Not connected"
     ## In every case the connection is closed
     conn = None
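The comments describe a simple request/response protocol. A hypothetical server-side loop consistent with them (the real server is not part of this example, and the send/recv semantics are assumed to mirror the client's):

def handle_client(conn):
    # Inferred counterpart of do_job(): acknowledge, accept the Job,
    # run it, send it back, and say goodbye.
    if conn.recv() == 'job':
        conn.send('Ok send your job')   # printed client-side as "Srv => ..."
        job = conn.recv()               # the Job object itself
        conn.send('Job receiving')      # receipt acknowledgment
        # ... the job executes here; the mechanism is not shown ...
        conn.send('Job finished')
        conn.send(job)                  # the completed Job travels back
        conn.send('OK see you soon')    # closing prompt before disconnect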
Example #8
 def load(self,source=None,source_filename=None,selected_nodes=None):
     # Check if similar job exists
     jobs = self.session.query(Job).filter(Job.source_filename == unicode(source_filename)).all()
     if len(jobs)==0:
         job = Job(source=source,source_filename=source_filename,selected_nodes=selected_nodes)
         job.material = self.session.query(Material).first()
         if job.load():
             # update the ui with job info
             self.job = job
             self._flash(0,'%s loaded'%os.path.basename(job.source_filename or 'File'))
             self._update_preview()
             return True
         else:
             msg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                 buttons=gtk.BUTTONS_CLOSE,
                 message_format=job.messages.pop())
             msg.set_title('Error opening file')
             msg.run()
             msg.destroy()
             return False
     else:
         # open a new job or
         msg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
             buttons=gtk.BUTTONS_CLOSE,
             message_format='A similar job was already found, do you want to load that?')
         msg.set_title('File already found')
         msg.run()
         msg.destroy()
         return False
Example #9
	def __init__(self,doc):
		Job.__init__(self, doc)
		try:
			self.format = self.format
		except AttributeError:
			self.format = "json"
		try:
			self.coll_type = self.coll_type
		except AttributeError:
			self.coll_type = None
		
		self._dict_values = {}
		self._dict_values["sources"] = {
							"filename": "%s/export_%s_sources_%s.%s" %(self.project_name, self.name, self.date, self.format),
							"format": self.format,
							"fields": 'url,origin,date.date',
							}
		self._dict_values["logs"] = {
							"filename": "%s/export_%s_logs_%s.%s" %(self.project_name,self.name, self.date, self.format), 
							"format":self.format,
							"fields": 'url,code,scope,status,msg',
							}
		self._dict_values["results"] = {
							"filename": "%s/export_%s_results_%s.%s" %(self.project_name,self.name, self.date, self.format), 
							"format":self.format,
							"fields": 'url,domain,title,content.content,outlinks.url,crawl_date',
							}	
Example #10
    def test_run_timeout(self):

        from job_dictator import JobDictator
        from job import Job
        from worker import Worker

        dictator = JobDictator()
        dictator.client = mock.MagicMock()
        dictator.client.keys.return_value = ["job-", "jm-"]
        job = Job("running", "something")
        job.run_started_on = datetime.now() - timedelta(minutes=10)
        worker = Worker("job-", None)
        dictator.client.get.side_effect = [pickle.dumps(job), pickle.dumps(worker)]
        self.request_mock.get = mock.MagicMock()
        dictator.settings = mock.MagicMock()
        dictator.settings.job_timeout = 1
        dictator.headers = mock.MagicMock()
        returner = mock.MagicMock()
        returner.content = "status:ok"
        self.request_mock.get.return_value = returner
        dictator.pull = mock.MagicMock()

        dictator.aladeen()

        assert dictator.client.keys.call_count == 2
        assert dictator.client.get.call_count == 2
        assert dictator.client.set.call_count == 1
        assert dictator.client.publish.call_count == 1
        assert dictator.pull.call_count == 0
        assert pickle.loads(dictator.client.set.call_args_list[0][0][1]).state == "broken"
Example #11
def test():
    print "Start test"
    global dbName
    dbName = "test.db"

    clear()
    jobs = loadJobs()
    assert len(jobs) == 0

    newJob = Job("test")
    addNewJob(newJob)
    jobs = loadJobs()
    assert len(jobs) == 1
    assert jobs.index(newJob) >= 0

    newJob = jobs[newJob.id]
    newJob.recipeRef = "new test"
    saveJobs(jobs)
    jobs = loadJobs()
    assert newJob.recipeRef == jobs[newJob.id].recipeRef

    clear()
    jobs = loadJobs()
    assert len(jobs) == 0

    print "End test"
Example #12
    def testJobExecuteOnce(self):
        work_data = [1.111111] * 5
        test_job = Job(0, 0, work_data)

        test_job.execute_next()

        self.assertEqual(int(test_job.work_data[0]), 1112)
Example #13
        def Move(self, pv_src_obj, pv_source_range, pv_dest_obj,
                 pv_dest_range, move_options):
            pv_dest = None
            pv_src = cfg.om.get_by_path(pv_src_obj)
            if pv_src:
                if pv_dest_obj != '/':
                    pv_dest_t = cfg.om.get_by_path(pv_dest_obj)
                    if not pv_dest_t:
                        raise dbus.exceptions.DBusException(
                            interface_name, 'pv_dest_obj (%s) not found' %
                            pv_dest_obj)
                    pv_dest = pv_dest_t.lvm_id

                rc, out, err = cmdhandler.pv_move_lv(
                    move_options,
                    self.lvm_id,
                    pv_src.lvm_id,
                    pv_source_range,
                    pv_dest,
                    pv_dest_range)

                if rc == 0:
                    # Create job object for monitoring
                    job_obj = Job(self.lvm_id, None)
                    cfg.om.register_object(job_obj)
                    cfg.kick_q.put("wake up!")
                    return job_obj.dbus_object_path()
                else:
                    raise dbus.exceptions.DBusException(
                        interface_name,
                        'Exit code %s, stderr = %s' % (str(rc), err))
            else:
                raise dbus.exceptions.DBusException(
                    interface_name, 'pv_src_obj (%s) not found' % pv_src_obj)
Example #14
 def create_job(self,filename,**kwargs):
     """Create a job and try to set the source. Returns bool success."""
     job = Job(**kwargs)
     # Get the default material
     job.material = self.get_material()
     try:
         job.set_source(filename)
         self.job = job
         self.session.add(self.job)
         msg = 'Loaded %s'%os.path.basename(job.name or 'File')
         self.get_window('inkcut').set_title("*%s - Inkcut"%job.name)
         self.flash(msg)
         self.on_plot_feed_distance_changed(self.get_widget('plot-properties','plot-feed'))
         self._update_ui()
         return True  # success; the docstring promises a bool success flag
     except Exception, err:
         # update the ui with job info
         log.debug(traceback.format_exc())
         msg = Gtk.MessageDialog(type=Gtk.MessageType.ERROR,
             buttons=Gtk.ButtonsType.OK,
             message_format="Issue loading file")
         msg.format_secondary_text(str(err))
         msg.run()
         msg.destroy()
         return False
Example #15
def search(query):
    job = Job(query) #use name prep from job init
    result = cache.get(job.hashtag)
    if not result:
        result = json.dumps(job.execute()) 
        cache.add(job.hashtag, result)
    return result
Example #16
 def getFromDB():
     #l = {}
     #cur_iter = Job.currentIteration()
     logging.info("Get all jobs from db")
     #l['iteration'] = cur_iter
     
     jobs = Job.getAll()
     return Job.dump(jobs)
Example #17
    def testJobExecutesUntilFinish(self):
        work_data = [1.111111] * 5
        test_job = Job(0, 0, work_data)

        while not test_job.is_finished():
            test_job.execute_next()

        self.assertEqual(int(test_job.work_data[-1]), 1112)
Example #18
	def show(self):
		for job in self.__COLL__.find():
			try:
				print "-", job['name'], job['action'], job["active"], job["user"], job["date"].strftime('%d-%m-%Y')
			except KeyError:
				print job
				out = Job(job)
				out.delete()
Example #19
    def test_get_status_from_db(self, label_patch):


        new_job = Job(1, self.connection)
        self.mock_row.status = 2
        new_job.get_status_from_db(self.connection)

        self.assertEquals(new_job.status, 2)
Example #20
    def test_empty_history(self):
        job = Job(name='testsuite-job', username='******')

        job.status = Status.WAIT
        job.status = Status.RUN
        job.status_history = []
        # An empty history should not happen, but if it did, the value
        # should be Status.INIT.
        assert job.status == Status.INIT, 'status should be Status.INIT'
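Read together with Example #1, this test implies the status getter needs an explicit fallback when the history is empty. Continuing the earlier sketch (inferred, not the project's actual code):

class Job:
    # ...as sketched under Example #1...

    @property
    def status(self):
        # Inferred fallback: an empty status_history reads as Status.INIT.
        if not self.status_history:
            return Status.INIT
        return self.status_history[-1]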
Example #21
    def testJobExecutesOnMoreThanOneElement(self):
        work_data = [1.111111] * 5
        test_job = Job(0, 0, work_data)

        for i in range(2000):
            test_job.execute_next()

        self.assertEqual(int(test_job.work_data[0]), 1112)
        self.assertEqual(int(test_job.work_data[1]), 1112)
Example #22
    def test_set_dict(self):
        job = Job('test')

        # testing some untested cases in set_dict()
        job.status = Status.ERROR
        assert job.status.css == 'danger', 'status.css should be "danger".'

        job.status = '404'
        assert job.status.css == 'default', 'status.css should be "default".'
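The assertions suggest every status value carries a Bootstrap-style css class, with unknown inputs such as '404' degrading to 'default'. One illustrative way to model that (the real Status type is not shown in these tests):

CSS_BY_STATUS = {
    'ERROR': 'danger',
}

class StatusLike:
    def __init__(self, name):
        self.name = name

    @property
    def css(self):
        # Unrecognized values such as '404' fall back to the 'default' class.
        return CSS_BY_STATUS.get(self.name, 'default')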
Example #23
    def test_empty_history(self):
        job = Job('test')

        job.status = Status.WAIT
        job.status = Status.RUN
        job.status_history = []
        # An empty history should not happen, but if it did, the value
        # should be Status.INIT.
        assert job.status == Status.INIT, 'status should be Status.INIT'
Example #24
    def test_required_files_exist_with_files(self, label_patch, glob_patch):

        label_patch.get_label_key.return_value = "010114PM"
        glob_patch.return_value = ["one.mp3", "two.mp3"]

        new_job = Job(1, self.connection)
        self.assertTrue(new_job.required_files_exist("/audio/"))
        label_patch.get_label_key.assert_called_once()
        glob_patch.assert_called_once_with("/audio/010114PM*.mp3")
Example #25
    def test_set_dict(self):
        job = Job(name='testsuite-job', username='******')

        # testing some untested cases in set_dict()
        job.status = Status.ERROR
        assert job.status.css == 'danger', 'status.css should be "danger".'

        job.status = '404'
        assert job.status.css == 'default', 'status.css should be "default".'
Example #26
	def create(self):
		self._logs["step"] = "registering new user"
		self._logs["status"] = True
		self.user = self.name
		self.name = "none"
		self.action = "crawl"
		self.active = False
		j = Job(self.__dict__)
		return j.create()
Example #27
 def refresh_jobs(self):
     """ Run all jobs """
     with settings(host_string="%s@%s" % (self.user, self.ip_address)):
         # Run init from Base job class, once per run
         job_obj = Job()
         job_obj.update_packages()
         # Run all nodes jobs
         for job in self.jobs:
             self.run_single_job(job)
Example #28
    def render(self, request):
        if request.args.has_key("string"):
            string = request.args["string"][0]
            j = Job()
            j.name = "JOB 0001"
            j.cmd = string
            self.worker.job = j
            self.worker.work()
            #rc, err, out, cmd, ilog  = self.worker.work(string)
            rc = j.returncode
            err = j.stderr
            out = j.stdout
            cmd = j.cmd
            ilog = j.ilog
        else:
            rc, err, out, cmd, ilog  = ( 0, "", "", "<>", [] )

        rep = """
        <html><body><form>
        <input type='text' name='string' value='%s' />
        <input type='submit' value='Go' />
        """
        rep += "<hr/>"
        rep += "<table border=1>"
        rep += "<tr>"
        rep += "<th> Libelle </th>"
        rep += "<th> Valeur  </th>"
        rep += "</tr>"

        rep += "<tr>"
        rep += "<td> Commande  </td>"
        rep += "<td>  %s </td>" % cmd
        rep += "</tr>"

        rep += "<tr>"
        rep += "<td> stderr </td>"
        rep += "<td>  %s </td>" % err
        rep += "</tr>"

        rep += "<tr>"
        rep += "<td> stdout </td>"
        rep += "<td>  %s </td>" % out
        rep += "</tr>"

        for l in ilog:
            rep += "<tr>"
            rep += "<td></td>"
            rep += "<td> %s </td>" % l
            rep += "</tr>"

        rep += "</table>"
        rep +=  """
        </form></body></html>
        """
               
        return rep
Example #29
	def execute_job(jobfile):
		j = Job(None)
		j.loadyaml(jobfile)
		if j.jobtype == "unittest":
			print "--------------------------------------------------"
			print "started test:\t" + j.name
			ret_status = j.execute()
			return j,ret_status
		else:
			return j, []
Example #30
 def submit( self , name , cmd , run_path , argList , num_cpu = 1 , blocking = False):
     argc = len( argList )
     argv = (ctypes.c_char_p * argc)()
     argv[:] = map( str , argList )
     job_c_ptr = cfunc.submit( self , cmd , num_cpu , run_path , name , argc , argv )
     job = Job( self , job_c_ptr , blocking )
     if blocking:
         job.block()
         job = None
     return job
Example #31
 def get_job(self, job_id):
     cas_response = self.api.describe_job(self.name, job_id)
     return Job(self, cas_response)
Example #32
    def _run_app_internal(self, app_id, params, tag, version, cell_id, run_id,
                          **kwargs):
        """
        Attempts to run the app, returns a Job with the running app info.
        Should *hopefully* also inject that app into the Narrative's metadata.
        Probably need some kind of JavaScript-foo to get that to work.

        Parameters:
        -----------
        app_id - should be from the app spec, e.g. 'build_a_metabolic_model'
                    or 'MegaHit/run_megahit'.
        params - the dictionary of parameters.
        tag - optional, one of [release|beta|dev] (default=release)
        version - optional, a semantic version string. Only released modules
                  have versions, so if the tag is not 'release', and a version
                  is given, a ValueError will be raised.
        **kwargs - these are the set of parameters to be used with the app.
                   They can be found by using the app_usage function. If any
                   non-optional apps are missing, a ValueError will be raised.
        """

        # TODO: this needs restructuring so that we can send back validation
        # failure messages. Perhaps a separate function and catch the errors,
        # or return an error structure.

        # Intro tests:
        self.spec_manager.check_app(app_id, tag, raise_exception=True)

        if version is not None and tag != "release":
            if re.match(r'\d+\.\d+\.\d+', version) is not None:
                raise ValueError(
                    "Semantic versions only apply to released app modules. " +
                    "You can use a Git commit hash instead to specify a " +
                    "version.")

        # Get the spec & params
        spec = self.spec_manager.get_spec(app_id, tag)

        # There's some branching to do here.
        # Cases:
        # app has behavior.kb_service_input_mapping - valid long-running app.
        # app has behavior.output_mapping - not kb_service_input_mapping or
        #     script_module - it's a viewer and should return immediately
        # app has other things besides kb_service_input_mapping - not valid.
        if 'behavior' not in spec:
            raise Exception("This app appears invalid - " +
                            "it has no defined behavior")

        if 'kb_service_input_mapping' not in spec['behavior']:
            raise Exception("This app does not appear to be a long-running " +
                            "job! Please use 'run_local_app' to start this " +
                            "instead.")

        # Preflight check the params - all required ones are present, all
        # values are the right type, all numerical values are in given ranges
        spec_params = self.spec_manager.app_params(spec)
        spec_params_map = dict((spec_params[i]['id'], spec_params[i])
                               for i in range(len(spec_params)))

        ws_input_refs = extract_ws_refs(app_id, tag, spec_params, params)

        ws_id = system_variable('workspace_id')
        if ws_id is None:
            raise ValueError('Unable to retrieve current ' +
                             'Narrative workspace information!')

        input_vals = self._map_inputs(
            spec['behavior']['kb_service_input_mapping'], params,
            spec_params_map)

        service_method = spec['behavior']['kb_service_method']
        service_name = spec['behavior']['kb_service_name']
        service_ver = spec['behavior'].get('kb_service_version', None)

        # Let the given version override the spec's version.
        if version is not None:
            service_ver = version

        # This is what calls the function in the back end - Module.method
        # This isn't the same as the app spec id.
        function_name = service_name + '.' + service_method
        job_meta = {'tag': tag}
        if cell_id is not None:
            job_meta['cell_id'] = cell_id
        if run_id is not None:
            job_meta['run_id'] = run_id

        # We're now almost ready to run the job. Last, we need an agent token.
        try:
            token_name = 'KBApp_{}'.format(app_id)
            token_name = token_name[:self.__MAX_TOKEN_NAME_LEN]
            agent_token = auth.get_agent_token(auth.get_auth_token(),
                                               token_name=token_name)
        except Exception as e:
            raise

        job_meta['token_id'] = agent_token['id']
        # This is the input set for NJSW.run_job. Now we need the workspace id
        # and whatever fits in the metadata.
        job_runner_inputs = {
            'method': function_name,
            'service_ver': service_ver,
            'params': input_vals,
            'app_id': app_id,
            'wsid': ws_id,
            'meta': job_meta
        }
        if len(ws_input_refs) > 0:
            job_runner_inputs['source_ws_objects'] = ws_input_refs

        # Log that we're trying to run a job...
        log_info = {
            'app_id': app_id,
            'tag': tag,
            'version': service_ver,
            'username': system_variable('user_id'),
            'wsid': ws_id
        }
        kblogging.log_event(self._log, "run_app", log_info)

        try:
            job_id = clients.get("job_service").run_job(job_runner_inputs)
        except Exception as e:
            log_info.update({'err': str(e)})
            kblogging.log_event(self._log, "run_app_error", log_info)
            raise transform_job_exception(e)

        new_job = Job(job_id,
                      app_id,
                      input_vals,
                      system_variable('user_id'),
                      tag=tag,
                      app_version=service_ver,
                      cell_id=cell_id,
                      run_id=run_id,
                      token_id=agent_token['id'])

        self._send_comm_message(
            'run_status', {
                'event': 'launched_job',
                'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
                'cell_id': cell_id,
                'run_id': run_id,
                'job_id': job_id
            })
        JobManager().register_new_job(new_job)
        if cell_id is not None:
            return
        else:
            return new_job
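Given the docstring, a call might look like the following. Only the positional order and the keyword meanings come from the signature; app_manager is a stand-in for whatever object owns this method, the app id is the docstring's own example, and the parameter dict keys are invented for illustration:

job = app_manager._run_app_internal(
    'MegaHit/run_megahit',                     # app_id from the app spec
    {'read_library_ref': '123/4/5',            # hypothetical params
     'output_contigset_name': 'my.contigs'},
    'release',                                 # tag: one of release|beta|dev
    None,                                      # version: released modules only
    None,                                      # cell_id: None means the Job is returned
    None,                                      # run_id
)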
Example #33
    def test_dmdsec_from_csv_parsed_metadata_both(self):
        """It should create a dmdSec for DC and Other parsed metadata."""
        data = collections.OrderedDict(
            [
                ("dc.title", ["Yamani Weapons"]),
                ("dc.contributor", ["雪 ユキ".encode("utf8")]),
                ("dcterms.isPartOf", ["AIC#42"]),
                ("Title", ["Yamani Weapons"]),
                ("Contributor", ["雪 ユキ".encode("utf8")]),
                (
                    "Long Description",
                    ["This is about how glaives are used in the Yamani Islands"],
                ),
            ]
        )
        # Test
        state = create_mets_v2.MetsState()
        ret = create_mets_v2.createDmdSecsFromCSVParsedMetadata(
            Job("stub", "stub", []), data, state
        )
        # Verify
        assert ret
        assert len(ret) == 2
        # Return can be DC or OTHER first, but in this case DC should be first
        dc_dmdsec = ret[0]
        assert dc_dmdsec.tag == "{http://www.loc.gov/METS/}dmdSec"
        assert "ID" in dc_dmdsec.attrib
        mdwrap = dc_dmdsec[0]
        assert mdwrap.tag == "{http://www.loc.gov/METS/}mdWrap"
        assert "MDTYPE" in mdwrap.attrib
        assert mdwrap.attrib["MDTYPE"] == "DC"
        xmldata = mdwrap[0]
        assert xmldata.tag == "{http://www.loc.gov/METS/}xmlData"
        dc_elem = xmldata[0]
        # Elements are children of dublincore tag
        assert dc_elem.tag == "{http://purl.org/dc/terms/}dublincore"
        assert len(dc_elem) == 3
        assert dc_elem[0].tag == "{http://purl.org/dc/elements/1.1/}title"
        assert dc_elem[0].text == "Yamani Weapons"
        assert dc_elem[1].tag == "{http://purl.org/dc/elements/1.1/}contributor"
        assert dc_elem[1].text == "雪 ユキ"
        assert dc_elem[2].tag == "{http://purl.org/dc/terms/}isPartOf"
        assert dc_elem[2].text == "AIC#42"

        other_dmdsec = ret[1]
        assert other_dmdsec.tag == "{http://www.loc.gov/METS/}dmdSec"
        assert "ID" in other_dmdsec.attrib
        mdwrap = other_dmdsec[0]
        assert mdwrap.tag == "{http://www.loc.gov/METS/}mdWrap"
        assert "MDTYPE" in mdwrap.attrib
        assert mdwrap.attrib["MDTYPE"] == "OTHER"
        assert "OTHERMDTYPE" in mdwrap.attrib
        assert mdwrap.attrib["OTHERMDTYPE"] == "CUSTOM"
        xmldata = mdwrap[0]
        assert xmldata.tag == "{http://www.loc.gov/METS/}xmlData"
        # Elements are direct children of xmlData
        assert len(xmldata) == 3
        assert xmldata[0].tag == "title"
        assert xmldata[0].text == "Yamani Weapons"
        assert xmldata[1].tag == "contributor"
        assert xmldata[1].text == "雪 ユキ"
        assert xmldata[2].tag == "long_description"
        assert (
            xmldata[2].text
            == "This is about how glaives are used in the Yamani Islands"
        )
Example #34
 def _fixup_fileid_state(self):
     """For items on-disk we have to mimic the filename cleanup process."""
     for key, _ in dict(self.state.fileNameToFileID).items():
         self.state.fileNameToFileID[
             create_mets_v2._fixup_path_input_by_user(Job("stub", "stub", []), key)
         ] = self.state.fileNameToFileID.pop(key)
Example #35
                                                 args.jar_file)

        # Render the normalized json file
        out_fname = os.path.join(
            args.output_dir, "{0}.{1}".format(tiles_fname_prefix, args.format))
        if not os.path.exists(out_fname):
            dependencies = []
            if job_normalize != None:
                dependencies.append(job_normalize)
            job_render = Render2D(dependencies,
                                  norm_json,
                                  out_fname,
                                  -1,
                                  args.jar_file,
                                  threads_num=8)

    # Run all jobs
    if args.keeprunning:
        Job.keep_running()
    elif args.multicore:
        # Bundle jobs for multicore nodes
        # if RUN_LOCAL:
        #     print "ERROR: --local cannot be used with --multicore (not yet implemented)."
        #     sys.exit(1)
        Job.multicore_run_all()
    elif args.multicore_keeprunning:
        # Bundle jobs for multicore nodes
        Job.multicore_keep_running()
    else:
        Job.run_all()
Example #36
def main():
    """"
    RE: Weekly
    If today is Tuesday and payday is Friday
    1) Tuesday = 1, Friday = 4
    We need to advance the date by 3
    2) If today is Friday and payday is Tuesday
    We need to decrement date by 3
    1) current day 1 - target date  4 = -3 but -(-3) is + 3 so...
    1) target day 4 - current date 1 = 3
    2) current day 4 - target date = 3
    2) target day 1 - target date 4 = -3
    if relativedelta is days=+ want second option
    if reltivedelta is days=- want first option
    I think this is commutative so either option works
    provided correct operator is used
    +7 brings target date to next week so gtg
    JOB OBJECT - job series and data data
    Read in from initial list, handle per freq
    add summary, copy init processed job to last and push
    initial copy series to all jobs list
    """
    pd.options.mode.chained_assignment = None
    opsys = platform.platform()
    print(opsys)
    if "Linux" in opsys:
        PATHDATA = "~/payplay/payplay/fakepay2.xlsx"
    else:
        PATHDATA = "c:/code/fakepay2.xlsx"
    data = pd.DataFrame(pd.read_excel(PATHDATA))
    today = dt.date.today()
    wkday = today.weekday()
    qtr = pd.Timestamp(today).quarter
    datedata = (today, today.year, today.month, today.day, wkday, qtr)
    # clean
    print(data.columns)
    data.columns = cons.newcols
    print(data)
    data = data.apply(lambda x: x.astype(str).str.lower())
    print(data)
    jobs = []
    last_job = []
    for i in range(len(data.index)):
        # job = data.iloc[i]
        job = Job(data.iloc[i], datedata)
        joblist = job.listify()  # testing class function
        for item in joblist:
            print(item)
        newyoke = job.unlist_before(joblist)
        print(newyoke)
        if job.jobdata["FREQUENCY"] == "weekly":
            for a in range(0, 52):
                # joblist = cons.TEMPLATE
                # clopy = job
                if a < 1:
                    job = utils.handle_weekly(job)
                    job.current_job = build_sum(job)
                    jobs.append(job.current_job)
                    job.last_job = job.current_job
                    # joblist.clear()
                else:
                    # Something broken in here, appending dates to existing lists or similar...
                    job = utils.run_weekly(job)
                    job.current_job = build_sum(job)
                    jobs.append(job.current_job)
                    job.last_job = job.current_job
                    # current.clear()
                    # Remaining stuff goes here
        elif "quarterly" in job.jobdata["FREQUENCY"]:
            after = False
            if job.jobdata["FREQUENCY"] == "quarterly-after":
                after = True
            for a in range(0, 4):
                push = a
                if push < 1:
                    joblist = utils.handle_qtr(job, datedata, after)
                    jobs.append(joblist)
                    last_job = joblist
                    print("DEBUG QTR - LIST: ", last_job)
                    # joblist.clear()
                else:

                    joblist = utils.run_qtr_jobs(last_job, push)
                    jobs.append(joblist)
                    last_job = joblist
                    # joblist = []
                # Remaining stuff goes here
        else:
            pass
    print("DEBUG - OUTPUT JOBS")
    for item in jobs:
        print("LIST LEN: ", len(item))
        print("LIST: ", item)
    final = pd.DataFrame(jobs, columns=cons.final_cols)
    # final = final.transpose()
    final.columns = cons.final_cols
    final.to_csv("c:/code/test/tryagain.csv")
Example #37
def measure_speed():
    '''
    Use 7 blade servers, the ImageNet dataset, and the LeNet neural network.
    Parameter size is 13MB; batch_size = 1, 2, 4, 8, 16, ..., which may change the bandwidth requirement.
    Training speed: about 90 samples/second for local computation with batch_size 128; 10 samples/second with batch_size 1.
    NIC: 10Gbps.
    Asynchronous training vs. synchronous training; each experiment runs for 5 minutes.
    # ps      1      2      3      4      5      6
    # worker  1...6  1...5  1...4  1...3  1...2  1
    '''

    job_id = 0
    cwd = os.getcwd() + '/'
    stats = []  # element format (#ps, #worker, speed, cpu)
    txt = 'stats.txt'
    if os.path.isfile(txt):  # back up
        time_str = str(int(time.time()))
        fn = './results/' + time_str + '.' + txt
        os.system('cp ' + txt + ' ' + fn)
    f = open(txt, 'w')  # clear txt
    f.close()

    num_node = len(node_list) * 6  # at most 42 pods in total
    kv_stores = ['dist_async']
    # batch_sizes = ['1','2','4','8','16','32','64','128']
    batch_sizes = ['1'] + [str(i) for i in xrange(2, 22, 2)]

    tic = time.time()

    for batch_size in batch_sizes:
        for kv_store in kv_stores:
            if kv_store == 'dist_sync':
                batch_size = str(int(batch_size) * 10)
            for num_ps in xrange(
                    10, 11,
                    2):  # to save time, change to xrange(1, num_node, 2)
                for num_worker in xrange(
                        10, 11, 2
                ):  # to save time, change to xrange(1, num_node-num_ps+1, 2)
                    job_id += 1

                    logger.info("------------------start job " + str(job_id) +
                                "-------------------")
                    toc = time.time()
                    logger.info("time elapsed: " + str((toc - tic) / 60) +
                                " minutes")

                    measure_job = Job('measurement-imagenet', job_id, cwd)
                    measure_job.set_ps_resources(num_ps, ps_cpu, ps_mem)
                    #measure_job.set_ps_placement(node_list[:num_ps])

                    measure_job.set_worker_resources(num_worker, worker_cpu,
                                                     worker_mem)
                    #measure_job.set_worker_placement(node_list[num_ps:num_ps+num_worker])

                    placement_list = node_list * 6
                    measure_job.set_ps_placement(placement_list[:num_ps])
                    measure_job.set_worker_placement(
                        placement_list[num_ps:num_ps + num_worker])

                    image = 'yhpeng/k8s-mxnet-measurement'
                    script = '/init.py'
                    prog = '/mxnet/example/image-classification/train_imagenet.py'
                    work_dir = '/mxnet/example/image-classification/data/'
                    mount_dir_prefix = '/data/k8s-workdir/measurement/'
                    measure_job.set_container(image, script, prog, work_dir,
                                              mount_dir_prefix)

                    measure_job.set_data(data_train='imagenet_data_train_1.rec', data_val='',\
                    hdfs_data_train='/k8s-mxnet/imagenet/imagenet_data_train_1.rec', hdfs_data_val='')
                    measure_job.set_network('resnet', '50')
                    measure_job.set_training('100',
                                             batch_size,
                                             kv_store,
                                             gpus='')
                    measure_job.set_disp('1')
                    measure_job.set_mxnet(kv_store_big_array_bound=1000 * 1000)

                    measure_job.start()

                    counter = 0
                    while (True):
                        try:
                            time.sleep(60)
                        except:
                            logger.info("detect Ctrl+C, exit...")
                            measure_job.delete(True)
                            sys.exit(0)

                        counter += 1
                        try:
                            speed_list = measure_job.get_training_speed()
                            (ps_metrics,
                             worker_metrics) = measure_job.get_metrics()
                        except:
                            logger.info("get training speed error!")
                            measure_job.delete(True)
                            sys.exit(0)
                        # compute cpu usage difference
                        ps_cpu_usage_list = []
                        for metrics in ps_metrics:
                            ps_cpu_usage_list.append(
                                metrics['cpu/usage_rate'] / 1000.0)
                        ps_cpu_diff = max(ps_cpu_usage_list) - min(
                            ps_cpu_usage_list)
                        worker_cpu_usage_list = []
                        for metrics in worker_metrics:
                            worker_cpu_usage_list.append(
                                metrics['cpu/usage_rate'] / 1000.0)
                        worker_cpu_diff = max(worker_cpu_usage_list) - min(
                            worker_cpu_usage_list)

                        model_name = measure_job.get_model_name()
                        logger.info("model name: " + model_name + ", kv_store: " + kv_store + ", batch_size: " + batch_size + \
                        ", num_ps: " + str(num_ps) + ", num_worker: " + str(num_worker) + \
                        ", speed_list: " + str(speed_list) + ", sum_speed (samples/second): " + str(sum(speed_list)) + \
                        ", sum_speed(batches/second): " + str(sum(speed_list)/int(batch_size)) + \
                        ", ps cpu usage diff: " + str(ps_cpu_diff) + \
                        ", worker cpu usage diff: " + str(worker_cpu_diff)
                        )
                        if counter >= 3:
                            stat = (model_name, kv_store, batch_size, num_ps,
                                    num_worker, speed_list, ps_cpu_usage_list,
                                    worker_cpu_usage_list)
                            stats.append(stat)
                            with open(txt, 'a') as f:  # append
                                #for stat in stats:
                                f.write(str(stat) + '\n')

                            measure_job.delete(True)
                            logger.info("sleep 3 seconds before next job")
                            time.sleep(3)
                            break
Example #38
 def __init__(self, command, dispatcher):
     self.occurrences = 0
     Job.__init__(self, command, dispatcher)
Example #39
def calculate_total_cost(house: House, job: Job):
    commute_loss = job.monthly_commute_income_loss(house.location)

    monthly_expenses = house.monthly_expences()
    salary = job.salary_after_tax()
    return salary - monthly_expenses - commute_loss
Example #40
from life import Life

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
for not_interested in ("azure", "uamqp", " urllib3"):
    logging.getLogger(not_interested).setLevel(logging.WARN)
# create console handler with a higher log level
log_stream = logging.StreamHandler()
log_stream.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(name)s - %(message)s')
log_stream.setFormatter(formatter)
logger.addHandler(log_stream)


def calculate_total_cost(house: House, job: Job):
    commute_loss = job.monthly_commute_income_loss(house.location)

    monthly_expenses = house.monthly_expences()
    salary = job.salary_after_tax()
    return salary - monthly_expenses - commute_loss


if __name__ == "__main__":
    job = Job(Point(0, 0), 40000)
    house1 = House(4000000, Point(0, 30))
    house2 = House(2000000, Point(0, 60))

    Life(house1, job).life_monthly_value()
    Life(house2, job).life_monthly_value()
Example #41
    def generate_job_profile(self, user_id):
        self.job_list.append(list())
        task_id = 0
        job_submit_time = dict()
        job_priority = dict()
        job_weight = dict()

        stageIdToParallelism = dict()
        for c_job_id in self.job_profile:
            # temporary setting
            job_submit_time[int(c_job_id)
                            ] = self.job_profile[c_job_id]["Submit Time"]
            job_priority[int(c_job_id)
                         ] = self.job_profile[c_job_id]["Priority"]
            job_weight[int(c_job_id)] = self.job_profile[c_job_id]["Weight"]

        for stage_id in self.stage_profile:
            timeout_type = 0
            job_id = self.stage_profile[stage_id]["Job ID"]
            self.job_durations[job_id] = 0
            Job_id = 'user_%s_job_%s' % (user_id, job_id)
            Stage_id = 'user_%s_stage_%s' % (user_id, stage_id)
            task_number = self.stage_profile[stage_id]["Task Number"]
            # change parallelism

            stageIdToParallelism[Stage_id] = task_number

            Parent_ids = list()
            if "Parents" in self.stage_profile[stage_id]:
                parent_ids = self.stage_profile[stage_id]["Parents"]
                for parent_id in parent_ids:
                    Parent_ids.append('user_%s_stage_%s' %
                                      (user_id, parent_id))
                    if stageIdToParallelism[Parent_ids[-1]] >= task_number:
                        timeout_type = 1

            # generate taskset of the stage
            taskset = list()
            max_time = 0
            for i in range(0, task_number):
                runtime = self.search_runtime(stage_id, i)
                # runtime would be changed when task is allocated to certain machine. Xiandong
                runtime *= 1
                if runtime > max_time:
                    max_time = runtime
                Task_id = 'user_%s_task_%s' % (user_id, task_id)
                # self.time_out = 100 by xiandong
                task = Task(Job_id, Stage_id, Task_id, i,
                            runtime, self.time_out, job_priority[job_id], self.threshold, self.threshold_step)
                task_id += 1
                task.user_id = user_id
                taskset.append(task)
            stage = Stage(Job_id, Stage_id, Parent_ids, taskset)
            # Now we have jobs, stages, and the taskset within each stage.
            # Note: currently we assume each job has only one stage.

            for id in Parent_ids:
                self.scheduler.stageIdToStage[id].downstream_parallelism += len(
                    taskset)

            self.scheduler.stageIdToStage[Stage_id] = stage  # dict()
            for task in taskset:
                task.stage = stage
            stage.user_id = user_id

            if self.search_job_by_id(Job_id, user_id) == False:
                job = Job(Job_id)
                job.index = int(job_id)
                job.user_id = user_id
                job.stages.append(stage)
                # we can do this because we only have one stage for each job
                job.submit_time = job_submit_time[job_id]
                job.priority = job_priority[job_id]
                job.weight = job_weight[job_id]
                self.job_list[user_id].append(job)
                stage.priority = job.priority
                stage.job = job
            else:  # this job already exists
                job = self.search_job_by_id(Job_id, user_id)
                job.stages.append(stage)
                stage.priority = job.priority
                stage.job = job

        # Set the not_completed_stage_ids for all the jobs
        for job in self.job_list[user_id]:
            job.not_completed_stage_ids = [stage.id for stage in job.stages]
            for tstage in job.stages:
                job.stagesDict[tstage.id] = tstage
            job.submitted_stage_ids = list()
            job.completed_stage_ids = list()

        # this part shall be changed, sort by the submission time of a job
        # job.submit_time
        self.job_list[user_id] = sorted(
            self.job_list[user_id], key=lambda job: job.index)  # sort job_list by job_index
Example #42
from job import Job
from scheduler import get_ready_queue

# input the number of test cases
T = int(raw_input())
for tc in xrange(1, T + 1):

    # input the number of jobs and the pattern string
    line = str(raw_input()).split(' ')
    num_jobs = int(line[0])
    pattern = line[1]
    time_quantum = int(line[2]) if pattern == 'RR' else -1

    # create the job list
    job_list = []

    # add jobs to the list
    for i in xrange(int(num_jobs)):
        A, B, P = map(int, str(raw_input()).split(' '))
        if pattern != 'P':
            P = A  #make priority the arrival time for FCFS
        job = Job(arrival=A, duration=B, priority=P, job_id=i + 1)
        job_list.append(job)

    # get the ready queue
    print tc

    ready_queue = get_ready_queue(job_list, pattern, time_quantum)
    for job in ready_queue:
        print '%d %d %d' % (job.arrival, job.id, job.duration) + (
            'X' if job.terminate else '')
Example #43
        'POPULATION_MAP': popmap_path,
        'BATCH_ID': 0,
        'OUTPUT_PATH': output_dir,
        'SAMPLES_DIRECTORY': samples_path,
        'DISABLE_DATABASE': 'True'
    }

    settings = {
        "SHELL": "!/bin/sh",
        "N_THREADS": "8",
        "MEM": "mem=8G",
        "H_VMEM": "h_vmem=10G",
        "JOB_NAME": 'de_novo_n_' + str(n)
    }

    de_novo = Job(stacks, 'denovo_map')
    de_novo.set_parameters(par)
    de_novo.set_user_settings(settings)
    de_novo.create_qsub_file(sh_file)

    qsub_file.write('qsub ' + sh_file + '\n')

for M in M_values:

    output_dir = os.path.join(results_dir, 'M_' + str(M))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    sh_file = os.path.join(shell_dir, 'denovo_map_M_' + str(M) + '.sh')

    par = {
Example #44
def readJobs(disk, cpu):
    jobs = []
    with open(sys.argv[3], "r") as file:
        n_segments = 0
        ios = []

        # List of jobs, ordered by start time
        for line in file:
            segment_tree = Tree()
            line = line.split("\n")[0]
            items = line.split(" ")
            n_io = int(items[2])
            n_segments = int(items[5])
            # Organize the job's segments
            for i in range(0, n_segments):
                line = next(file)
                line = line.split("\n")[0]
                items_segment = line.split(" ")
                if len(items_segment) == 2:
                    segment_tree.add_node(int(items_segment[0]),
                                          items_segment[1])
                else:
                    segment_tree.add_node(int(items_segment[0]),
                                          items_segment[1],
                                          int(items_segment[2]))
            job = Job(items[0], items[1], None, items[3], items[4],
                      segment_tree)
            # Organize the IOs
            for i in range(0, n_io):
                line = next(file)
                line = line.split("\n")[0]
                items_io = line.split(" ")
                io_name = items_io[0]
                if io_name == DeviceType.Printer:
                    ios.append(
                        Device(
                            DeviceType.Printer,
                            int(
                                random.normal(cpu.TIMESLICE / 2,
                                              cpu.TIMESLICE / 10))))
                elif io_name == DeviceType.Reader:
                    ios.append(
                        Device(
                            DeviceType.Reader,
                            int(
                                random.normal(cpu.TIMESLICE / 2,
                                              cpu.TIMESLICE / 10))))
                elif io_name == DeviceType.Disk:
                    filename = items_io[1]
                    if items_io[2] == "r":
                        read_write = DiskOperation.Read
                    else:
                        read_write = DiskOperation.Write
                    n_opers = int(items_io[3])
                    size = int(items_io[4])
                    is_private = items_io[5]
                    sys_file = FileSystem(filename, job, size, is_private)
                    time = int(
                        random.normal(cpu.TIMESLICE / 2, cpu.TIMESLICE / 10))
                    ios.append(
                        Device(DeviceType.Disk, time, sys_file, read_write,
                               n_opers))
                    disk.addFile(sys_file)
            job.ios = ios
            # Attach the job to each segment to make printing the memory layout easier
            for i in range(0, segment_tree.size):
                segment_tree.__getitem__(i).job = job
            jobs.append(job)
            ios = []
    printJobs(jobs)
    return jobs
Example #45
 def setUp(self):
     self.job = Job('job0', 9, 9, 1762, 0, 14347)
Example #46
 def test_create_dc_dmdsec_no_dc_no_transfers_dir(self):
     """It should not fail if no transfers directory exists."""
     badsipuuid = 'dnednedn-5bd2-4249-84a1-2f00f725b981'
     dmdsec_elem = create_mets_v2.createDublincoreDMDSecFromDBData(Job("stub", "stub", []), self.siptypeuuid, badsipuuid, THIS_DIR)
     # Expect no element
     assert dmdsec_elem is None
Example #47
    def __init__(self, gender, age):
        self.gender = gender
        self.age = age
        self.stage = next(stage for stage in self.LIFE_STAGES if age in stage.span)

        # Names
        self.name = None  # May depend on family
        self.surname = None
        self.original_surname = self.surname  # Depends on surname

        # Basics
        self.gender_identity = None  # Linked to gender
        self.sexual_orientation = None
        self.target_gender = []  # Linked to sexual_orientation
        self.race = None
        self.relationship_orientation = None
        self.can_have_bio_children = False
        self.conditions = []

        # Default vars
        self.is_alive = True
        self.is_adopted = False
        self.was_in_foster_care = False
        self.is_twin = False
        self.is_triplet = False
        self.relationship_status = self.SINGLE
        self.is_pregnant = False
        self.is_in_adoption_process = False
        self.expecting_num_of_children = -1
        self.desired_children_left = 10

        # Family
        self.parents = []
        self.children = []
        self.adoptive_parents = []
        self.adoptive_children = []
        self.partners = []
        self.spouses = []
        self.deceased_partners = []
        self.deceased_spouses = []
        self.ex_partners = []
        self.ex_spouses = []

        # Death (Default: Old age)
        self.death_date = False
        self.death_cause = self.OLD_AGE  # Depends on death_date

        # Will be initialized once Teen if applicable
        self.come_out_date = -1
        self.move_out_date = -1
        self.thrown_out_date = -1

        # Relationship traits -> Will be initialized once Young Adult
        self.is_liberal = None
        self.wants_domestic_partnership = False
        self.wants_marriage = False
        self.wants_children = False
        self.in_love_with_family = False
        self.in_love_with_intergenerational = False
        self.in_love_with_another_race = False
        self.in_love_as_throuple = False
        self.in_love_date = -1
        self.single_adoption_process_date = -1
        self.single_adoption_date = -1

        # Degree
        self.education = Education()
        self.school_start_date = -1
        self.will_do_bachelor = False
        self.will_do_master = False
        self.will_do_doctor = False

        # Professions -> Will be initialized once Young Adult
        # self.occupation = None
        # self.employment = None
        # self.current_job = None
        # self.job_history = []
        self.job = Job()

        # Addiction attributes -> Will be initialized once Young Adult
        self.will_become_drug_addict = False
        self.will_become_alcohol_addict = False
        self.addiction_date = -1
        self.is_drug_addict = False
        self.is_alcohol_addict = False
        self.was_drug_addict = False
        self.was_alcohol_addict = False
        self.rehabilitation_date = -1
        self.relapse_date = -1

        # Will be initialized if within Neighborhood
        self.apartment_id = -1
        self.is_neighbor = False
        self.neighbor_friends = []
        self.move_in_date = -1
        self.house_to_move_in = -1

        # Depression
        self.depression_date = -1
        self.therapy_date = -1
        self.depression_recovery_date = -1
Example #48
 def test_dmdsec_from_csv_parsed_metadata_dc_only(self):
     """It should only create a DC dmdSec from parsed metadata."""
     data = collections.OrderedDict([
         ("dc.title", ["Yamani Weapons"]),
         ("dc.creator", ["Keladry of Mindelan"]),
         ("dc.subject", ["Glaives"]),
         ("dc.description", ["Glaives are cool"]),
         ("dc.publisher", ["Tortall Press"]),
         ("dc.contributor", [u"雪 ユキ".encode('utf8')]),
         ("dc.date", ["2015"]),
         ("dc.type", ["Archival Information Package"]),
         ("dc.format", ["parchement"]),
         ("dc.identifier", ["42/1"]),
         ("dc.source", ["Numair's library"]),
         ("dc.relation", ["None"]),
         ("dc.language", ["en"]),
         ("dc.rights", ["Public Domain"]),
         ("dcterms.isPartOf", ["AIC#42"]),
     ])
     # Test
     ret = create_mets_v2.createDmdSecsFromCSVParsedMetadata(Job("stub", "stub", []), data)
     # Verify
     assert ret
     assert len(ret) == 1
     dmdsec = ret[0]
     assert dmdsec.tag == '{http://www.loc.gov/METS/}dmdSec'
     assert 'ID' in dmdsec.attrib
     mdwrap = dmdsec[0]
     assert mdwrap.tag == '{http://www.loc.gov/METS/}mdWrap'
     assert 'MDTYPE' in mdwrap.attrib
     assert mdwrap.attrib['MDTYPE'] == 'DC'
     xmldata = mdwrap[0]
     assert xmldata.tag == '{http://www.loc.gov/METS/}xmlData'
     # Elements are children of dublincore tag
     dc_elem = xmldata[0]
     assert dc_elem.tag == '{http://purl.org/dc/terms/}dublincore'
     assert len(dc_elem) == 15
     assert dc_elem[0].tag == '{http://purl.org/dc/elements/1.1/}title'
     assert dc_elem[0].text == 'Yamani Weapons'
     assert dc_elem[1].tag == '{http://purl.org/dc/elements/1.1/}creator'
     assert dc_elem[1].text == 'Keladry of Mindelan'
     assert dc_elem[2].tag == '{http://purl.org/dc/elements/1.1/}subject'
     assert dc_elem[2].text == 'Glaives'
     assert dc_elem[3].tag == '{http://purl.org/dc/elements/1.1/}description'
     assert dc_elem[3].text == 'Glaives are cool'
     assert dc_elem[4].tag == '{http://purl.org/dc/elements/1.1/}publisher'
     assert dc_elem[4].text == 'Tortall Press'
     assert dc_elem[5].tag == '{http://purl.org/dc/elements/1.1/}contributor'
     assert dc_elem[5].text == u'雪 ユキ'
     assert dc_elem[6].tag == '{http://purl.org/dc/elements/1.1/}date'
     assert dc_elem[6].text == '2015'
     assert dc_elem[7].tag == '{http://purl.org/dc/elements/1.1/}type'
     assert dc_elem[7].text == 'Archival Information Package'
     assert dc_elem[8].tag == '{http://purl.org/dc/elements/1.1/}format'
     assert dc_elem[8].text == 'parchement'
     assert dc_elem[9].tag == '{http://purl.org/dc/elements/1.1/}identifier'
     assert dc_elem[9].text == '42/1'
     assert dc_elem[10].tag == '{http://purl.org/dc/elements/1.1/}source'
     assert dc_elem[10].text == "Numair's library"
     assert dc_elem[11].tag == '{http://purl.org/dc/elements/1.1/}relation'
     assert dc_elem[11].text == 'None'
     assert dc_elem[12].tag == '{http://purl.org/dc/elements/1.1/}language'
     assert dc_elem[12].text == 'en'
     assert dc_elem[13].tag == '{http://purl.org/dc/elements/1.1/}rights'
     assert dc_elem[13].text == 'Public Domain'
     assert dc_elem[14].tag == '{http://purl.org/dc/terms/}isPartOf'
     assert dc_elem[14].text == 'AIC#42'
Example #49
                            par['value'] = v
                    dic_param[str(par['name'])] = par['value']
                #print('dic_param ' + str(dic_param))

                for i in fun['out_cv']:
                    #print('map',str(fun['id']),str(i))
                    #map_queue[str(fun['id']),str(i)] = queue.Queue()
                    MapQueue.queues[str(fun['id']), str(i)] = queue.Queue()

                t = FuncThread(target=eval(fun['name']),
                               kwargs=dic_param,
                               in_cv_q=fun['in_cv'],
                               out_cv_q=fun['out_cv'],
                               uid=fun['id'])
                #print(fun['level'])
                thread_queue.put(Job(fun['level'], t))

        if not os.path.exists('/tmp/images'):
            os.makedirs('/tmp/images')

        dtime = {}
        while not thread_queue.empty():
            t = thread_queue.get()
            t.thread.start()
            t.thread.join()
            dtime[t.thread.uid] = t.thread.time

        with open('/tmp/images/time.json', 'w') as f:
            json.dump(dict(time=dtime), f)

    response = dict(status='Success')
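The jobs come back out of thread_queue strictly by level, which suggests a priority queue keyed on each function's level. A minimal sketch of a compatible wrapper, assuming (not shown in the fragment) that thread_queue is a queue.PriorityQueue and that Job orders only by level:

import queue
import threading

class Job:
    def __init__(self, level, thread):
        self.level = level
        self.thread = thread

    def __lt__(self, other):
        # PriorityQueue orders entries with <, so lower levels run first.
        return self.level < other.level

thread_queue = queue.PriorityQueue()
thread_queue.put(Job(2, threading.Thread(target=print, args=("level 2",))))
thread_queue.put(Job(1, threading.Thread(target=print, args=("level 1",))))

while not thread_queue.empty():
    t = thread_queue.get()  # lowest level first: prints "level 1", then "level 2"
    t.thread.start()
    t.thread.join()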
Example #50
0
def run(url):
    soup = get_javascript_soup_delayed(url, 'current-openings-item')
    jobs_list = soup.find_all('div', {'class': 'current-openings-details'})
    job_class = Job(organization, "")
    job_class.organization_id = organization_id
    insert_count = 0
    for job_entry in jobs_list:
        job_class.title = job_entry.find('span', {
            'class': 'current-opening-title'
        }).text.strip()
        if job_entry.find('span', {'class': 'current-opening-location-item'}):
            job_class.location = job_entry.find(
                'span', {
                    'class': 'current-opening-location-item'
                }).text.strip()
        else:
            job_class.location = ''
        # Calculate post date relative to current date and store it
        posted_ago = job_entry.find('span', {
            'class': 'current-opening-post-date'
        }).text.split(' ')
        if posted_ago[0] == 'a':
            job_class.post_date = date_ago(1, posted_ago[1])
        elif posted_ago[0].lower() == 'today':
            job_class.post_date = date_ago(0, 'day')
        elif posted_ago[0].lower() == 'yesterday':
            job_class.post_date = date_ago(1, 'day')
        elif posted_ago[0] == '30+':
            # over 30 days ago
            job_class.post_date = date_ago(31, posted_ago[1])
        else:
            job_class.post_date = date_ago(int(posted_ago[0]), posted_ago[1])
        if job_entry.find('span',
                          {'class': 'current-opening-worker-catergory'}):
            job_class.full_or_part = job_entry.find(
                'span', {
                    'class': 'current-opening-worker-catergory'
                }).text.strip()
        else:
            job_class.full_or_part = ''
        job_class.info_link = 'https://workforcenow.adp.com/mascsr/default/mdf/recruitment/recruitment.html?cid=b4842dc2-cd32-4f0f-88d3-b259fbc96f09&ccId=19000101_000001&type=MP&lang'
        insert_count += job_insert(job_class)
    return insert_count
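date_ago is called above but not defined in this snippet. A hypothetical sketch, assuming it maps a count plus a unit word ('day', 'week', 'month', singular or plural) to a past date:

import datetime

def date_ago(count, unit):
    # Hypothetical helper; the real implementation is not shown in this example.
    days_per_unit = {'day': 1, 'week': 7, 'month': 30}
    unit = unit.lower().rstrip('s')  # accept 'days', 'weeks', 'months' too
    delta = datetime.timedelta(days=count * days_per_unit.get(unit, 1))
    return datetime.date.today() - delta

print(date_ago(3, 'weeks'))  # the date 21 days ago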
Example #51
0
 def test_get_included_structmap_valid_mets(self):
     """Test the valid output of custom structmaps in create_mets_v2."""
     self.generate_aip_mets_v2_state()
     self._fixup_fileid_state()
     default_structmap = "mets_structmap.xml"
     Result = collections.namedtuple(
         "Result", "structmap_name files replaced_count structmap_id"
     )
     results = [
         Result(None, ["objects/test_file.flac"], 1, None),
         Result(
             "simple_book_structmap.xml",
             ["objects/test_file.jpg", "objects/test_file.png"],
             2,
             None,
         ),
         Result("mets_area_structmap.xml", ["test_file.mp3"], 6, None),
         Result(
             "unicode_simple_book_structmap.xml",
             ["objects/página_de_prueba.jpg", "objects/página_de_prueba.png"],
             2,
             "custom_structmap",
         ),
         Result(
             "nested_file_structmap.xml",
             ["objects/nested_dir/nested_file.rdata"],
             6,
             None,
         ),
         Result(
             "complex_book_structmap.xml",
             [
                 "objects/nested_dir/duplicate_file_name.png",
                 "objects/duplicate_file_name.png",
             ],
             2,
             None,
         ),
         Result(
             "path_with_spaces_structmap.xml",
             ["objects/dir-with-dashes/file with spaces.bin"],
             1,
             None,
         ),
     ]
     for res in results:
         structmap_path = os.path.join(
             self.objects_dir,
             "metadata",
             "transfers",
             "custom-structmap-41ab1f1a-34d0-4a83-a2a3-0ad1b1ee1c51",
             (default_structmap if not res.structmap_name else res.structmap_name),
         )
         assert os.path.isfile(structmap_path)
         assert os.path.isfile(self.mets_xsd_path)
         self.validate_mets(self.mets_xsd_path, structmap_path)
         # Ensure that we test default behavior.
         if not res.structmap_name:
             custom_structmap = create_mets_v2.include_custom_structmap(
                 job=Job("stub", "stub", []),
                 baseDirectoryPath=self.transfer_dir,
                 state=self.state,
             )[0]
         else:
             # Expand the scope of testing to all our sample structmaps.
             custom_structmap = create_mets_v2.include_custom_structmap(
                 job=Job("stub", "stub", []),
                 baseDirectoryPath=self.transfer_dir,
                 state=self.state,
                 custom_structmap=res.structmap_name,
             )[0]
         # All custom structmaps that are used and return from this function
         # should remain valid.
         self.validate_mets(self.mets_xsd_path, custom_structmap)
         assert custom_structmap.tag == "{{{}}}structMap".format(ns.metsNS)
         if not res.structmap_id:
             assert custom_structmap.attrib["ID"].lower() == "structmap_{}".format(
                 self.state.globalStructMapCounter
             ), "structmap id is incorrect"
         else:
             assert (
                 custom_structmap.attrib["ID"].lower() == res.structmap_id
             ), "structmap id hasn't been maintained"
         fids = custom_structmap.xpath(
             "//*[@FILEID]", namespaces={"mets": ns.metsNS}
         )
         assert len(fids) == res.replaced_count, "Count of FILEIDs is incorrect"
         assert len(set([fid.attrib["FILEID"] for fid in fids])) == len(
             res.files
         ), "Uneven replacement of IDs for files in structmap"
         for fileid in [fid.attrib["FILEID"] for fid in fids]:
             assert (
                 fileid in self.state.fileNameToFileID.values()
             ), "Expected FILEID not in returned structmap"
Example #52
0
 def GET(self):
     data = web.input(size=[])
     j = Job(5, 'fill', options.output, data.size[0])
     runQueue.put(j)
     return j["jobid"]
Example #53
0
 def GET(self):
     j = Job(1, 'stop')
     runQueue.put(j)
     return j["jobid"]
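Both handlers only enqueue a Job and hand back its id; the work happens elsewhere. A hedged sketch of the consuming side, assuming runQueue is a standard queue.Queue drained by a single worker thread (the dict-style access mirrors j["jobid"] above):

import queue
import threading

runQueue = queue.Queue()

def worker():
    while True:
        j = runQueue.get()
        try:
            print("running job", j["jobid"])  # dispatch on the job's command here
        finally:
            runQueue.task_done()

threading.Thread(target=worker, daemon=True).start()
runQueue.put({"jobid": 1, "command": "stop"})  # stand-in for a real Job
runQueue.join()  # block until the worker has drained the queue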
Example #54
0
    if res:
        job_id = (int(float(res.group(SwfField.JOB_ID.value))))
        nb_res = int(float(res.group(
            SwfField.ALLOCATED_PROCESSOR_COUNT.value)))
        wait_time = float(res.group(SwfField.WAIT_TIME.value))
        run_time = float(res.group(SwfField.RUN_TIME.value))
        submit_time = max(0, float(res.group(SwfField.SUBMIT_TIME.value)))
        wall_time = max(run_time,
                        float(res.group(SwfField.REQUESTED_TIME.value)))

        if (nb_res > 0 and wall_time > run_time and run_time > 0
                and submit_time >= 0):
            job = Job(job_id=job_id,
                      nb_res=nb_res,
                      wait_time=wait_time,
                      run_time=run_time,
                      submit_time=submit_time,
                      wall_time=wall_time)

            jobs[job_id] = job

##############
# Simulation #
##############

# Let's simulate where the jobs should have been placed by a simple
# "take first resources" policy. Let's create a list of events
events = []

for job_id in jobs:
    job = jobs[job_id]
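The snippet stops just as the replay begins. A sketch of the announced "take first resources" placement, reusing the jobs dict built above and assuming Job exposes its constructor arguments as attributes; nb_res_total and the skip-if-full rule are assumptions:

nb_res_total = 64  # assumed cluster size; not given in the snippet
free = set(range(nb_res_total))
running = []  # (finish_time, allocated resource ids)

for job in sorted(jobs.values(), key=lambda j: j.submit_time):
    now = job.submit_time
    # Release resources of jobs that finished before this submission.
    for finish, alloc in [r for r in running if r[0] <= now]:
        running.remove((finish, alloc))
        free |= alloc
    if len(free) >= job.nb_res:
        alloc = set(sorted(free)[:job.nb_res])  # take the first free resources
        free -= alloc
        running.append((now + job.run_time, alloc))
    # A real replay would queue the job until resources free up.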
Example #55
0
 def test_dmdsec_from_csv_parsed_metadata_dc_only(self):
     """It should only create a DC dmdSec from parsed metadata."""
     data = collections.OrderedDict(
         [
             ("dc.title", ["Yamani Weapons"]),
             ("dc.creator", ["Keladry of Mindelan"]),
             ("dc.subject", ["Glaives"]),
             ("dc.description", ["Glaives are cool"]),
             ("dc.publisher", ["Tortall Press"]),
             ("dc.contributor", ["雪 ユキ".encode("utf8")]),
             ("dc.date", ["2015"]),
             ("dc.type", ["Archival Information Package"]),
             ("dc.format", ["parchement"]),
             ("dc.identifier", ["42/1"]),
             ("dc.source", ["Numair's library"]),
             ("dc.relation", ["None"]),
             ("dc.language", ["en"]),
             ("dc.rights", ["Public Domain"]),
             ("dcterms.isPartOf", ["AIC#42"]),
         ]
     )
     # Test
     state = create_mets_v2.MetsState()
     ret = create_mets_v2.createDmdSecsFromCSVParsedMetadata(
         Job("stub", "stub", []), data, state
     )
     # Verify
     assert ret
     assert len(ret) == 1
     dmdsec = ret[0]
     assert dmdsec.tag == "{http://www.loc.gov/METS/}dmdSec"
     assert "ID" in dmdsec.attrib
     mdwrap = dmdsec[0]
     assert mdwrap.tag == "{http://www.loc.gov/METS/}mdWrap"
     assert "MDTYPE" in mdwrap.attrib
     assert mdwrap.attrib["MDTYPE"] == "DC"
     xmldata = mdwrap[0]
     assert xmldata.tag == "{http://www.loc.gov/METS/}xmlData"
     # Elements are children of dublincore tag
     dc_elem = xmldata[0]
     assert dc_elem.tag == "{http://purl.org/dc/terms/}dublincore"
     assert len(dc_elem) == 15
     assert dc_elem[0].tag == "{http://purl.org/dc/elements/1.1/}title"
     assert dc_elem[0].text == "Yamani Weapons"
     assert dc_elem[1].tag == "{http://purl.org/dc/elements/1.1/}creator"
     assert dc_elem[1].text == "Keladry of Mindelan"
     assert dc_elem[2].tag == "{http://purl.org/dc/elements/1.1/}subject"
     assert dc_elem[2].text == "Glaives"
     assert dc_elem[3].tag == "{http://purl.org/dc/elements/1.1/}description"
     assert dc_elem[3].text == "Glaives are cool"
     assert dc_elem[4].tag == "{http://purl.org/dc/elements/1.1/}publisher"
     assert dc_elem[4].text == "Tortall Press"
     assert dc_elem[5].tag == "{http://purl.org/dc/elements/1.1/}contributor"
     assert dc_elem[5].text == "雪 ユキ"
     assert dc_elem[6].tag == "{http://purl.org/dc/elements/1.1/}date"
     assert dc_elem[6].text == "2015"
     assert dc_elem[7].tag == "{http://purl.org/dc/elements/1.1/}type"
     assert dc_elem[7].text == "Archival Information Package"
     assert dc_elem[8].tag == "{http://purl.org/dc/elements/1.1/}format"
     assert dc_elem[8].text == "parchement"
     assert dc_elem[9].tag == "{http://purl.org/dc/elements/1.1/}identifier"
     assert dc_elem[9].text == "42/1"
     assert dc_elem[10].tag == "{http://purl.org/dc/elements/1.1/}source"
     assert dc_elem[10].text == "Numair's library"
     assert dc_elem[11].tag == "{http://purl.org/dc/elements/1.1/}relation"
     assert dc_elem[11].text == "None"
     assert dc_elem[12].tag == "{http://purl.org/dc/elements/1.1/}language"
     assert dc_elem[12].text == "en"
     assert dc_elem[13].tag == "{http://purl.org/dc/elements/1.1/}rights"
     assert dc_elem[13].text == "Public Domain"
     assert dc_elem[14].tag == "{http://purl.org/dc/terms/}isPartOf"
     assert dc_elem[14].text == "AIC#42"
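For reference, this is the tree the assertions walk, rebuilt with lxml; tag names, order and text come from the asserts above, while the ID value and the serialization are only illustrative:

from lxml import etree

metsNS = "http://www.loc.gov/METS/"
dcNS = "http://purl.org/dc/elements/1.1/"
dctermsNS = "http://purl.org/dc/terms/"

dmdsec = etree.Element("{%s}dmdSec" % metsNS, ID="dmdSec_1")  # ID value assumed
mdwrap = etree.SubElement(dmdsec, "{%s}mdWrap" % metsNS, MDTYPE="DC")
xmldata = etree.SubElement(mdwrap, "{%s}xmlData" % metsNS)
dc = etree.SubElement(xmldata, "{%s}dublincore" % dctermsNS)
etree.SubElement(dc, "{%s}title" % dcNS).text = "Yamani Weapons"
etree.SubElement(dc, "{%s}creator" % dcNS).text = "Keladry of Mindelan"
# ... the remaining dc.* elements follow in the same order as the CSV data ...
etree.SubElement(dc, "{%s}isPartOf" % dctermsNS).text = "AIC#42"
print(etree.tostring(dmdsec, pretty_print=True).decode())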
Example #56
0
def upload():
    """
    Handles a file upload.
    """
    if dispatcher is None:
        print("Error: You are not connected to the server. Use CONN command first.")
        return

    file_name = input("Enter the name of the local file to upload: ")

    if file_name == "":
        return

    try:
        with open(SUBDIR + file_name, "rb") as binary_file:
            # Read the file
            file_contents = binary_file.read()

        # Ask about high reliability
        hr = ""
        while hr != "Y" and hr != "N":
            hr = input("Use high reliability? [Y/N]: ").upper()

        high_reliability = hr == "Y"
        print("Using high reliability: {}".format(high_reliability))

        if len(file_contents) > 2 * 2**20:
            print("Warning: This file is large (> 2 MB) and may take a while to upload!")

        print("Uploading...")

        t0 = time()

        # Send the command and file name
        dispatcher.put_job(Job("UPLD_INIT", data={
            "file_name": file_name,
            "high_reliability": high_reliability,
            "file_size": len(file_contents)
        }))

        # Check the server is ready to receive
        result = dispatcher.get_external_result()
        if result.result["outcome"] != "ready to receive":
            print("Error, server not ready to receive: {}".format(result.result["outcome"]))
            return

        # Send the file contents to the server
        upld_data_job = Job("UPLD_DATA",
                            token=result.token,
                            data={"file_name": file_name,
                                  "file_contents": file_contents,
                                  "high_reliability": high_reliability,
                                  "file_size": len(file_contents)})
        dispatcher.put_job(upld_data_job)

        # Get the response from the upload
        result = dispatcher.get_external_result().result

        t1 = time()

        if result["outcome"] != "success":
            print("Something went wrong. Response was: {}".format(result))
        else:
            print("Successfully uploaded {} ({:,} bytes) in {:,} seconds.".format(result["file_name"],
                                                                                  result["file_size"],
                                                                                  round(t1 - t0, 4)))

    except FileNotFoundError:
        print("Error: File '{}' not found.".format(file_name))
Example #57
0
 def getJobsWithName(self, name):
     jobs = self.jobs.find(name=name)
     jobs = [Job(j, self) for j in jobs]
     return jobs
Example #58
0
 def setUp(self):
     self.job = Job('job0', 9, 9, 0, 10000, 0)
Example #59
0
 def peek(self, count=None):
     '''Similar to the pop command, except that it merely peeks at the next items'''
     response = self.client._peek([self.name], [count or 1, repr(time.time())])
     results = [Job(self.client, **json.loads(r)) for r in response]
     if count is None:
         return results[0] if results else None
     return results
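Note the two return shapes: with no count the caller gets a single Job (or None on an empty queue), while an explicit count always yields a list. A usage sketch, with q assumed to be an instance of this queue class:

job = q.peek()           # one Job, or None if the queue is empty
jobs = q.peek(count=10)  # a list, possibly shorter than ten items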
Example #60
0
    def test_rows_processed_and_database_content(self):
        """Test CSV import using the RightsReader class.

        It should process valid rows of the CSV file.
        It should skip the third row data as basis/act is duplicate of earlier row.
        It should populate the rights-related models using data from the CSV file.
        """
        rights_csv_filepath = os.path.join(THIS_DIR, 'fixtures/rights.csv')
        parser = rights_from_csv.RightCsvReader(
            Job("stub", "stub", []), self.transfer_uuid, rights_csv_filepath
        )
        rows_processed = parser.parse()

        # Test rows processed and model instance counts
        assert rows_processed == 8
        # One row in fixture CSV skipped due to duplicate basis/act combination
        assert models.RightsStatement.objects.count() == 7
        assert models.RightsStatementLicense.objects.count() == 1
        assert models.RightsStatementCopyright.objects.count() == 2
        assert models.RightsStatementStatuteInformation.objects.count() == 1
        assert models.RightsStatementOtherRightsInformation.objects.count() == 3
        assert models.RightsStatementCopyrightDocumentationIdentifier.objects.count() == 2
        assert models.RightsStatementCopyrightNote.objects.count() == 2
        assert models.RightsStatementLicenseDocumentationIdentifier.objects.count() == 1
        assert models.RightsStatementLicenseNote.objects.count() == 1
        assert models.RightsStatementStatuteDocumentationIdentifier.objects.count() == 1
        assert models.RightsStatementStatuteInformationNote.objects.count() == 1
        # Not created as all related columns are blank
        assert models.RightsStatementOtherRightsDocumentationIdentifier.objects.count() == 0
        assert models.RightsStatementOtherRightsInformationNote.objects.count() == 1
        assert models.RightsStatementRightsGranted.objects.count() == 7
        assert models.RightsStatementRightsGrantedRestriction.objects.count() == 5
        assert models.RightsStatementRightsGrantedNote.objects.count() == 3

        # Test row 1
        row_1_rights_statement = models.RightsStatement.objects.order_by('pk')[0]
        assert (
            row_1_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_1_rights_statement.metadataappliestoidentifier == self.file_1_uuid
        assert row_1_rights_statement.status == 'ORIGINAL'
        assert row_1_rights_statement.rightsbasis == 'Copyright'

        row_1_copyright_info = models.RightsStatementCopyright.objects.order_by(
            'pk')[0]
        assert row_1_copyright_info.rightsstatement == row_1_rights_statement
        assert row_1_copyright_info.copyrightstatus == 'cop status'
        assert row_1_copyright_info.copyrightjurisdiction == 'cop juris'
        assert row_1_copyright_info.copyrightstatusdeterminationdate == '2001-01-01'
        assert row_1_copyright_info.copyrightapplicablestartdate == '2002-02-02'
        assert row_1_copyright_info.copyrightenddateopen is False
        assert row_1_copyright_info.copyrightapplicableenddate == '2003-03-03'

        row_1_copyright_identifier = models.RightsStatementCopyrightDocumentationIdentifier.objects.order_by(
            'pk')[0]
        assert row_1_copyright_identifier.copyrightdocumentationidentifiertype == 'cop type'
        assert row_1_copyright_identifier.copyrightdocumentationidentifierrole == 'cop role'

        row_1_copyright_note = models.RightsStatementCopyrightNote.objects.order_by(
            'pk')[0]
        assert row_1_copyright_note.rightscopyright == row_1_copyright_info
        assert row_1_copyright_note.copyrightnote == 'cop note'

        row_1_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[0]
        assert row_1_grant.rightsstatement == row_1_rights_statement
        assert row_1_grant.act == 'cop act'
        assert row_1_grant.startdate == '2004-04-04'
        assert row_1_grant.enddateopen is False
        assert row_1_grant.enddate == '2005-05-05'

        row_1_restriction = models.RightsStatementRightsGrantedRestriction.objects.order_by(
            'pk')[0]
        assert row_1_restriction.rightsgranted == row_1_grant
        assert row_1_restriction.restriction == 'Allow'

        row_1_grant_note = models.RightsStatementRightsGrantedNote.objects.order_by(
            'pk')[0]
        assert row_1_grant_note.rightsgranted == row_1_grant
        assert row_1_grant_note.rightsgrantednote == 'cop grant note'

        # Test row 3 (row 2 is skipped as it has the same act and basis as a previous right for the file)
        row_3_rights_statement = models.RightsStatement.objects.order_by('pk')[1]
        assert (
            row_3_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_3_rights_statement.metadataappliestoidentifier == self.file_1_uuid
        assert row_3_rights_statement.status == 'ORIGINAL'
        assert row_3_rights_statement.rightsbasis == 'Copyright'

        row_3_copyright_info = models.RightsStatementCopyright.objects.order_by(
            'pk')[1]
        assert row_3_copyright_info.rightsstatement == row_3_rights_statement
        assert row_3_copyright_info.copyrightstatus == 'cop status3'
        assert row_3_copyright_info.copyrightjurisdiction == 'cop juris3'
        assert row_3_copyright_info.copyrightstatusdeterminationdate == '2001-01-01'
        assert row_3_copyright_info.copyrightapplicablestartdate == '2002-02-02'
        assert row_3_copyright_info.copyrightenddateopen is False
        assert row_3_copyright_info.copyrightapplicableenddate == '2003-03-03'

        row_3_copyright_identifier = models.RightsStatementCopyrightDocumentationIdentifier.objects.order_by(
            'pk')[1]
        assert row_3_copyright_identifier.copyrightdocumentationidentifiertype == 'cop type3'
        assert row_3_copyright_identifier.copyrightdocumentationidentifierrole is None

        row_3_copyright_note = models.RightsStatementCopyrightNote.objects.order_by(
            'pk')[1]
        assert row_3_copyright_note.rightscopyright == row_3_copyright_info
        assert row_3_copyright_note.copyrightnote == 'cop note 3'

        row_3_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[1]
        assert row_3_grant.rightsstatement == row_3_rights_statement
        assert row_3_grant.act == 'cop act2'
        assert row_3_grant.startdate == '2004-04-04'
        assert row_3_grant.enddateopen is False
        assert row_3_grant.enddate == '2005-05-05'

        row_3_restriction = models.RightsStatementRightsGrantedRestriction.objects.order_by(
            'pk')[1]
        assert row_3_restriction.rightsgranted == row_3_grant
        assert row_3_restriction.restriction == 'Allow'

        row_3_grant_note = models.RightsStatementRightsGrantedNote.objects.order_by(
            'pk')[1]
        assert row_3_grant_note.rightsgranted == row_3_grant
        assert row_3_grant_note.rightsgrantednote == 'cop grant note3'

        # Test row 4
        row_4_rights_statement = models.RightsStatement.objects.order_by('pk')[2]
        assert (
            row_4_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_4_rights_statement.metadataappliestoidentifier == self.file_1_uuid
        assert row_4_rights_statement.status == 'ORIGINAL'
        assert row_4_rights_statement.rightsbasis == 'License'

        row_4_license_info = models.RightsStatementLicense.objects.order_by(
            'pk')[0]
        assert row_4_license_info.rightsstatement == row_4_rights_statement
        assert row_4_license_info.licenseterms == 'lic terms'
        assert row_4_license_info.licenseapplicablestartdate == '1982-01-01'
        assert row_4_license_info.licenseenddateopen is False
        assert row_4_license_info.licenseapplicableenddate == '1983-02-02'

        row_4_license_identifier = models.RightsStatementLicenseDocumentationIdentifier.objects.order_by(
            'pk')[0]
        assert row_4_license_identifier.licensedocumentationidentifiertype == 'license type'
        assert row_4_license_identifier.licensedocumentationidentifiervalue == 'license value'
        assert row_4_license_identifier.licensedocumentationidentifierrole is None

        row_4_license_note = models.RightsStatementLicenseNote.objects.order_by(
            'pk')[0]
        assert row_4_license_note.rightsstatementlicense == row_4_license_info
        assert row_4_license_note.licensenote == 'lic note'

        row_4_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[2]
        assert row_4_grant.rightsstatement == row_4_rights_statement
        assert row_4_grant.act == 'lic act'
        assert row_4_grant.startdate is None
        assert row_4_grant.enddateopen is False
        assert row_4_grant.enddate is None

        row_4_restriction = models.RightsStatementRightsGrantedRestriction.objects.order_by(
            'pk')[2]
        assert row_4_restriction.rightsgranted == row_4_grant
        assert row_4_restriction.restriction == 'Allow'

        # Test row 5
        row_5_rights_statement = models.RightsStatement.objects.order_by('pk')[3]
        assert (
            row_5_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_5_rights_statement.metadataappliestoidentifier == self.file_1_uuid
        assert row_5_rights_statement.status == 'ORIGINAL'
        assert row_5_rights_statement.rightsbasis == 'Statute'

        row_5_statute_info = models.RightsStatementStatuteInformation.objects.order_by(
            'pk')[0]
        assert row_5_statute_info.rightsstatement == row_5_rights_statement
        assert row_5_statute_info.statutejurisdiction == 'stat juris'
        assert row_5_statute_info.statutedeterminationdate == '1972-02-02'
        assert row_5_statute_info.statutecitation == 'stat cit'
        assert row_5_statute_info.statuteapplicablestartdate == '1966-01-01'
        assert row_5_statute_info.statuteenddateopen is True
        assert row_5_statute_info.statuteapplicableenddate is None

        row_5_statute_identifier = models.RightsStatementStatuteDocumentationIdentifier.objects.order_by(
            'pk')[0]
        assert row_5_statute_identifier.statutedocumentationidentifiertype == 'statute type'
        assert row_5_statute_identifier.statutedocumentationidentifiervalue == 'statute value'
        assert row_5_statute_identifier.statutedocumentationidentifierrole == 'statute role'

        row_5_statute_note = models.RightsStatementStatuteInformationNote.objects.order_by(
            'pk')[0]
        assert row_5_statute_note.rightsstatementstatute == row_5_statute_info
        assert row_5_statute_note.statutenote == 'statute note'

        row_5_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[3]
        assert row_5_grant.rightsstatement == row_5_rights_statement
        assert row_5_grant.act == 'stat act'
        assert row_5_grant.startdate is None
        assert row_5_grant.enddateopen is False
        assert row_5_grant.enddate is None

        row_5_restriction = models.RightsStatementRightsGrantedRestriction.objects.order_by(
            'pk')[3]
        assert row_5_restriction.rightsgranted == row_5_grant
        assert row_5_restriction.restriction == 'Allow'

        # Test row 6
        row_6_rights_statement = models.RightsStatement.objects.order_by('pk')[4]
        assert (
            row_6_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_6_rights_statement.metadataappliestoidentifier == self.file_1_uuid
        assert row_6_rights_statement.status == 'ORIGINAL'
        assert row_6_rights_statement.rightsbasis == 'Other'

        row_6_other_info = models.RightsStatementOtherRightsInformation.objects.order_by(
            'pk')[0]
        assert row_6_other_info.rightsstatement == row_6_rights_statement
        assert row_6_other_info.otherrightsbasis == 'Other'
        assert row_6_other_info.otherrightsapplicablestartdate == '1945-01-01'
        assert row_6_other_info.otherrightsenddateopen is False
        assert row_6_other_info.otherrightsapplicableenddate == '1950-05-05'

        row_6_other_note = models.RightsStatementOtherRightsInformationNote.objects.order_by(
            'pk')[0]
        assert row_6_other_note.rightsstatementotherrights == row_6_other_info
        assert row_6_other_note.otherrightsnote == 'other note'

        row_6_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[4]
        assert row_6_grant.rightsstatement == row_6_rights_statement
        assert row_6_grant.act == 'other act'
        assert row_6_grant.startdate == '1920-01-01'
        assert row_6_grant.enddateopen is False
        assert row_6_grant.enddate == '1921-01-01'

        row_6_restriction = models.RightsStatementRightsGrantedRestriction.objects.order_by(
            'pk')[4]
        assert row_6_restriction.rightsgranted == row_6_grant
        assert row_6_restriction.restriction == 'Allow'

        row_6_grant_note = models.RightsStatementRightsGrantedNote.objects.order_by(
            'pk')[2]
        assert row_6_grant_note.rightsgranted == row_6_grant
        assert row_6_grant_note.rightsgrantednote == 'other grant note'

        # Test row 7
        row_7_rights_statement = models.RightsStatement.objects.order_by('pk')[5]
        assert (
            row_7_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_7_rights_statement.metadataappliestoidentifier == self.file_2_uuid
        assert row_7_rights_statement.status == 'ORIGINAL'
        assert row_7_rights_statement.rightsbasis == 'Donor'

        row_7_other_info = models.RightsStatementOtherRightsInformation.objects.order_by(
            'pk')[1]
        assert row_7_other_info.rightsstatement == row_7_rights_statement
        assert row_7_other_info.otherrightsbasis == 'Donor'
        assert row_7_other_info.otherrightsapplicablestartdate is None
        assert row_7_other_info.otherrightsenddateopen is False
        assert row_7_other_info.otherrightsapplicableenddate is None

        row_7_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[5]
        assert row_7_grant.rightsstatement == row_7_rights_statement
        assert row_7_grant.act == 'donor act'
        assert row_7_grant.startdate is None
        assert row_7_grant.enddateopen is False
        assert row_7_grant.enddate is None

        # Test row 8
        row_8_rights_statement = models.RightsStatement.objects.order_by('pk')[6]
        assert (
            row_8_rights_statement.metadataappliestotype
            == self.get_metadata_applies_to_type_for_file()
        )
        assert row_8_rights_statement.metadataappliestoidentifier == self.file_2_uuid
        assert row_8_rights_statement.status == 'ORIGINAL'
        assert row_8_rights_statement.rightsbasis == 'Policy'

        row_8_other_info = models.RightsStatementOtherRightsInformation.objects.order_by(
            'pk')[2]
        assert row_8_other_info.rightsstatement == row_8_rights_statement
        assert row_8_other_info.otherrightsbasis == 'Policy'
        assert row_8_other_info.otherrightsapplicablestartdate is None
        assert row_8_other_info.otherrightsenddateopen is False
        assert row_8_other_info.otherrightsapplicableenddate is None

        row_8_grant = models.RightsStatementRightsGranted.objects.order_by(
            'pk')[6]
        assert row_8_grant.rightsstatement == row_8_rights_statement
        assert row_8_grant.act == 'policy act'
        assert row_8_grant.startdate is None
        assert row_8_grant.enddateopen is False
        assert row_8_grant.enddate is None
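The fixtures/rights.csv file itself is not reproduced in this example; judging by the assertions, each row names a target file, a rights basis, an act, and the associated dates, notes and identifiers. A hypothetical two-row excerpt (all column names are guesses, not the real fixture header):

file,basis,status,jurisdiction,determination_date,start_date,end_date,grant_act,grant_restriction,grant_note
objects/file1.ext,Copyright,cop status,cop juris,2001-01-01,2002-02-02,2003-03-03,cop act,Allow,cop grant note
objects/file1.ext,License,,,,,,lic act,Allow,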