Example No. 1
    def generate_jobs(self):
        jobs = []
        exec_time = self.exec_time
        for i in range(self.iterations):
            jobs.append(
                Job(
                    line_task_id=self.id,
                    line_id=self.line_id,
                    device_id=self.device_id,
                    desired_state=1,
                    exec_time=exec_time,
                    expire_time=exec_time + timedelta(minutes=1),
                )
            )
            jobs.append(
                Job(
                    line_task_id=self.id,
                    line_id=self.line_id,
                    device_id=self.device_id,
                    desired_state=0,
                    exec_time=exec_time + timedelta(minutes=self.time),
                    expire_time=exec_time + timedelta(minutes=self.time + 1),
                )
            )
            exec_time = exec_time + timedelta(minutes=self.time_sleep)
            self.expire_time = exec_time + timedelta(minutes=1)

        self.jobs = jobs

        return self
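Each iteration emits a paired switch-on job (desired_state=1) and a delayed switch-off job (desired_state=0), then advances the clock by time_sleep minutes. A minimal standalone sketch of the same timeline logic, using only the standard library (the parameter names are illustrative stand-ins for the fields the method reads):

from datetime import datetime, timedelta

def build_schedule(start, on_minutes, sleep_minutes, iterations):
    """Return (exec_time, desired_state) pairs mirroring generate_jobs."""
    schedule = []
    exec_time = start
    for _ in range(iterations):
        schedule.append((exec_time, 1))                                  # switch on
        schedule.append((exec_time + timedelta(minutes=on_minutes), 0))  # switch off
        exec_time += timedelta(minutes=sleep_minutes)                    # next cycle
    return schedule

for t, state in build_schedule(datetime(2024, 1, 1, 8, 0), 5, 60, 3):
    print(t.isoformat(), state)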
Example No. 2
def generate_random_jobs(player_cities, current_city_id, count=30):
    """
    Generate a set of random jobs for a city

    :param player_cities:           Dict of current player cities
    :param current_city_id:         Id of current city the jobs are generated at
    :param count:                   Number of jobs to generate
    :return:                        Dict of jobs
    """
    logging.info(
        f'Generating {count} random jobs for city_id: {current_city_id}')
    player_city_ids = [
        city_id for city_id in player_cities.keys()
        if city_id != current_city_id
    ]

    jobs = {}

    for _ in range(count):
        # Pick the destination once so the revenue is computed for the
        # same city the job actually ships to.
        destination_city_id = random.choice(player_city_ids)
        job = Job(origin_city_id=current_city_id,
                  destination_city_id=destination_city_id,
                  revenue=calculate_job_revenue(
                      current_city_id, destination_city_id),
                  job_type=random.choice(['P', 'C']))
        jobs[job.id] = job.serialize()

    return jobs
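A quick way to exercise generate_random_jobs in isolation is with stub dependencies; the Job class and calculate_job_revenue below are placeholders standing in for the project's real ones, assuming the function above (and its random/logging imports) is in scope:

import uuid

class Job:  # stub standing in for the project's Job model
    def __init__(self, origin_city_id, destination_city_id, revenue, job_type):
        self.id = str(uuid.uuid4())
        self.origin_city_id = origin_city_id
        self.destination_city_id = destination_city_id
        self.revenue = revenue
        self.job_type = job_type

    def serialize(self):
        return vars(self)

def calculate_job_revenue(origin, destination):  # stub revenue rule
    return abs(hash((origin, destination))) % 100 + 1

jobs = generate_random_jobs({1: 'Lisbon', 2: 'Porto', 3: 'Faro'}, current_city_id=1, count=3)
print(len(jobs))  # 3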
Example No. 3
    def post(self):
        """
        Executes a prep job to create an image corpus for training.
        Use this method to start a prep job.
        """
        job_def = request.json
        job_def['process_json'] = True  # hardcoded: process the JSON file from the project folder
        job = Job(job_def['name'], job_def)
        job.type = 'preprocess'
        dt = newdt.now()
        job.start_time = int(dt.timestamp() * 1000)
        job.request = {
            'full_path': request.full_path,
            'remote_addr': request.remote_addr,
            'method': request.method
        }
        jb = aug_queue.enqueue(
            preprocess, job, job_timeout=-1, result_ttl=86400, ttl=-1)
        jb.meta['job_def'] = job_def
        dt = newdt.now()
        jb.meta['job_init_time'] = str(int(dt.timestamp() * 1000))
        jb.status = 'Running'
        jb.save_meta()
        json_str = job.to_json_string()
        st = {
            'BUCKET': job.bucket,
            'USE_GCS': job.use_gcs,
            'ACCESS_KEY': access_key,
            'SECRET_KEY': secret_key,
            'S3_URL': s3_url
        }
        storage = Storage(st)
        storage.upload_data(json_str,
                            'jobs/running/{}_0_preprocess_r_{}.json'.format(
                                str(job.start_time), jb.id),
                            contentType='application/json')
        storage.upload_data(json_str,
                            'jobs/all/{}_0_preprocess_r_{}.json'.format(
                                str(job.start_time), jb.id),
                            contentType='application/json')

        return {
            "status": jb.status,
            'job_id': jb.id,
            'meta': jb.meta
        }, 201
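Because the handler stashes job_def and the init time in the RQ job's meta before save_meta(), a caller can read them back by fetching the job from Redis. A minimal polling sketch, assuming a local Redis at the default port and a job id returned by this endpoint:

from redis import Redis
from rq.job import Job as RQJob

redis_conn = Redis.from_url('redis://localhost:6379')  # assumed Redis URL
rq_job = RQJob.fetch('job-id-from-response', connection=redis_conn)  # placeholder id
print(rq_job.get_status())          # queued / started / finished / failed
print(rq_job.meta.get('job_def'))   # metadata persisted via save_meta()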
Example No. 4
    def test_train_mlengine(self):
        train_job.train_job['name'] = str(uuid.uuid4())
        job = Job(train_job.train_job['name'], train_job.train_job)
        job.type = 'preprocess'
        job.init_temp(str(uuid.uuid4()))
        try:
            logging.info("step1")
            job.init_storage()
            logging.info("step2")
            if not hasattr(job, 'label_file') or job.label_file is None:
                job.label_file = 'corpus/' + job.prep_name + "/object-detection.pbtxt"
            job.init_labels()
            self.assertGreater(len(job.categories), 0)
            logging.info("step3")
            source = json.loads(
                job.download_to_string('corpus/' + job.prep_name +
                                       "/job_def.json"))
            job.project_name = source['project_name']
            logging.info("step4")

            updateFileML(job)
            logging.info("step5")
            upload_model(job)
            logging.info("step6")
            upload_packages(job)
            logging.info("step7")
            start_ml_engine(job)
            logging.info("step8")
            history = json.loads(
                job.download_to_string(
                    'corpus/' + job.prep_name + "/job_history.json", ))
            upload_metadata(job, "training_jobs/" + job.name, history)
        finally:
            job.cleanup()
Example No. 5
def job():

    # {
    # "job_title": "full-stack Developer",
    # "job_skills": [ "React.js", "JavaScript" ]
    # }

    # curl -i -H "Content-Type: application/json" -X POST -d '{"job_title": "FrontEnd Engineer1", "job_skills": [ "HTML2", "C33SS", "JadevaScript", "Sfffass" ]}' http://localhost:5000/api/job

    data = request.json

    new_job = Job(title=data["job_title"])

    for skill_name in data["job_skills"]:
        # Find skill in DB:
        skill = Skill.query.filter_by(name=skill_name).first()

        # check whether the skill already exists in the DB
        exists = skill is not None
        if not exists:
            skill = Skill(name=skill_name)  # create a new skill row
            db.session.add(skill)           # stage it for the commit

        # add required skill to job opening
        new_job.skills.append(skill)

    db.session.add(new_job)
    db.session.commit()

    return job_schema.dump(new_job)
Example No. 6
        def _decorate(*args, **kwargs):
            # Keep the argument types simple; no need for anything more complex
            arguments = json.dumps(dict(args=args, kwargs=kwargs))

            # Store the task in MongoDB
            job = Job()
            job.module_name = module_name
            job.function_name = function_name
            job.arguments = arguments
            job.queue_type = queue_key
            job.save()
Example No. 7
def add_job():
    attributes = request.json
    job = Job(**attributes)
    try:
        session.add(job)
        session.commit()
        response = jsonify({"code": 1, "message": "Successfully added job"})
        return make_response(response, 201)
    except Exception:
        session.rollback()
        response = jsonify({"code": -1, "message": "job already exists"})
        return make_response(response, 401)
Example No. 8
def test(logger):
    job = Job(logger=logger)
    job.algorithm = methods.armada
    job.seperator = ','
    job.dataset = 'datasets/Vent-minute-3.csv'
    job.columns = columns.vent_columns
    job.getState = methods.vent_getState
    job.minSupport = 0.7
    job.maxGap = pa.to_timedelta('24:00:00')

    job.useGenericPreprocessor()

    return job
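Worth noting: pa.to_timedelta('24:00:00') parses the HH:MM:SS string into a one-day Timedelta, which is what maxGap receives in these setups. A quick sanity check using the same pandas alias:

import pandas as pa

gap = pa.to_timedelta('24:00:00')
print(gap)                  # 1 days 00:00:00
print(gap.total_seconds())  # 86400.0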
Example No. 9
def tpminerVentSetup(logger):
    job = Job(logger=logger, label='TPMiner main vent')
    job.algorithm = tpminer
    job.seperator = ','
    job.dataset = 'datasets/Vent-minute-12.csv'
    job.columns = col.vent_columns
    job.getState = vent_getState
    job.minSupport = 0.1
    job.maxGap = pa.to_timedelta('24:00:00')

    job.useGenericPreprocessor()

    return job
Example No. 10
def tpminerLoadSetup(logger):
    job = Job(logger=logger, label='TPMiner load')
    job.algorithm = tpminer
    job.seperator = ','
    job.dataset = 'datasets/Load-minute-12.csv'
    job.columns = col.load_columns
    job.getState = load_getState
    job.minSupport = 0.5
    job.maxGap = pa.to_timedelta('24:00:00')

    job.useGenericPreprocessor()

    return job
Example No. 11
def tpminerWeatherSetup(logger):
    job = Job(logger=logger, label='TPMiner weather-crash')
    job.algorithm = tpminer
    job.minSupport = 0.15
    job.maxGap = pa.to_timedelta('24:00:00')
    job.dataset = 'datasets/Weather-Crash.csv'
    preprocessor = WeatherCrashPreprocessor(
        'datasets/weather.csv',
        'datasets/Motor_Vehicle_Collisions_-_Crashes.csv',
        logger)
    job.preprocessor = preprocessor

    return job
Example No. 12
def client_home_page():
    global jobs_index

    job_type = request.form['job_type']
    address = request.form['address']
    description = request.form['description']

    job = Job(jobs_index, job_type, address, description,
              session.get('username'))
    jobs_index += 1

    global_dict['jobs'].append(job)
    print_jobs()

    return jobs_by_client()
Example No. 13
    def test_train_mlengine_copy(self):
        train_job.train_job['name'] = str(uuid.uuid4())
        job = Job(train_job.train_job['name'], train_job.train_job)
        job.type = 'preprocess'
        job.init_temp(str(uuid.uuid4()))
        try:
            logging.info("step1")
            job.init_storage()
            logging.info("step2")
            if hasattr(job,
                       'source_training') and job.source_training != '':
                sjd = json.loads(
                    job.download_to_string('training_jobs/' +
                                           job.source_training +
                                           "/job_def.json"))
                job.num_train_steps += sjd['num_train_steps']
                job.model = sjd['model']
                st = 'training_jobs/{}/'.format(job.source_training)
                dt = 'training_jobs/{}/'.format(job.name)
                job.copy_folder(st, dt)
                job.delete_cloud_file('{}{}'.format(dt, "job_def.json"))
                job.delete_cloud_file('{}{}'.format(dt, "job_history.json"))
            logging.info("step3")
            if not hasattr(job, 'label_file') or job.label_file is None:
                job.label_file = 'corpus/' + job.prep_name + "/object-detection.pbtxt"
            job.init_labels()
            self.assertGreater(len(job.categories), 0)
            logging.info("step4")
            source = json.loads(
                job.download_to_string('corpus/' + job.prep_name +
                                       "/job_def.json"))
            job.project_name = source['project_name']
            logging.info("step5")

            updateFileML(job)
            logging.info("step6")
            upload_model(job)
            logging.info("step7")
            upload_packages(job)
            logging.info("step8")
            start_ml_engine(job)
            logging.info("step9")
            history = json.loads(
                job.download_to_string(
                    'corpus/' + job.prep_name + "/job_history.json", ))
            upload_metadata(job, "training_jobs/" + job.name, history)
        finally:
            job.cleanup()
Example No. 14
    def post(self):
        """
        Executes a training.
        Use this method to start a training.
        """
        job_def = request.json
        job = Job(job_def['name'], job_def)
        job.type = 'train'
        dt = newdt.now()
        job.start_time = int(dt.timestamp() * 1000)
        job.request = {
            'full_path': request.full_path,
            'remote_addr': request.remote_addr,
            'method': request.method
        }
        if hasattr(job, 'ml_engine') and job.ml_engine:
            jb = train_queue.enqueue(train_mlengine,
                                     job,
                                     job_timeout=-1,
                                     result_ttl=-1)
        else:
            jb = train_queue.enqueue(train_job_method, job, job_timeout=-1)
        jb.meta['job_init_time'] = str(int(dt.timestamp() * 1000))
        jb.meta['job_def'] = job_def
        jb.save_meta()

        json_str = job.to_json_string()
        st = {
            'BUCKET': job.bucket,
            'USE_GCS': job.use_gcs,
            'ACCESS_KEY': access_key,
            'SECRET_KEY': secret_key,
            'S3_URL': s3_url
        }
        storage = Storage(st)
        storage.upload_data(json_str,
                            'jobs/running/{}_0_train_r_{}.json'.format(
                                str(job.start_time), jb.id),
                            contentType='application/json')
        storage.upload_data(json_str,
                            'jobs/all/{}_0_train_r_{}.json'.format(
                                str(job.start_time), jb.id),
                            contentType='application/json')
        return {
            "status": jb.get_status(),
            'job_id': jb.id,
            'meta': jb.meta
        }, 201
Example No. 15
def create_job():
    try:
        job_data = request.json
        job_load = JobSchema().load(job_data)
        job = Job(**job_load)
        job.save()
        job_dump = JobSchema().dump(job)
        return make_response(jsonify(job_dump), 201)
    except ValidationError as err:
        return make_response(jsonify(err.messages), 400)
    except SQLAlchemyError as err:
        logging.error('Error in Job creation: {}'.format(list(err.args)))
        return make_response(
            jsonify({
                'Error Message':
                'There was an error, we couldn\'t create the Job'
            }), 500)
Example No. 16
def Test_useGenericPreprocessor_ThrowExceptionIfUsedPropertyIsNone_NoneProperty():
    job = Job()

    m = ''
    is_caught = False
    try:
        job.useGenericPreprocessor()
    except ArgumentNotSetError as e:
        is_caught = True
        m += e.argument
    except Exception:
        pass

    m = ('\'useGenericPreprocessor\' caught unset properties: '
         + 'the following is not set [{}]'.format(m))
    t.test(is_caught, m)
Example No. 17
    def update_jobs(self):
        for c_data in self.data:
            # If company has jobs
            if c_data['jobtitles']:
                jobs = c_data['jobtitles'].split('|JOB|')
                for job in [json.loads(j) for j in jobs]:
                    job = Job(job['title'], job['role_type_id'],
                              c_data['companyid'])
                    self.data_storage.jobs.append(job)
                    self.data_storage.add_job_to_company(job)

            # If company has job tags
            if c_data['jobtags']:
                job_tags = c_data['jobtags'].split('|TAG|')
                for job_tag in [json.loads(j) for j in job_tags]:
                    job_tag = (job_tag['name'], job_tag['id'])
                    self.data_storage.add_job_tag(job_tag)
                    self.data_storage.add_jobtag_to_company(
                        job_tag, c_data['companyid'])
Example No. 18
    def test_process_all(self):
        for jb in prep_job.jobs:
            job = Job(jb['name'], jb)
            job.type = 'preprocess'
            job.init_temp(str(uuid.uuid4()))
            try:
                job.init_labels()
                job.init_storage()
                job.testcoco = {
                    "info": {
                        "description": "COCO 2017 Dataset",
                        "url": "http://cocodataset.org",
                        "version": "1.0",
                        "year": 2018,
                        "contributor": "COCO Consortium",
                        "date_created": "2017/09/01"
                    },
                    "licenses": [],
                    "images": [],
                    "categories": [],
                    "annotations": [],
                }
                job.traincoco = {
                    "info": {
                        "description": "COCO 2017 Dataset",
                        "url": "http://cocodataset.org",
                        "version": "1.0",
                        "year": 2018,
                        "contributor": "COCO Consortium",
                        "date_created": "2017/09/01"
                    },
                    "licenses": [],
                    "images": [],
                    "categories": [],
                    "annotations": [],
                }
                process_json(job)
                create_label_pbtxt(job)
                create_tf_example(job)
                create_tf_example(job, False)
                delete_staged(job)
                upload_metadata(job)
            finally:
                job.cleanup()
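The testcoco/traincoco skeletons here are identical to the ones in the test_init_storage example further down; a small factory (a sketch, not part of the original suite) would remove the duplication:

def coco_skeleton():
    """Return a fresh, empty COCO-style container with the shared header."""
    return {
        "info": {
            "description": "COCO 2017 Dataset",
            "url": "http://cocodataset.org",
            "version": "1.0",
            "year": 2018,
            "contributor": "COCO Consortium",
            "date_created": "2017/09/01",
        },
        "licenses": [],
        "images": [],
        "categories": [],
        "annotations": [],
    }

# usage inside the tests:
#     job.testcoco = coco_skeleton()
#     job.traincoco = coco_skeleton()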
Example No. 19
def job_add(user_id=None, job_db_id=None):
    job_db_obj = storage.get('Job_db', job_db_id)
    if job_db_obj is None:
        abort(404, 'Not found')
    d = datetime.today() - timedelta(days=job_db_obj.date_post)
    print(d)
    new_job = Job(company=job_db_obj.company,
                  position=job_db_obj.position,
                  location=job_db_obj.location,
                  description=job_db_obj.description,
                  user_id=user_id,
                  html_description=job_db_obj.html_description
                  #        link = job_db_obj.link,
                  #        date_post = d
                  )
    storage.new(new_job)
    storage.save()
    return render_template('job_detail.html',
                           job_db_obj=job_db_obj,
                           descrip=Markup(job_db_obj.html_description))
Example No. 20
    def run(self):
        Employer.objects.delete()
        employers = self.get_employers()
        for e in employers:
            print "Creating employer %s" % e
            employer = Employer(**e)
            employer.save()

        Student.objects.delete()
        students = self.get_students()

        experiences = self.get_experiences()
        for i in range(len(experiences)):
            experience_list = experiences[i]
            s = students[i]
            for e in experience_list:
                s.experience = [Experience(**e)]
                s.save()

        educations = self.get_educations()
        for s in students:
            education = choice(educations)
            s.education = [Education(**education)]
            s.save()

        employers = Employer.find({})
        jobs = self.get_jobs()
        for i in range(len(jobs)):
            j = jobs[i]
            e = employers[i]
            j['employer_id'] = e['id']
            job = Job(**j)
            job.save()

        jobs = Job.find({})

        self.save_applications(jobs, students)
Example No. 21
def add_new_job():
    """
        Request Example:
        {
        "job_title": "Software Engineer",
        "job_skills": [ "Python", "Flask", "Django", "AWS" ]
        }

        CURL example:
        curl -i -H "Content-Type: application/json" -X POST -d '{"job_title": "Software Engineer",
        "job_skills": ["Python", "Flask", "Django", "AWS"]}' http://localhost:5000/api/jobs

        Returns: JobSchema Object

    """

    data = request.get_json(force=True)
    new_job = Job(title=data["job_title"])

    # Add skills to job
    for skill_name in data["job_skills"]:
        # Find skill in DB:
        skill = Skill.query.filter_by(name=skill_name).first()

        # check if skill already in DB:
        skill_exist_in_table = skill is not None
        if not skill_exist_in_table:
            # Create new skill row in DB
            skill = Skill(name=skill_name)
            db.session.add(skill)  # store in DB

        # add required skill to job opening
        new_job.skills.append(skill)

    db.session.add(new_job)
    db.session.commit()
    return job_schema.dump(new_job)
Example No. 22
    def test_init_storage(self):
        for jb in prep_job.jobs:
            job = Job(jb['name'], jb)
            job.type = 'preprocess'
            job.init_temp(str(uuid.uuid4()))
            try:
                job.init_labels()
                job.init_storage()
                job.testcoco = {
                    "info": {
                        "description": "COCO 2017 Dataset",
                        "url": "http://cocodataset.org",
                        "version": "1.0",
                        "year": 2018,
                        "contributor": "COCO Consortium",
                        "date_created": "2017/09/01"
                    },
                    "licenses": [],
                    "images": [],
                    "categories": [],
                    "annotations": [],
                }
                job.traincoco = {
                    "info": {
                        "description": "COCO 2017 Dataset",
                        "url": "http://cocodataset.org",
                        "version": "1.0",
                        "year": 2018,
                        "contributor": "COCO Consortium",
                        "date_created": "2017/09/01"
                    },
                    "licenses": [],
                    "images": [],
                    "categories": [],
                    "annotations": [],
                }
            finally:
                job.cleanup()
Example No. 23
    def build_jobs(self, *nodes):
        return list(map(lambda n: Job(node=n, batch=self), nodes))
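An equivalent and arguably more idiomatic spelling of the same method uses a list comprehension instead of map over a lambda:

    def build_jobs(self, *nodes):
        return [Job(node=n, batch=self) for n in nodes]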
Example No. 24
from db_mockup.candidates import candidates  # Candidates mockup
from db_mockup.jobs import jobs              # Jobs mockup
from utilities import candidate_utilities    # Candidates utilities
from models.job import Job                   # Job model

# Specify the maximum number of candidates to be returned for a job. (all = -1)
max_candidates = 5

# Main function - creates a list of candidate objects, loops over jobs and calls candidate_finder for each job.
if __name__ == '__main__':
    current_candidates = candidate_utilities.create_candidates_list(candidates)
    for job in jobs:

        current_job = Job(job.get('id'), job.get('title'), job.get('skills_ids'))
        matching_candidates = candidate_utilities.candidate_finder(current_job, current_candidates, max_candidates)  # Limit the number of candidates for position by adding the limit here

        if matching_candidates:
            print('The best candidates for job number: ' + str(
                current_job.id) + ' - ' + current_job.title + ' position: \n' + str(matching_candidates) + '\n')
        else:
            print('There are no candidates for job number: ' + str(
                current_job.id) + ' - ' + current_job.title + ' position. \n')
Example No. 25
    def make_job(self, data, **kwargs):
        from models.job import Job
        return Job(**data)
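The (self, data, **kwargs) signature matches a marshmallow @post_load hook, which turns a validated dict into a model instance. A minimal sketch of the schema this method likely belongs to (the field names are assumptions, not the project's real ones):

from marshmallow import Schema, fields, post_load
from models.job import Job

class JobSchema(Schema):
    title = fields.Str()    # assumed field
    revenue = fields.Int()  # assumed field

    @post_load
    def make_job(self, data, **kwargs):
        return Job(**data)

# JobSchema().load({'title': 'Courier', 'revenue': 42}) returns a Job instance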
Example No. 26
import pandas as pa
from logging2 import FileLogger, Severity
from methods import *
from models.job import Job
from preprocessors.columns import vent_columns

min_5_seq_6 = Job(label='min_5_seq_6')
min_5_seq_6.minSupport = 0.05
min_5_seq_6.dataset = 'datasets/Vent-minute-6.csv'

min_4_seq_6 = Job(label='min_4_seq_6')
min_4_seq_6.minSupport = 0.04
min_4_seq_6.dataset = 'datasets/Vent-minute-6.csv'

min_3_seq_6 = Job(label='min_3_seq_6')
min_3_seq_6.minSupport = 0.03
min_3_seq_6.dataset = 'datasets/Vent-minute-6.csv'

min_2_seq_6 = Job(label='min_2_seq_6')
min_2_seq_6.minSupport = 0.02
min_2_seq_6.dataset = 'datasets/Vent-minute-6.csv'

min_1_seq_6 = Job(label='min_1_seq_6')
min_1_seq_6.minSupport = 0.01
min_1_seq_6.dataset = 'datasets/Vent-minute-6.csv'

min_5_seq_12 = Job(label='min_5_seq_12')
min_5_seq_12.dataset = 'datasets/Vent-minute-12.csv'
min_5_seq_12.minSupport = 0.05

min_5_seq_9 = Job(label='min_5_seq_9')
Example No. 27
def create():
    """Creates a new outgoing fax"""

    account_id = Account.authorize(request.values.get('api_key'))
    if account_id is None:
        return jsonify(api_error('API_UNAUTHORIZED')), 401

    ip = fix_ip(request.headers.get('x-forwarded-for', request.remote_addr))

    if request.method == 'POST':

        if request.files and 'file' in request.files:
            uploaded_file = request.files['file']
        else:
            uploaded_file = None

        v = request.values.get

        if uploaded_file or v('body'):

            data = {
                'account_id': account_id,
                'ip_address': ip,
                'destination': v('destination'),
                'send_authorized': v('send_authorized', 0),
                'cover': v('cover', 0),
                'cover_name': v('cover_name'),
                'cover_address': v('cover_address'),
                'cover_city': v('cover_city'),
                'cover_state': v('cover_state'),
                'cover_zip': v('cover_zip'),
                'cover_country': v('cover_country'),
                'cover_phone': v('cover_phone'),
                'cover_email': v('cover_email'),
                'cover_company': v('cover_company'),
                'cover_to_name': v('cover_to_name'),
                'cover_cc': v('cover_cc'),
                'cover_subject': v('cover_subject'),
                'cover_status': v('cover_status', 'review'),
                'cover_comments': v('cover_comments'),
                'callback_url': v('callback_url')
            }
            if uploaded_file:
                data['filename'] = uploaded_file.filename
            else:
                data['body'] = v('body')

            o(data)

            try:
                job = Job(**data)
                job.validate()
                job.determine_international()
            except ValidationError as err:
                return jsonify(api_error(err.ref)), 400

            db.session.add(job)
            db.session.commit()

            if uploaded_file:
                binary = uploaded_file.stream.read()
            else:
                binary = job.body.replace("\r\n", "\n").encode('utf-8')

            redis_conn = Redis.from_url(current_app.config['REDIS_URI'])
            q = Queue('high', connection=redis_conn)
            q.enqueue_call(func=initial_process,
                           args=(job.id, binary),
                           timeout=300)

            return jsonify(job.public_data())
        else:
            return jsonify(api_error("JOBS_NO_ATTACHMENT")), 400
Example No. 28
def submitCampaign(Session, jobsFile):

    # read the JSON jobs description

    jobdef = None

    try:
        campdef = submissionTools.PandaJobsJSONParser.parse(jobsFile)
        campaign = Session.query(Campaign).filter(
            Campaign.name.like(campdef['campaign'])).first()
        if (campaign is None):
            #Don't let colons into campaign names
            campName = re.sub(':', '', campdef['campaign'])
            campaign = Campaign(name=campName,
                                lastUpdate=datetime.datetime.utcnow())
            Session.add(campaign)
            Session.commit()
    except Exception as e:
        logging.error(traceback.format_exc())
        Session.rollback()
        sys.exit(1)

    aSrvID = None

    for j in campdef['jobs']:
        nodes = j['nodes']
        walltime = j['walltime']
        queuename = j['queuename']
        try:
            outputFile = j['outputFile'].strip()
        except (KeyError, AttributeError):
            outputFile = None
        command = j['command']

        try:
            iterable = j['iterable'].strip()
        except (KeyError, AttributeError):
            iterable = None

        #Check to see if this is a duplicate output file
        jobsThisOF = Session.query(Job).filter(
            Job.outputFile.like(outputFile)).count()
        if (jobsThisOF > 0):
            print(
                coloured(
                    'Warning:' + str(jobsThisOF) +
                    ' job(s) already exist with output file: \n' + outputFile +
                    '\n', 'red'))

        dbJob = Job(script=command,
                    nodes=nodes,
                    wallTime=walltime,
                    status="To Submit",
                    subStatus="To Submit",
                    campaignID=campaign.id,
                    outputFile=outputFile)
        dbJob.serverName = 'c:' + campaign.name + ':'
        if iterable:
            dbJob.serverName += 'i:' + iterable + ':'
        if outputFile:
            #Panda Server doesn't like slashes in its job names
            dbJob.serverName += 'oF:' + re.sub('/', ';', outputFile) + ':'
        dbJob.serverName += subprocess.check_output('uuidgen', text=True).strip()

        dbJob.iterable = iterable

        jobSpec = submissionTools.createJobSpec(walltime=walltime,
                                                command=command,
                                                outputFile=outputFile,
                                                nodes=nodes,
                                                jobName=dbJob.serverName)
        s, o = Client.submitJobs([jobSpec])
        try:
            print(o)
            dbJob.pandaID = o[0][0]
            dbJob.status = 'submitted'
            dbJob.subStatus = 'submitted'
            print(
                coloured((iterable or '') + ", " + str(o[0][0]) + "\n",
                         'green'))
        except Exception as e:
            logging.error(traceback.format_exc())
            print(coloured((iterable or '') + " job failed to submit\n",
                           'red'))
            dbJob.status = 'failed'
            dbJob.subStatus = 'failed'
        Session.add(dbJob)
        Session.commit()

    return None
Example No. 29
if __name__ == '__main__':
    global_dict['users'] = []
    global_dict['jobs'] = []

    # this is for the session
    app.secret_key = os.urandom(12)

    # seeded 3 users and 3 jobs as sample data; if you see more errors, say so
    global_dict['users'].append(
        User(0, 'client1', '*****@*****.**', '123', 'cl', '1', None, 'desc',
             'phone', 'Client', None))

    global_dict['users'].append(
        User(1, 'client2', '*****@*****.**', '123', 'cl', '2', None, 'desc',
             'phone', 'Client', None))

    global_dict['users'].append(
        User(2, 'worker1', '*****@*****.**', '123', 'wo', '1', None, 'desc',
             'phone', 'Worker', 'Instalator'))

    users_index = 3

    global_dict['jobs'].append(
        Job(0, 'Instalator', 'addr1', 'desc1', 'client1'))
    global_dict['jobs'].append(
        Job(1, 'Instalator', 'addr2', 'desc2', 'client2'))
    global_dict['jobs'].append(Job(2, 'Dadaca', 'addr3', 'desc3', 'client2'))
    jobs_index = 3

    app.run(debug=True)
Example No. 30
def syncCampaign(Session):

    try:
        output = Client.getAllJobs()
        if output[0] != 0:
            raise Exception("Server error")
        else:
            output = json.loads(output[1])['jobs']
    except Exception as e:
        logging.error(traceback.format_exc())
        Session.rollback()
        sys.exit(1)

    jobsToRepopulate = []
    for j in output:
        try:
            #Check for pre-existing job with this pandaid
            #We have to evaluate these queries lazily to avoid throwing an unnecessary exception
            if (j['pandaid'] and j['jobname']):
                isExistingPandaID = Session.query(Job).filter(
                    Job.pandaID.like(j['pandaid']))
                isExistingJobName = Session.query(Job).filter(
                    Job.serverName.like(j['jobname']))
                if (isExistingPandaID.first() is None
                        and isExistingJobName.first() is None):
                    if (len(j['jobname']) > 37):
                        #See if the jobname fits the format
                        campaignName, i, oF = unpackServerName(j['jobname'])
                        if (campaignName):
                            campaign = Session.query(Campaign).filter(
                                Campaign.name.like(campaignName)).first()
                            if (campaign is None):
                                campaign = Campaign(
                                    name=campaignName,
                                    lastUpdate=datetime.datetime.utcnow())
                                Session.add(campaign)
                                Session.commit()
                            #We can't recover the job script from the monitor output - we do that with another query below
                            job = Job(script="unknown",
                                      campaignID=campaign.id,
                                      pandaID=j['pandaid'],
                                      serverName=j['jobname'],
                                      status=j['jobstatus'],
                                      subStatus=j['jobsubstatus'])
                            if i:
                                job.iterable = i
                            #In some instances panda server can report a null substatus. Converting these to empty strings to fulfil database rules
                            if not j['jobsubstatus']:
                                job.subStatus = ""
                            Session.add(job)
                            Session.commit()

                            #Record that this campaign/job id pair was missing, but only after it's been committed
                            jobsToRepopulate.append((campaign.id, job.pandaID))
        except Exception as e:
            logging.error(traceback.format_exc())
            Session.rollback()

    #We need to query each job individually to get its job parameters
    campsToRepopulate = set([seq[0] for seq in jobsToRepopulate])
    for c in campsToRepopulate:
        try:
            camp = Session.query(Campaign).get(c)
            jobs = [seq[1] for seq in jobsToRepopulate if seq[0] == c]
            #Recreate the jobs that were missing
            camp.updateJobs(Session, recreate=True, jobs_to_query=jobs)
            #Now update them all to make sure everything is legit
            camp.updateJobs(Session)
            #Now check to see if we have duplicate output files
            for OF in Session.query(Job).with_entities(
                    Job.outputFile).group_by(Job.outputFile).all():
                jobsThisOF = Session.query(Job).filter(
                    Job.outputFile.like(OF[0])).count()
                if (jobsThisOF > 1):
                    print(
                        coloured(
                            'Warning:' + str(jobsThisOF) +
                            ' job(s) have shared output file: \n' + OF[0] +
                            '\n', 'red'))
        except Exception as e:
            logging.error(traceback.format_exc())
            Session.rollback()
    return None