Example No. 1
def render_task(dburl, docpath, slug):
    """Render a document."""
    oldcwd = os.getcwd()
    try:
        os.chdir(os.path.join(docpath, slug))
    except OSError:
        db = StrictRedis.from_url(dburl)
        job = get_current_job(db)
        job.meta.update({'out': 'Document not found.', 'return': 127, 'status': False})
        job.save()
        return 127

    db = StrictRedis.from_url(dburl)
    job = get_current_job(db)
    job.meta.update({'out': '', 'milestone': 0, 'total': 1, 'return': None,
                     'status': None})
    job.save()

    p = subprocess.Popen(('lualatex', '--halt-on-error', slug + '.tex'),
                         stdout=subprocess.PIPE, universal_newlines=True)

    out = []

    while p.poll() is None:
        nl = p.stdout.readline()
        out.append(nl)
        job.meta.update({'out': ''.join(out), 'return': None,
                         'status': None})
        job.save()

    out = ''.join(out)
    job.meta.update({'out': out, 'return': p.returncode,
                     'status': p.returncode == 0})
    job.save()
    os.chdir(oldcwd)
    return p.returncode
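For context, a minimal sketch of the enqueueing side of this task (illustrative only; `redis_url`, `docpath`, and `slug` are placeholders, not part of the example above):

from redis import StrictRedis
from rq import Queue

db = StrictRedis.from_url(redis_url)
q = Queue(connection=db)
job = q.enqueue(render_task, redis_url, docpath, slug)

# A web view can then poll the metadata the worker keeps updating:
job.refresh()
print(job.meta.get('out', ''))
print(job.meta.get('status'))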
Example No. 2
def setup_container(d_os):
    cur_job = get_current_job()
    cur_job.meta['ownername'] = str(d_os['username'])
    cur_job.meta['request_status'] = "Performing status check"
    cur_job.save()
    cur_job.refresh()
    if d_os['ct_type'] == "openvz":
        cmd = ("vzctl set " + d_os['cid'] + " --ipadd " + d_os['ipadd'] +
               " --hostname " + d_os['hname'] + " --nameserver " + d_os['nserver'] +
               " --userpasswd " + d_os['usr'] + ":" + d_os['pwd'] + " --save")
        print cmd
        out = check_output(shlex.split(cmd))
    elif d_os['ct_type'] == "aws_vm":
        # Create a file under /home/laks/tmp/tutorials named after the instance IP.
        fpathname = "/home/laks/tmp/tutorials/" + str(d_os['instance'].ip_address) + ".json"
        fd = open(fpathname, "w")
        fd.write(d_os['tutorial'])
        fd.close()

        spoty.install_sw(d_os['instance'], d_os['repo'])
        findreplace = [
            ("SUBDOMAIN", d_os['username']),
            ("IPADDRESS", d_os['instance'].ip_address),
        ]
        creat_nginx_tmpl(findreplace, d_os)
        reload_nginx()

    else:
        print "setting up subdomain for user " + str(d_os['username'])
        cmd = "docker inspect --format '{{ .NetworkSettings.IPAddress }}' " + str(d_os['imgid'])
        proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        ipaddr, err = proc.communicate()
        print ipaddr
        if d_os['code'] == 1:
            findreplace = [
                ("SUBDOMAIN", d_os['username'] + str(d_os['code'])),
                ("IPADDRESS", ipaddr),
            ]
        else:
            findreplace = [
                ("SUBDOMAIN", d_os['username']),
                ("IPADDRESS", ipaddr),
            ]
        creat_nginx_tmpl(findreplace, d_os)
        reload_nginx()
        time.sleep(2)
        setup_docker_ct_helper(d_os)

    cur_job = get_current_job()
    cur_job.meta['ownername'] = str(d_os['username'])
    cur_job.meta['request_status'] = "Running, please login"
    cur_job.save()
    cur_job.refresh()
    # Hand the prepared container off to the next queue.
    if d_os['proceed_nextq']:
        with Connection(Redis()):
            q = Queue('startq', default_timeout=15000)
            job = q.enqueue_call(func=start_container, args=(d_os,), result_ttl=600)
Example No. 3
 def __init__(self):
     self.job = get_current_job()
     self.timeout = 60 * 10
     self.start_date = time.time()
     # Busy-wait until the enqueueing side has written the Mongo document id
     # into job.meta; refresh() re-reads the meta from Redis so the loop can end.
     while 'mongo_id' not in self.job.meta:
         self.job.refresh()
     self.mongo_id = ObjectId(self.job.meta['mongo_id'])
     file_ = inspect.getfile(self.__class__)
     self.name = os.path.basename(file_).split('.')[0]
     self.result = "Ok"
     self._do_run = True
     self.run()
Example No. 4
def add_job(username, domain, project):

    _job = get_current_job()

    payload = {'project': project, 'username': username, 'spider': domain, 'jobid': _job.id}
    req = urllib2.urlopen(scrapyd_uri, data=urllib.urlencode(payload))
    if req.getcode() != 200:
        raise Exception('scrapyd returned HTTP %s' % req.getcode())

    job = get_current_job()
    while True:
        # Reload meta from Redis; another process sets 'status' when the spider finishes.
        job.refresh()
        print 'job waiting. jobid: %s, meta: %s' % (job.id, job.meta)
        if 'status' in job.meta:
            return
        time.sleep(5)
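The loop above waits for some other process to write a 'status' key into the job's meta. A minimal sketch of such a callback, assuming a hypothetical Flask webhook that scrapyd (or a wrapper around it) calls when the spider finishes; `job.save_meta()` is available in recent RQ versions, older ones use `job.save()`:

from flask import Flask, request
from redis import Redis
from rq.job import Job

app = Flask(__name__)
redis_conn = Redis()

@app.route('/spider-finished', methods=['POST'])
def spider_finished():
    # Look up the waiting RQ job by id and flag it as done.
    job = Job.fetch(request.form['jobid'], connection=redis_conn)
    job.meta['status'] = request.form.get('status', 'finished')
    job.save_meta()
    return 'ok'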
Example No. 5
 def get_csv_rows(self, queryset, type, model):
     data = [self.get_csv_header()]
     total = queryset.count()
     processed = 0
     job = get_current_job()
     for asset in queryset:
         row = ['part'] if asset.part_info else ['device']
         for item in self.columns:
             field = item.field
             if field:
                 nested_field_name = item.foreign_field_name
                 if nested_field_name == type:
                     cell = self.get_cell(
                         getattr(asset, type), field, model
                     )
                 elif nested_field_name == 'part_info':
                     cell = self.get_cell(asset.part_info, field, PartInfo)
                 elif nested_field_name == 'venture':
                     cell = self.get_cell(asset.venture, field, Venture)
                 elif nested_field_name == 'is_discovered':
                     cell = unicode(asset.is_discovered)
                 else:
                     cell = self.get_cell(asset, field, Asset)
                 row.append(unicode(cell))
         data.append(row)
         processed += 1
         set_progress(job, processed / float(total))
     set_progress(job, 1)
     return data
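The `set_progress` helper is not shown in this example. A plausible minimal implementation, assuming it only records a 0-1 fraction in the job's meta (compare Example No. 15 below, which inlines the same logic):

def set_progress(job, progress):
    """Store a 0..1 progress fraction on the current RQ job, if any."""
    if job:
        job.meta['progress'] = progress
        job.save()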
Example No. 6
def save_assembly_job(assembly, fasta_path, calculate_fourmers,
                      search_genes, email=None, 
                      coverage_filename=None, bulk_size=5000):
    job = get_current_job()

    # Find essential genes
    essential_genes = None
    if search_genes:
        job.meta['status'] = 'Searching for essential genes per contig'
        job.save()
        essential_genes = find_essential_genes_per_contig(fasta_path)

    # Save contigs to database
    job.meta['status'] = 'Saving contigs'
    job.save()
    args = [assembly, fasta_path, calculate_fourmers, essential_genes, bulk_size]
    if coverage_filename is not None:
        samples, coverages = read_coverages(coverage_filename)
        args.append(coverages)
        assembly.samples = ','.join(samples)
    notfound = save_contigs(*args)
    job.meta.setdefault('notfound', []).extend(notfound)
    job.save()

    assembly.busy = False
    db.session.add(assembly)
    db.session.commit()

    if email:
        utils.send_completion_email(email, assembly.name)

    return {'assembly': assembly.id}
Example No. 7
    def update_repo(self):
        with Connection(db):
            current_job = get_current_job()
            if 'update_repo' != current_job.origin:
                logger.error('Only the repo worker can update repos!')
                return

        trans_running = status.transactions_running or status.transaction_queue
        building_saved = False
        excluded = [
            'Updating antergos repo database.',
            'Updating antergos-staging repo database.',
            'Processing developer review result.',
            'Checking remote package sources for changes.',
        ]

        if not status.idle and trans_running and status.current_status not in excluded:
            building_saved = status.current_status
        elif status.idle:
            status.idle = False

        msg = excluded[0] if 'antergos' == self.name else excluded[1]
        status.current_status = msg

        self._update_repo()

        trans_running = status.transactions_running or status.transaction_queue

        if building_saved and not status.idle and status.current_status == msg:
            status.current_status = building_saved

        elif status.idle or not trans_running:
            status.idle = True
            status.current_status = 'Idle.'
Example No. 8
def walk(client, metadata, bytes_read, total_bytes):
    job = get_current_job()
    dir_path = os.path.basename(metadata['path'])
    num_bytes = metadata['bytes']
    bytes_read += int(num_bytes)
    update_progress(job, float(bytes_read) / total_bytes, dir_path)

    result = {'name': os.path.basename(dir_path), 'children': [], 'value': num_bytes}

    if 'contents' in metadata:
        for dir_entry in metadata['contents']:
            path = dir_entry['path']
            # Skip hidden files, shit gets too rowdy
            if os.path.basename(path)[0] == '.':
                continue
            dir_entry_bytes = dir_entry['bytes']
            bytes_read += int(dir_entry_bytes)
            update_progress(job, float(bytes_read) / total_bytes, path)
            if dir_entry_bytes == 0:
                child, bytes_read = walk(client, get_metadata(client, path), bytes_read, total_bytes)
            else:
                child = {'name':os.path.basename(path), 'value':dir_entry_bytes}
            result['children'].append(child)
    #empty directories? do we care?
    if len(result['children']) == 0:
        _ = result.pop('children', None)
    return result, bytes_read
Example No. 9
def stop(path, machineName, host, environment):
    new_env = resetEnv(host, environment)
    logger.debug('Bring down {}'.format(path))
    old_path = os.getcwd()
    jobid = get_current_job().id
    try:
        os.chdir(path)
        _open_console(jobid)
        if machineName != '':
            _l = lambda line: _log_console(jobid, str(line))
            sh.vagrant('halt', machineName,
                       _ok_code=[0, 1, 2],
                       _out=_l, _err=_l,
                       _env=new_env).wait()
        else:
            _l = lambda line: _log_console(jobid, str(line))
            sh.vagrant('halt',
                       _ok_code=[0, 1, 2],
                       _out=_l, _err=_l,
                       _env=new_env).wait()
    except Exception:
        logger.error('Failed to shut down machine {}'.format(path),
                     exc_info=True)

    _close_console(jobid)
    os.chdir(old_path)
    # logger.debug('Done bring down {}'.format(path))
    return json.dumps(_get_status(path, host, environment))
Example No. 10
def provision(path, environment, machineName, host):
    new_env = resetEnv(host, environment)
    # logger.debug('Running provision on {} with env {}'
    #            .format(path, environment))
    old_path = os.getcwd()
    jobid = get_current_job().id
    try:
        os.chdir(path)
        _open_console(jobid)
        if machineName != '':
            _l = lambda line: _log_console(jobid, str(line))
            sh.vagrant('provision', machineName,
                       _ok_code=[0, 1, 2],
                       _out=_l, _err=_l,
                       _env=new_env).wait()
        else:
            _l = lambda line: _log_console(jobid, str(line))
            sh.vagrant('provision',
                       _ok_code=[0, 1, 2],
                       _out=_l, _err=_l,
                       _env=new_env).wait()
    except Exception:
        logger.error('Failed to provision machine at {}'.format(path),
                     exc_info=True)
    _close_console(jobid)
    os.chdir(old_path)
    return json.dumps(_get_status(path, host, environment))
Example No. 11
def run_tests(payload):
    #payload = get_payload(payload_id)
    job = get_current_job()

    # work out the repo_url
    repo_name = payload['repository']['name']
    owner = payload['repository']['owner']['name']
    repo_url = "git@github.com:%s/%s.git" % (owner, repo_name)

    update_progress(job, 'repo url: %s' % repo_url)
    logger.info("repo: %s" % repo_url)

    vpath = tempfile.mkdtemp(suffix="ridonkulous")

    logger.info("cloning repo %s to: %s" % (repo_url, vpath))
    update_progress(job, "cloning repo %s to: %s" % (repo_url, vpath))

    create_environment(vpath, site_packages=False)

    os.chdir(vpath)

    git.Git().clone(repo_url)
    os.chdir(os.path.join(vpath, repo_name))

    pip = "%s/bin/pip" % vpath
    #python = "%s/bin/python"
    nose = "%s/bin/nosetests" % vpath

    ret = subprocess.call(r'%s install -r requirements.txt --use-mirrors' % pip, shell=True)

    logger.info("running nose")
    ret = subprocess.call(r'%s' % nose, shell=True)
    logger.info(ret)
    update_progress(job, 'done')
    return 'ok'
Example No. 12
def archive(files):
    """
    Expected argument:
    [
        {"path": "/path/to/file", "name": "file_name_in_zip"},
        { ... },
        ...
    ]
    Returns an S3 URL.
    """
    job = rq.get_current_job(get_redis())
    handle, tempname = tempfile.mkstemp()
    os.close(handle)
    with ZipFile(tempname, mode='w', compression=ZIP_DEFLATED) as zipfile:
        for file_ in files:
            zipfile.write(file_['path'], file_['name'])

    job.meta['size'] = si_unit(os.path.getsize(tempname))
    job.save()

    objname = str(uuid.uuid4()) + ".zip"

    s3 = boto3.resource('s3')
    s3.Bucket(bucket).upload_file(tempname, objname, ExtraArgs={'ContentType':'application/zip'})

    os.remove(tempname)

    url = "https://%s.s3.amazonaws.com/%s" % (bucket, objname)
    return url
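A minimal usage sketch for this worker (assuming a configured Redis connection and S3 bucket; the file paths are placeholders): the caller enqueues `archive` and later reads the returned URL from `job.result` and the size from `job.meta`.

from redis import Redis
from rq import Queue

q = Queue(connection=Redis())
job = q.enqueue(archive, [{"path": "/tmp/report.pdf", "name": "report.pdf"}])

# Later, e.g. in a status endpoint:
job.refresh()
if job.is_finished:
    print(job.result)            # the S3 URL returned by archive()
    print(job.meta.get('size'))  # human-readable size written by the worker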
Example No. 13
def rsync(path, host, environment, machineName=None):
    new_env = resetEnv(host, environment)
    old_path = os.getcwd()
    os.chdir(path)
    try:
        jobid = get_current_job().id
        _open_console(jobid)
        _log_console(
            jobid,
            'Running rsync on machine {}.\n'.format(machineName)
        )

        _l = lambda line: _log_console(jobid, str(line))

        if machineName is not None:
            sh.vagrant('rsync', machineName,
                       _out=_l,
                       _err=_l,
                       _ok_code=[0, 1, 2],
                       _env=new_env).wait()
        else:
            sh.vagrant('rsync',
                       _out=_l,
                       _err=_l,
                       _ok_code=[0, 1, 2],
                       _env=new_env).wait()
        _log_console(
            jobid,
            'rsync is done running on machine {}.\n'.format(machineName))
        _close_console(jobid)
    except Exception:
        os.chdir(old_path)
        return json.dumps({'msg': 'error trying to run vagrant rsync'})
    os.chdir(old_path)
    return json.dumps({'msg': 'rsync done'})
Example No. 14
def process_document(path, options, meta):
    current_task = get_current_job()
    with Office(app.config["LIBREOFFICE_PATH"]) as office: # acquire libreoffice lock
        with office.documentLoad(path) as original_document: # open original document
            with TemporaryDirectory() as tmp_dir:  # temp dir where the output will be stored
                for fmt in options["formats"]: # iterate over requested formats
                    current_format = app.config["SUPPORTED_FORMATS"][fmt]
                    output_path = os.path.join(tmp_dir, current_format["path"])
                    original_document.saveAs(output_path, fmt=current_format["fmt"])
                if options.get("thumbnails", None):
                    is_created = False
                    if meta["mimetype"] == "application/pdf":
                        pdf_path = path
                    elif "pdf" in options["formats"]:
                        pdf_path = os.path.join(tmp_dir, "pdf")
                    else:
                        pdf_tmp_file = NamedTemporaryFile()
                        pdf_path = pdf_tmp_file.name
                        original_document.saveAs(pdf_tmp_file.name, fmt="pdf")
                        is_created = True
                    image = Image(filename=pdf_path,
                                  resolution=app.config["THUMBNAILS_DPI"])
                    if is_created:
                        pdf_tmp_file.close()
                    thumbnails = make_thumbnails(image, tmp_dir, options["thumbnails"]["size"])
                result_path, result_url = make_zip_archive(current_task.id, tmp_dir)
        remove_file.schedule(
            datetime.timedelta(seconds=app.config["RESULT_FILE_TTL"]),
            result_path
        )
    return result_url
Example No. 15
 def get_csv_rows(self, queryset, type, model):
     data = [self.get_csv_header()]
     total = queryset.count()
     processed = 0
     job = get_current_job()
     for asset in queryset:
         row = ['part', ] if asset.part_info else ['device', ]
         for item in self.columns:
             field = item.field
             if field:
                 nested_field_name = item.foreign_field_name
                 if nested_field_name == type:
                     cell = self.get_cell(
                         getattr(asset, type), field, model
                     )
                 elif nested_field_name == 'part_info':
                     cell = self.get_cell(asset.part_info, field, PartInfo)
                 elif nested_field_name == 'venture':
                     cell = self.get_cell(asset.venture, field, Venture)
                 else:
                     cell = self.get_cell(asset, field, Asset)
                 row.append(unicode(cell))
         data.append(row)
         processed += 1
         if job:
             job.meta['progress'] = processed / float(total)
             if not job.meta.get('start_progress'):
                 job.meta['start_progress'] = datetime.datetime.now()
             job.save()
     if job:
         job.meta['progress'] = 1
         job.save()
     return data
Example No. 16
 def _poolJobs(self, db_name, check=False):
     """Check if we are a worker process.
     """
     if not (get_current_connection() and get_current_job()):
         super(IrCron, self)._poolJobs(db_name, check)
Example No. 17
def build_pkg_handler():
    """


    :return:
    """
    status.idle = False
    packages = status.queue
    if len(packages) > 0:
        pack = status.queue.lpop()
        if pack:
            pkgobj = package.get_pkg_object(name=pack)
        else:
            return False

        rqjob = get_current_job(db)
        rqjob.meta['package'] = pkgobj.name
        rqjob.save()

        status.now_building = pkgobj.name

        if pkgobj.is_iso is True or pkgobj.is_iso == 'True':
            status.iso_building = True
            build_result = build_iso(pkgobj)
        else:
            build_result = build_pkgs(pkgobj)

        # TODO: Move this into its own method
        if build_result is not None:
            completed = status.completed
            failed = status.failed
            blds = pkgobj.builds
            total = len(blds)
            if total > 0:
                success = len([x for x in blds if x in completed])
                failure = len([x for x in blds if x in failed])
                if success > 0:
                    success = 100 * success / total
                else:
                    success = 0
                if failure > 0:
                    failure = 100 * failure / total
                else:
                    failure = 0
                pkgobj.success_rate = success
                pkgobj.failure_rate = failure

        if build_result is True:
            run_docker_clean(pkgobj.pkgname)

    if not status.queue and not status.hook_queue:
        remove('/opt/antergos-packages')
        status.idle = True
        status.building = 'Idle'
        status.now_building = 'Idle'
        status.container = ''
        status.building_num = ''
        status.building_start = ''
        status.iso_building = False
        logger.info('All builds completed.')
Example No. 18
File: manual.py Project: ar4s/ralph
def scan_address_job(
    ip_address=None,
    plugins=None,
    results=None,
    automerge=AUTOMERGE_MODE,
    **kwargs
):
    """
    The function that is actually running on the worker.
    """

    job = rq.get_current_job()
    available_plugins = getattr(settings, 'SCAN_PLUGINS', {}).keys()
    if not plugins:
        plugins = available_plugins
    run_postprocessing = not (set(available_plugins) - set(plugins))
    if ip_address and plugins:
        if not kwargs:
            ip, created = IPAddress.concurrent_get_or_create(
                address=ip_address,
            )
            kwargs = {
                'snmp_community': ip.snmp_community,
                'snmp_version': ip.snmp_version,
                'http_family': ip.http_family,
                'snmp_name': ip.snmp_name,
            }
        results = _run_plugins(ip_address, plugins, job, **kwargs)
    if run_postprocessing:
        _scan_postprocessing(results, job, ip_address)
        # Run only when automerge mode is enabled and some change was detected.
        # When `change` state is not available just run it...
        if automerge and job.meta.get('changed', True):
            save_job_results(job.id)
    return results
Example No. 19
    def _employees(self, company_name="", keyword=""):
        '''LinkedIn scrape.'''
        # TODO - add linkedin directory search
        args = '-inurl:"/dir/" -inurl:"/find/" -inurl:"/updates"'
        args = args+' -inurl:"job" -inurl:"jobs2" -inurl:"company"'
        qry = '"at {0}" {1} {2} site:linkedin.com'
        qry = qry.format(company_name, args, keyword)
        results = Google().search(qry, 10)
        results = results.dropna()
        results = Google()._google_df_to_linkedin_df(results)
        _name = '(?i){0}'.format(company_name)
        if " " in company_name:
            results['company_score'] = [fuzz.partial_ratio(_name, company) 
                                        for company in results.company]
        else:
            results['company_score'] = [fuzz.ratio(_name, company) 
                                        for company in results.company]
        if keyword != "":
            results['score'] = [fuzz.ratio(keyword, title) 
                                for title in results.title]
            results = results[results.score > 75]

        results = results[results.company_score > 64]
        results = results.drop_duplicates()
        data = {'data': results.to_dict('r'), 'company_name':company_name}
        CompanyExtraInfoCrawl()._persist(data, "employees", "")

        job = rq.get_current_job()
        print job.meta.keys()
        if "queue_name" in job.meta.keys():
          if RQueue()._has_completed(job.meta["queue_name"]):
            q.enqueue(Jigsaw()._upload_csv, job.meta["company_name"])
        return results
Example No. 20
    def process_and_save_build_metadata(self, version_str=None):
        """
        Initializes the build metadata.

        Args:
            pkg_obj (Package): Package object for the package being built.

        Returns:
            Build: A build object.

        """

        self.start_str = self.datetime_to_string(datetime.now())

        if version_str:
            self.version_str = version_str
        else:
            self.version_str = self._pkg_obj.version_str

        pkg_link = '<a href="/package/{0}">{0}</a>'.format(self._pkg_obj.pkgname)

        tpl = 'Build <a href="/build/{0}">{0}</a> for {1} <strong>{2}</strong> started.'

        tlmsg = tpl.format(self.bnum, pkg_link, self.version_str)

        get_timeline_object(msg=tlmsg, tl_type=3, ret=False)

        self._pkg_obj.builds.append(self.bnum)
        status.now_building.append(self.bnum)

        with Connection(self.db):
            current_job = get_current_job()
            current_job.meta['building_num'] = self.bnum
            current_job.save()
Example No. 21
def scan_address_job(
    ip_address=None,
    plugins=None,
    results=None,
    automerge=AUTOMERGE_MODE,
    called_from_ui=False,
    **kwargs
):
    """The function that is actually running on the worker."""

    job = rq.get_current_job()
    available_plugins = getattr(settings, 'SCAN_PLUGINS', {}).keys()
    if not plugins:
        plugins = available_plugins
    run_postprocessing = not (set(available_plugins) - set(plugins))
    if ip_address and plugins:
        if not kwargs:
            ip, created = IPAddress.concurrent_get_or_create(
                address=ip_address,
            )
            if not (ip.snmp_name and ip.snmp_community):
                message = "SNMP name/community is missing. Forcing autoscan."
                job.meta['messages'] = [
                    (ip_address, 'ralph.scan', 'info', message)
                ]
                job.save()
                autoscan_address(ip_address)
            kwargs = {
                'snmp_community': ip.snmp_community,
                'snmp_version': ip.snmp_version,
                'http_family': ip.http_family,
                'snmp_name': ip.snmp_name,
            }
        results = _run_plugins(ip_address, plugins, job, **kwargs)
    if run_postprocessing:
        _scan_postprocessing(results, job, ip_address)
        if automerge and job.meta.get('changed', True):
            # Run only when automerge mode is enabled and some change was
            # detected. When `change` state is not available just run it...
            save_job_results(job.id)
        elif not called_from_ui and job.args and job.meta.get('changed', True):
            # Run only when some change was detected. When `change` state is
            # not available just run it...
            try:
                ip_obj = IPAddress.objects.select_related().get(
                    address=job.args[0]  # job.args[0] == ip_address
                )
            except IPAddress.DoesNotExist:
                pass
            else:
                for plugin_name in getattr(
                    settings, 'SCAN_POSTPROCESS_ENABLED_JOBS', []
                ):
                    try:
                        module = import_module(plugin_name)
                    except ImportError as e:
                        logger.error(unicode(e))
                    else:
                        module.run_job(ip_obj)
    return results
Example No. 22
def _scan_address(address, plugins, **kwargs):
    """The function that is actually running on the worker."""

    job = rq.get_current_job()
    results = {}
    job.meta['messages'] = []
    job.meta['finished'] = []
    job.meta['status'] = {}
    for plugin_name in plugins:
        message = "Running plugin %s." % plugin_name
        job.meta['messages'].append((address, plugin_name, 'info', message))
        job.save()
        try:
            module = import_module(plugin_name)
        except ImportError as e:
            message = 'Failed to import: %s.' % e
            job.meta['messages'].append((address, plugin_name, 'error', message))
            job.meta['status'][plugin_name] = 'error'
        else:
            result = module.scan_address(address, **kwargs)
            results[plugin_name] = result
            for message in result.get('messages', []):
                job.meta['messages'].append((address, plugin_name, 'warning', message))
            job.meta['status'][plugin_name] = result.get('status', 'success')
        job.meta['finished'].append(plugin_name)
        job.save()
    return results
Example No. 23
def nhmmer_search(sequence, description):
    """
    RQ worker function.
    """
    job = get_current_job()
    save_query(sequence, job.id, description)
    filename = NhmmerSearch(sequence=sequence, job_id=job.id)()
    save_results(filename, job.id)
Example No. 24
 def test(self, company_name):
     job = rq.get_current_job()
     print job.meta.keys()
     if "queue_name" in job.meta.keys():
       print RQueue()._has_completed(job.meta["queue_name"])
       print RQueue()._has_completed("queue_name")
       if RQueue()._has_completed(job.meta["queue_name"]):
         q.enqueue(Jigsaw()._upload_csv, job.meta["company_name"])
Example No. 25
def create_container(d_os):
    num = d_os['num_instance']
    if d_os['ct_type'] == "docker" and already_running(d_os['username'], d_os['code']):
        print "Ignore ct_create request"
        return

    print " -->>Running for user " + str(d_os['username']) + " with ct_type " + str(d_os['ct_type']) + " uptime is: " + str(d_os['container_uptime'])
    cur_job = get_current_job()
    cur_job.meta['ownername'] = str(d_os['username'])
    cur_job.save()
    cur_job.refresh()

    while num > 0:
        if d_os['ct_type'] == "openvz":
            cmd = "vzctl create " + d_os['cid'] + " --ostemplate " + d_os['ostemplate']
        elif d_os['ct_type'] == "aws_vm":
            ec2_conn = spoty.ec2_connect()
            config_entry = spoty.read_conf_file(d_os['repo'])  # Read distro-specific config file.
            cur_job.meta['request_status'] = "Reading config files"
            cur_job.save()
            cur_job.refresh()
            spot, bdm = spoty.req_instance_and_tag(ec2_conn, config_entry)
            cur_job.meta['request_status'] = "Creating VM"
            cur_job.save()
            cur_job.refresh()
            instance = spoty.set_bdm(spot, bdm, ec2_conn, config_entry)
            cur_job.meta['request_status'] = "Booting VM"
            cur_job.save()
            cur_job.refresh()
            # Push the instance and connection into d_os for later stages.
            d_os['instance'] = instance
            d_os['ec2_conn'] = ec2_conn
            cmd = "uname -a"
        else:
            d_os['repo_vers'] = '2'
            if d_os['code'] == 1:
                d_os['repo_vers'] = '3'
                d_os['container_uptime'] = 3600
            cmd = "docker run --user wmuser --name " + d_os['username'] + str(d_os['code']) + ' ' + d_os['options'] + d_os['port'] + d_os['repo'] + d_os['repo_vers'] + d_os['ct_cmd']
        print "Starting.."
        print cmd
        out = check_output(shlex.split(cmd))
        print "Output is:"
        print out
        d_os['imgid'] = out.rstrip()
        num -= 1
        if d_os['code'] == 1:
            programmingsite.movedata_host2ct(d_os)

    if d_os['proceed_nextq']:
        with Connection(Redis()):
            q = Queue('setupq', default_timeout=15000)
            job = q.enqueue_call(func=setup_container, args=(d_os,), result_ttl=600)
            cur_job.meta['request_status'] = "Install Software"
            cur_job.meta['setupq_jobid'] = job.id
            cur_job.save()
            cur_job.refresh()
            print cur_job.meta
Example No. 26
 def unregister_dirty(self, decrement=1):
     """Unregister current TreeItem as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     job = get_current_job()
     logger.debug('UNREGISTER %s (-%s) where job_id=%s' %
                  (self.get_cachekey(), decrement, job.id))
     r_con.zincrby(POOTLE_DIRTY_TREEITEMS, self.get_cachekey(), 0 - decrement)
Example No. 27
def send_message(**params):
    """
        Tries to send the message with specified parameters & number of retries
        
        Args:
            to (list) - List of emails to send the message to
            from_email (str) - Email to send the message on behalf of
            subject (str) - Subject of the message
            text (str) - Main text that should go in the body of the message
            cc (list) - Optional; list of emails to send the message to, with the 'cc' header
            bcc (list) - Optional; list of emails to send the message to, with the 'bcc' header
            retries (int) - Optional; number of times each Mailer implementation should try to send the message
    
            All email fields are as specified in RFC-822
    """
    retries = params.get('retries', 1)  # By default, retry one time
    
    # TODO: Random shuffling is a crude load-balancing method. Ideally we may want to consider
    # the number of requests to send message made to each Mailer and route new requests accordingly.
    mailers = get_available_mailers()
    shuffle(mailers)

    #TODO: Check if rq has any inbuilt retry mechanism that can be leveraged
    while retries >= 0:
        for mailer in mailers:
            try:
                messages_info = mailer.send_message(**params)
                
                job = get_current_job()
                job.meta['handled_by'] = mailer.__class__.__name__
                job.meta['messages_info'] = messages_info
                job.save()

                # TODO: Use a better way to store status info & metadata for it
                return

            except MailNotSentException as e:
                # TODO: Use logging here to log details of why this mail wasn't sent using
                # e.message & e.status_code. Also, add more details to MailNotSentException
                # if required
                pass
            
            except ConnectTimeout as e:
                # TODO: log
                pass
            
            # Catch other Exceptions that can be thrown here
            
            except Exception as e:
                # If the send_message method fails for any reason whatsoever, we want to use the
                # next Mailer.
                # TODO: Log. These logs will be very important as they'll let us know about failures
                # we're not anticipating
                pass

        retries = retries - 1
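A minimal sketch of how this task might be enqueued and its metadata inspected afterwards (addresses and field values are placeholders; `enqueue_call` is used so the keyword arguments are passed through to `send_message` unambiguously):

from redis import Redis
from rq import Queue

q = Queue(connection=Redis())
job = q.enqueue_call(func=send_message, kwargs={
    'to': ['alice@example.com'],
    'from_email': 'noreply@example.com',
    'subject': 'Hello',
    'text': 'Test message',
    'retries': 2,
})

# After the job has run, meta records which Mailer handled it:
job.refresh()
print(job.meta.get('handled_by'))
print(job.meta.get('messages_info'))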
Example No. 28
 def unregister_all_dirty(self, decrement=1):
     """Unregister current TreeItem and all parent paths as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     job = get_current_job()
     for p in self.all_pootle_paths():
         logger.debug('UNREGISTER %s (-%s) where job_id=%s' %
                      (p, decrement, job.id))
         r_con.zincrby(POOTLE_DIRTY_TREEITEMS, p, 0 - decrement)
Example No. 29
def _set_task_progress(task):
    """
    This method will update the job progress using the task object
    :param task : Task
    :return:
    """
    job = get_current_job()
    if job:
        job.meta['progress'] = task.export()
        job.save_meta()
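On the client side, the progress written by `_set_task_progress` can be read back by fetching the job by id (a sketch; `job_id` and the Redis connection are placeholders):

from redis import Redis
from rq.job import Job

job = Job.fetch(job_id, connection=Redis())
progress = job.meta.get('progress')  # whatever task.export() produced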
Example No. 30
def create_zim(settings, options):
    """Call the zim creator and the mailer when it is finished.
    """
    job = get_current_job()
    log_dir = settings.get('zimit.logdir', '/tmp')
    log_file = os.path.join(log_dir, "%s.log" % job.id)
    zim_creator = load_from_settings(settings, log_file)
    zim_file = zim_creator.create_zim_from_website(options['url'], options)
    output_url = settings.get('zimit.output_url')
    zim_url = urlparse.urljoin(output_url, zim_file)
    send_zim_url(settings, options['email'], zim_url)