Example #1
0
def initialize_engine():
    """Initialize the Tractor Engine client session.

    Probes the engine with a cheap query; if the session is not yet
    authenticated, logs in with resolved credentials when available,
    otherwise with the fallback service account, and verifies the login.

    Raises:
        RuntimeError: if the client still cannot query after logging in.
    """

    def _can_query():
        # A trivial jobs query succeeds only for an authenticated client.
        try:
            tractor_query.jobs("jid=0")
            return True
        except tractor_query.PasswordRequired:
            return False

    from ..constants import (
        TRACTOR_ENGINE_CREDENTIALS_RESOLVER,
        TRACTOR_ENGINE_USER_NAME,
        TRACTOR_ENGINE_USER_PASSWORD
    )

    # Check if a user and password is already set,
    # otherwise initialize with the fallback (JENKINS) account.
    if not _can_query():
        # Resolve once and reuse — the original called the resolver three
        # times, potentially re-resolving credentials on every access.
        credentials = TRACTOR_ENGINE_CREDENTIALS_RESOLVER()
        if credentials:
            user, password = credentials[0], credentials[1]
        else:
            user, password = TRACTOR_ENGINE_USER_NAME, TRACTOR_ENGINE_USER_PASSWORD
        tractor_query.setEngineClientParam(user=user, password=password)
        # `assert` is stripped under `python -O`; raise explicitly so the
        # failed-login check cannot silently disappear.
        if not _can_query():
            raise RuntimeError("Unsuccessful login attempt.")
Example #2
0
def setClientServer(server):
    """Re-point the engine client at *server* on port 1503 with debug on.

    Closes any existing client session first, then configures a new one
    against the given hostname.

    Args:
        server: hostname of the Tractor engine to connect to.

    Returns:
        str: the literal "success" once the parameters have been set.
    """
    # Call form works identically on Python 2 and 3 for a single argument.
    print(server)
    tq.closeEngineClient()
    # Removed a commented-out variant of this call that embedded credentials.
    tq.setEngineClientParam(hostname="{0}".format(server),
                            port=1503,
                            debug=True)
    return "success"
Example #3
0
File: test.py Project: utsdab/usr
def test_params():
    """Exercise setEngineClientParam with every valid key plus one invalid.

    Sets each parameter in VALID_PARAMETERS to 0 (the API does not
    type-check values), then verifies that an unknown parameter name
    raises EngineClient.InvalidParamError.
    """
    # test setting of all possible parameters
    for param in EngineClient.EngineClient.VALID_PARAMETERS:
        print("set %s" % param)
        # value of 0 is okay since type is not checked by API (we could add that though)
        tq.setEngineClientParam(**{param: 0})
    # check that invalid parameters are caught
    try:
        tq.setEngineClientParam(**{"invalidxyz": 0})
    except EngineClient.InvalidParamError:
        # Legacy `except X, err` comma syntax replaced; `err` was unused.
        # Also fixes the "Successfuly" typo in the message.
        print("Successfully caught invalid parameter exception.")
    def KillJob(self):
        """Skip and then kill task tid=1 of this job, then pause briefly.

        Re-points the engine client at the remote engine before issuing
        the skip/kill so the operations run as the configured user.
        """
        # Open a debug client session against the remote engine.
        connection = tq.setEngineClientParam(hostname="10.180.128.5",
                                             port=8080,
                                             user="******",
                                             debug=True)

        # Select the first task of this job, pulling the owning user along.
        selector = "jid={} and tid=1".format(self.GetJobId())
        task = tq.tasks(selector, columns=["Job.owner"])

        tq.skip(task)
        tq.kill(task)
        sleep(5)
Example #5
0
    def run(self, task_owner, engine, priority, alf_script, task_uuid, unique_id, dep_file, operation, tactic_file, task_str, count, **kwargs):
        """Spool *alf_script* to the remote Tractor engine and tag the job.

        Flow: record the attempt in the DB (first try only), run a remote
        file-fix command over SSH, spool the script, attach this task's id
        as job metadata, update the DB, and return the new Tractor jid.
        Any exception marks the DB row 'retry' and re-queues the task.

        NOTE(review): `self.request`, `self.log`, `self.retry` and the
        db_* helpers come from the enclosing class — presumably a
        Celery-style task; confirm against the class definition.
        """
        task_id = self.request.id
        self.task_uuid = task_uuid

        # Cleared here; error paths elsewhere presumably set it — verify.
        self.error_type = None

        # Stash the raw arguments on the instance for later handlers.
        self.task_owner = task_owner
        self.unique_id = unique_id
        self.dep_file = dep_file
        self.operation = operation
        self.alf_script = alf_script
        self.tactic_file = tactic_file
        self.task_str = task_str

        # First try: create the DB row for this spool attempt.
        if count == 0:
            self.log.info("[{0}]: Inserting into the db: spool".format(task_id))
            self.db_insert_task(self.task_uuid)
        # Retry
        else:
           #6. Retry log
           self.log.info("[{0}]: Retry # {1}, task_uuid {2}".format(task_id, count, self.task_uuid)) 

        try:
            # NOTE(review): this string is only logged, never executed, and is
            # malformed (missing quote before --engine; extra .format args are
            # silently ignored by str.format).
            cmd = "task_queue.rfm.tractor.Spool(['--user={0}', --engine={1}', '--priority={2}', '{3}'])".format(task_owner, engine, priority, alf_script, tactic_file, task_str)
            self.log.info("[{0}]: Start command: {1}".format(task_id, cmd))

            # Execute the remote fix-up step over SSH.
            import paramiko

            ssh = paramiko.SSHClient()
            # Accepts unknown host keys automatically — trusted network assumed.
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect('119.81.131.43', port=7001, username='******', password='******')

            # Fix the Maya file in place, then hand ownership to the render user.
            cmd = "{0} {1} {2};chown render:render {1};chmod 777 {1}".format(FIX_MAYA_FILE_BIN, self.tactic_file, self.task_str)
            print >>sys.stderr, "Executing cmd: ", cmd

            stdin, stdout, stderr = ssh.exec_command(cmd)

            # readlines() blocks until the remote command finishes.
            stdout_msg = stdout.readlines()
            stderr_msg = stderr.readlines()

            print >>sys.stderr, 'STDOUT: ', stdout_msg
            print >>sys.stderr, 'STDERR: ', stderr_msg

            self.log.info("[{0}]: FIX_MAYA_FILE_BIN stdout: {1}".format(task_id, stdout_msg))
            self.log.info("[{0}]: FIX_MAYA_FILE_BIN stderr: {1}".format(task_id, stderr_msg))

            import task_queue.rfm.tractor
            import json
            # Spool the Alfred script; Spool() appears to return a JSON string.
            retval = task_queue.rfm.tractor.Spool(['--user={0}'.format(task_owner), '--engine={0}'.format(engine), '--priority={0}'.format(priority), '{0}'.format(alf_script)])
            # Get the jid of the newly spooled job.
            task_jid = json.loads(retval).get('jid')

            # Connect to the tractor engine in China, for now via the proxy in sgp
            tq.setEngineClientParam(hostname="119.81.131.43", port=1503, user=task_owner, debug=True)
            # Add the upload task ID (second dash-separated field) as job metadata.
            metadata = task_id.split('-')[1]
            tq.jattr('jid={0}'.format(task_jid), key='metadata', value=metadata)
            tq.closeEngineClient()
            self.log.info("[{0}]: Updated {1} with metadata: {2}".format(task_id, task_jid, metadata))

            self.log.info("[{0}]: Finish command: {1}".format(task_id, cmd))

            # Mark the spool attempt as completed in the DB.
            self.log.info("[{0}]: Updating the db: spool".format(task_id))
            self.db_update_task(self.task_uuid, retval=retval)

            return task_jid

            # Dead test scaffold below — unreachable after the return above.
            '''
            raise Exception("foo bar")
            '''
        except Exception as e:
            #8. Mark the row for retry, then re-queue with count incremented.
            self.db_update_task(self.task_uuid, 'retry', count=count, exc=e.message)
            self.retry(args=[task_owner, engine, priority, alf_script, task_uuid, unique_id, dep_file, operation, tactic_file, task_str, count+1], exc=e, kwargs=kwargs)

        """
Example #6
0
    def run(self, task_owner, engine, priority, alf_script, task_uuid,
            unique_id, dep_file, operation, tactic_file, task_str, count,
            **kwargs):
        """Spool *alf_script* to the remote Tractor engine and tag the job.

        Auto-formatted duplicate of the Example #5 implementation: record
        the attempt in the DB (first try only), run a remote file-fix
        command over SSH, spool the script, attach this task's id as job
        metadata, update the DB, and return the new Tractor jid.  Any
        exception marks the DB row 'retry' and re-queues the task.

        NOTE(review): `self.request`, `self.log`, `self.retry` and the
        db_* helpers come from the enclosing class — presumably a
        Celery-style task; confirm against the class definition.
        """
        task_id = self.request.id
        self.task_uuid = task_uuid

        # Cleared here; error paths elsewhere presumably set it — verify.
        self.error_type = None

        # Stash the raw arguments on the instance for later handlers.
        self.task_owner = task_owner
        self.unique_id = unique_id
        self.dep_file = dep_file
        self.operation = operation
        self.alf_script = alf_script
        self.tactic_file = tactic_file
        self.task_str = task_str

        # First try: create the DB row for this spool attempt.
        if count == 0:
            self.log.info(
                "[{0}]: Inserting into the db: spool".format(task_id))
            self.db_insert_task(self.task_uuid)
        # Retry
        else:
            #6. Retry log
            self.log.info("[{0}]: Retry # {1}, task_uuid {2}".format(
                task_id, count, self.task_uuid))

        try:
            # NOTE(review): this string is only logged, never executed, and is
            # malformed (missing quote before --engine; extra .format args are
            # silently ignored by str.format).
            cmd = "task_queue.rfm.tractor.Spool(['--user={0}', --engine={1}', '--priority={2}', '{3}'])".format(
                task_owner, engine, priority, alf_script, tactic_file,
                task_str)
            self.log.info("[{0}]: Start command: {1}".format(task_id, cmd))

            # Execute the remote fix-up step over SSH.
            import paramiko

            ssh = paramiko.SSHClient()
            # Accepts unknown host keys automatically — trusted network assumed.
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect('119.81.131.43',
                        port=7001,
                        username='******',
                        password='******')

            # Fix the Maya file in place, then hand ownership to the render user.
            cmd = "{0} {1} {2};chown render:render {1};chmod 777 {1}".format(
                FIX_MAYA_FILE_BIN, self.tactic_file, self.task_str)
            print >> sys.stderr, "Executing cmd: ", cmd

            stdin, stdout, stderr = ssh.exec_command(cmd)

            # readlines() blocks until the remote command finishes.
            stdout_msg = stdout.readlines()
            stderr_msg = stderr.readlines()

            print >> sys.stderr, 'STDOUT: ', stdout_msg
            print >> sys.stderr, 'STDERR: ', stderr_msg

            self.log.info("[{0}]: FIX_MAYA_FILE_BIN stdout: {1}".format(
                task_id, stdout_msg))
            self.log.info("[{0}]: FIX_MAYA_FILE_BIN stderr: {1}".format(
                task_id, stderr_msg))

            import task_queue.rfm.tractor
            import json
            # Spool the Alfred script; Spool() appears to return a JSON string.
            retval = task_queue.rfm.tractor.Spool([
                '--user={0}'.format(task_owner), '--engine={0}'.format(engine),
                '--priority={0}'.format(priority), '{0}'.format(alf_script)
            ])
            # Get the jid of the newly spooled job.
            task_jid = json.loads(retval).get('jid')

            # Connect to the tractor engine in China, for now via the proxy in sgp
            tq.setEngineClientParam(hostname="119.81.131.43",
                                    port=1503,
                                    user=task_owner,
                                    debug=True)
            # Add the upload task ID (second dash-separated field) as job metadata.
            metadata = task_id.split('-')[1]
            tq.jattr('jid={0}'.format(task_jid),
                     key='metadata',
                     value=metadata)
            tq.closeEngineClient()
            self.log.info("[{0}]: Updated {1} with metadata: {2}".format(
                task_id, task_jid, metadata))

            self.log.info("[{0}]: Finish command: {1}".format(task_id, cmd))

            # Mark the spool attempt as completed in the DB.
            self.log.info("[{0}]: Updating the db: spool".format(task_id))
            self.db_update_task(self.task_uuid, retval=retval)

            return task_jid
            # Dead test scaffold below — unreachable after the return above.
            '''
            raise Exception("foo bar")
            '''
        except Exception as e:
            #8. Mark the row for retry, then re-queue with count incremented.
            self.db_update_task(self.task_uuid,
                                'retry',
                                count=count,
                                exc=e.message)
            self.retry(args=[
                task_owner, engine, priority, alf_script, task_uuid, unique_id,
                dep_file, operation, tactic_file, task_str, count + 1
            ],
                       exc=e,
                       kwargs=kwargs)
        """
Example #7
0
# Console logging: INFO and above, padded level name + logger name + message.
# NOTE(review): `logger` is defined earlier in this file — not visible here.
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)5.5s \t%(name)s \t%(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)

################################
# Read Tractor engine connection settings from the project environment.
env = envfac.Environment()
# tractorjob=envfac.TractorJob()
_thisuser = os.getenv("USER")
_hostname = env.config.getdefault("tractor", "engine")
_port = env.config.getdefault("tractor", "port")
_user = env.config.getdefault("tractor", "jobowner")

# Open a debug client session against the configured engine at import time.
tq.setEngineClientParam(hostname=_hostname,
                        port=int(_port),
                        user=_user,
                        debug=True)


class JobDetails(object):
    def __init__(self, jid=None):
        self.jid = jid
        _mdata = {}

        try:
            _job = tq.jobs(
                "jid in [{}]".format(jid),
                columns=["jid", "title", "metadata", "numerror", "spooled"])
            _jid = _job[0]["jid"]
            _title = _job[0]["title"]
            _mdata["jid"] = _job[0]["jid"]
# -*- coding: utf-8 -*-
import tractor.api.query as tq
import time
from dateutil.parser import parse
#!/usr/bin/env python

from datetime import datetime, timedelta
import pytz

# Connect to the local engine without debug chatter; user is a placeholder.
tq.setEngineClientParam(hostname="localhost", port=1503, user="******", debug=False)

# Buckets filled by the classification loop below.
active_jobs = []
waiting_jobs = []
paused_jobs = []

# Every job that has not finished yet, oldest jid first.
jobs = tq.jobs("not done", sortby=['jid'])

def humanize_time(secs):
    """Render a duration given in seconds as an 'HH:MM:SS' string."""
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)

# Bucket each unfinished job by state.  The original tested `x is 0`,
# which relies on CPython small-int caching (and warns since 3.8);
# use `== 0`.  Its first two branches appended to waiting_jobs whether
# numdone was 0 or not, so they collapse into one condition.
for job in jobs:
    if job['numactive'] == 0 and job['pausetime'] is None:
        # Nothing running and not paused: still waiting to start.
        waiting_jobs.append(job)
    elif job['numactive'] == 0 and job['pausetime'] is not None:
        paused_jobs.append(job)
    else:
        active_jobs.append(job)

def analyze_jobs(jobs):