def test_action(self):
     context = {
         "tasks_for": "action",
         "repository": {
             "url": "https://hg.mozilla.org/mozilla-central",
             "project": "mozilla-central",
             "level": 3,
         },
         "push": {
             "revision": "e8d2d9aff5026ef1f1777b781b47fdcbdb9d8f20",
             "owner": "*****@*****.**",
             "pushlog_id": 1556565286,
             "pushdate": 112957,
         },
         "action": {
             "name": "test-action",
             "title": "Test Action",
             "description": "Just testing",
             "taskGroupId": slugid.nice().encode("ascii"),
             "symbol": "t",
             "repo_scope": "assume:repo:hg.mozilla.org/try:action:generic",
             "cb_name": "test_action",
         },
         "input": {},
         "parameters": {},
         "now": current_json_time(),
         "taskId": slugid.nice().encode("ascii"),
         "ownTaskId": slugid.nice().encode("ascii"),
         "clientId": "testing/testing/testing",
     }
     rendered = jsone.render(self.taskcluster_yml, context)
     pprint.pprint(rendered)
     self.assertEqual(rendered["tasks"][0]["metadata"]["name"],
                      "Action: Test Action")
Example #2
async def bigwig(watcher, event):
    uuid = slugid.nice()
    # register the new tileset under its uuid (cf. bam_register later in this listing)
    await watcher.tilesets.update({
        uuid: TileSet(
            uuid=uuid,
            datafile=str(event.path),
            datatype="vector",
            filetype="bigwig",
        ).todict()
    })
async def test_run_successful_task(context_function):
    task_id = slugid.nice()
    task_group_id = slugid.nice()
    async with context_function(None) as context:
        result = await create_task(context, task_id, task_group_id)
        assert result['status']['state'] == 'pending'
        async with remember_cwd():
            os.chdir(os.path.dirname(context.config['work_dir']))
            status = await worker.run_tasks(context)
        assert status == 1
        result = await task_status(context, task_id)
        assert result['status']['state'] == 'failed'
Example #5
 async def test_run_successful_task(self, event_loop, context_function):
     task_id = slugid.nice().decode('utf-8')
     task_group_id = slugid.nice().decode('utf-8')
     with context_function(None) as context:
         result = await create_task(context, task_id, task_group_id)
         assert result['status']['state'] == 'pending'
         with remember_cwd():
             os.chdir("integration")
             status = await worker.run_loop(context, creds_key="integration_credentials")
         assert status == 1
         result = await task_status(context, task_id)
         assert result['status']['state'] == 'failed'
Example #6
 async def test_run_successful_task(self, event_loop, context_function):
     task_id = slugid.nice().decode('utf-8')
     task_group_id = slugid.nice().decode('utf-8')
     with context_function(None) as context:
         result = await create_task(context, task_id, task_group_id)
         assert result['status']['state'] == 'pending'
         with remember_cwd():
             os.chdir("integration")
             status = await worker.run_loop(
                 context, creds_key="integration_credentials")
         assert status == 1
         result = await task_status(context, task_id)
         assert result['status']['state'] == 'failed'
Example #7
def test_run_successful_task(event_loop, context_function):
    task_id = slugid.nice().decode('utf-8')
    task_group_id = slugid.nice().decode('utf-8')
    with context_function(None) as context:
        result = event_loop.run_until_complete(
            create_task(context, task_id, task_group_id))
        assert result['status']['state'] == 'pending'
        with remember_cwd():
            os.chdir(os.path.dirname(context.config['work_dir']))
            status = event_loop.run_until_complete(
                worker.run_loop(context, creds_key="integration_credentials"))
        assert status == 1
        result = event_loop.run_until_complete(task_status(context, task_id))
        assert result['status']['state'] == 'failed'
def build_config(override, basedir):
    randstring = slugid.nice()[0:6]
    config = get_unfrozen_copy(DEFAULT_CONFIG)
    ED25519_DIR = os.path.join(os.path.dirname(__file__), "data", "ed25519")
    config.update({
        'log_dir': os.path.join(basedir, "log"),
        'artifact_dir': os.path.join(basedir, "artifact"),
        'task_log_dir': os.path.join(basedir, "artifact", "public", "logs"),
        'work_dir': os.path.join(basedir, "work"),
        "worker_type": "dummy-worker-{}".format(randstring),
        "worker_id": "dummy-worker-{}".format(randstring),
        'artifact_upload_timeout': 60 * 2,
        'poll_interval': 5,
        'reclaim_interval': 5,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 9 && exit 1'),
        'task_max_timeout': 60,
        'cot_product': 'firefox',
        'ed25519_private_key_path': os.path.join(ED25519_DIR, 'scriptworker_private_key'),
        'ed25519_public_keys': {
            'docker-worker': ['8dBv4bbnZ3RsDzQiPKTJ18uo3hq5Rjm94JG6HXzAcBM='],
            'generic-worker': ['PkI5NslA78wSsYaKNzKq7iD7MLQy7W6wYO/0WFd4tWM='],
            'scriptworker': ['KxYrV3XAJ3uOyAUX0Wcl1Oeu6GSMrI/5hOn39q8Lf0I='],
        },
    })
    creds = read_integration_creds()
    del(config['credentials'])
    if isinstance(override, dict):
        config.update(override)
    with open(os.path.join(basedir, "config.json"), "w") as fh:
        json.dump(config, fh, indent=2, sort_keys=True)
    config = apply_product_config(config)
    return config, creds
async def test_private_artifacts(context_function):
    task_group_id = task_id = slugid.nice()
    override = {
        'task_script': (
            'bash', '-c',
            '>&2 echo'
        ),
    }
    async with context_function(override) as context:
        result = await create_task(context, task_id, task_group_id)
        assert result['status']['state'] == 'pending'
        path = os.path.join(context.config['artifact_dir'], 'SampleArtifacts/_/X.txt')
        utils.makedirs(os.path.dirname(path))
        with open(path, "w") as fh:
            fh.write("bar")
        async with remember_cwd():
            os.chdir(os.path.dirname(context.config['work_dir']))
            status = await worker.run_tasks(context)
        assert status == 0
        result = await task_status(context, task_id)
        assert result['status']['state'] == 'completed'
        url = artifacts.get_artifact_url(context, task_id, 'SampleArtifacts/_/X.txt')
        path2 = os.path.join(context.config['work_dir'], 'downloaded_file')
        await utils.download_file(context, url, path2)
        with open(path2, "r") as fh:
            contents = fh.read().strip()
        assert contents == 'bar'
 def test_cron(self):
     context = {
         "tasks_for": "cron",
         "repository": {
             "url": "https://hg.mozilla.org/mozilla-central",
             "project": "mozilla-central",
             "level": 3,
         },
         "push": {
             "revision": "e8aebe488b2f2e567940577de25013d00e818f7c",
             "pushlog_id": -1,
             "pushdate": 0,
             "owner": "cron",
         },
         "cron": {
             "task_id": "<cron task id>",
             "job_name": "test",
             "job_symbol": "T",
             "quoted_args": "abc def",
         },
         "now": current_json_time(),
         "ownTaskId": slugid.nice().encode("ascii"),
     }
     rendered = jsone.render(self.taskcluster_yml, context)
     pprint.pprint(rendered)
     self.assertEqual(rendered["tasks"][0]["metadata"]["name"],
                      "Decision Task for cron job test")
async def test_cancel_task():
    task_id = slugid.nice()
    partial_config = {
        'invalid_reclaim_status': 19,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 30 && exit 1'),
    }
    # Don't use temporary credentials from claimTask, since they don't allow us
    # to cancel the created task.
    async with get_context(partial_config) as context:
        result = await create_task(context, task_id, task_id)
        assert result['status']['state'] == 'pending'
        cancel_fut = asyncio.ensure_future(do_cancel(context, task_id))
        task_fut = asyncio.ensure_future(run_task_until_stopped(context))
        await utils.raise_future_exceptions([cancel_fut, task_fut])
        status = await context.queue.status(task_id)
        assert len(status['status']['runs']) == 1
        assert status['status']['state'] == 'exception'
        assert status['status']['runs'][0]['reasonResolved'] == 'canceled'
        log_url = context.queue.buildUrl(
            'getLatestArtifact', task_id, 'public/logs/live_backing.log'
        )
        log_path = os.path.join(context.config['work_dir'], 'log')
        await utils.download_file(context, log_url, log_path)
        with open(log_path) as fh:
            contents = fh.read()
        assert contents.rstrip() == "bar\nfoo\nAutomation Error: python exited with signal -15"
Example #12
 def retrigger(self, task_id, count=1, retries=5):
     payload = self.queue.task(task_id)
     now = taskcluster.fromNow("0 days")
     created = datetime.strptime(payload["created"], _DATE_FMT)
     deadline = datetime.strptime(payload["deadline"], _DATE_FMT)
     expiration = datetime.strptime(payload["expires"], _DATE_FMT)
     to_dead = deadline - created
     to_expire = expiration - created
     payload["deadline"] = taskcluster.stringDate(
         taskcluster.fromNow(
             "%d days %d seconds" % (to_dead.days, to_dead.seconds), now))
     payload["expires"] = taskcluster.stringDate(
         taskcluster.fromNow(
             "%d days %d seconds" % (to_expire.days, to_expire.seconds),
             now))
     payload["created"] = taskcluster.stringDate(now)
     payload["retries"] = 0
     rv = []
     while count > 0:
         new_id = slugid.nice()
         r = retries
         while r > 0:
             try:
                 rv.append(self.queue.createTask(new_id, payload))
                 break
             except Exception as e:
                 r -= 1
                  logger.warning(traceback.format_exc())
         count -= 1
     return rv or None
Example #13
def viewconfs(request):
    '''
    Retrieve a viewconf with a given uid

    Args:

    request (django.http.HTTPRequest): The request object containing the
        uid (e.g. d=hg45ksdjfds) that identifies the viewconf.

    Return:

    JsonResponse: on POST, a JSON object with the uid of the stored
        viewconf; on GET, the stored viewconf identified by the `d`
        query parameter.
    '''
    if request.method == 'POST':
        uuid = slugid.nice()
        viewconf = request.body

        serializer = tss.ViewConfSerializer(data={'viewconf': request.body})
        if not serializer.is_valid():
            return JsonResponse(
                {'error': 'Serializer not valid'},
                status=rfs.HTTP_400_BAD_REQUEST
            )

        serializer.save(uuid=uuid, viewconf=viewconf)

        return JsonResponse({'uid': uuid})

    uuid = request.GET.get('d')

    obj = tm.ViewConf.objects.get(uuid=uuid)
    return JsonResponse(json.loads(obj.viewconf))
async def test_private_artifacts(context_function):
    task_group_id = task_id = slugid.nice()
    override = {
        'task_script': ('bash', '-c', '>&2 echo'),
    }
    async with context_function(override) as context:
        result = await create_task(context, task_id, task_group_id)
        assert result['status']['state'] == 'pending'
        path = os.path.join(context.config['artifact_dir'],
                            'SampleArtifacts/_/X.txt')
        utils.makedirs(os.path.dirname(path))
        with open(path, "w") as fh:
            fh.write("bar")
        async with remember_cwd():
            os.chdir(os.path.dirname(context.config['work_dir']))
            status = await worker.run_tasks(context)
        assert status == 0
        result = await task_status(context, task_id)
        assert result['status']['state'] == 'completed'
        url = artifacts.get_artifact_url(context, task_id,
                                         'SampleArtifacts/_/X.txt')
        path2 = os.path.join(context.config['work_dir'], 'downloaded_file')
        await utils.download_file(context, url, path2)
        with open(path2, "r") as fh:
            contents = fh.read().strip()
        assert contents == 'bar'
Example #15
 def __init__(self,
              tileset_info=None,
              tiles=None,
              chromsizes=lambda: None,
              uuid=None,
              private=False,
              name='',
              datatype=''):
     '''
     Parameters
     ----------
     tileset_info: function
         A function returning the information (min_pos, max_pos, max_width, max_zoom),
         for this tileset.
     tiles: function
         A function returning tile data for this tileset
     '''
     self.name = name
     self.tileset_info_fn = tileset_info
     self.tiles_fn = tiles
     self.chromsizes_fn = chromsizes
     self.private = private
     if uuid is not None:
         self.uuid = uuid
     else:
         self.uuid = slugid.nice().decode('utf-8')
Example #16
    def show(self):
        # credit for this code goes to the higlass-python team: https://github.com/higlass/higlass-python
        for p in list(self.processes.keys()):
            if self.port == p:
                print("delete ", self.processes[p])
                self.processes[p].kill()
                del self.processes[p]
                time.sleep(0.5)
        #print(self.processes)
        uuid = slugid.nice()

        target = partial(eventlet.wsgi.server,
                         sock=eventlet.listen(('localhost', self.port)),
                         site=fApp)

        self.processes[self.port] = mp.Process(
            target=target)  #self.startServer, args=(q,))
        self.processes[self.port].start()

        self.connected = False
        while not self.connected:
            try:
                url = "http://{}:{}/".format('localhost', self.port)
                r = requests.head(url)
                if r.ok:
                    self.connected = True
            except requests.ConnectionError:
                time.sleep(0.2)
Example #17
    def start(self, log_file='/tmp/hgserver.log', log_level=logging.INFO):

        for puid in list(self.processes.keys()):
            print("terminating:", puid)
            self.processes[puid].terminate()
            del self.processes[puid]

        self.app = create_app(
            self.tilesets,
            __name__,
            log_file=log_file,
            log_level=log_level)

        # we're going to assign a uuid to each server process so that if anything
        # goes wrong, the variable referencing the process doesn't get lost
        uuid = slugid.nice().decode('utf8')
        if self.port is None:
            self.port = get_open_port()
        target = partial(self.app.run,
                         threaded=True,
                         debug=True,
                         host='0.0.0.0',
                         port=self.port,
                         use_reloader=False)
        self.processes[uuid] = mp.Process(target=target)
        self.processes[uuid].start()
        self.connected = False
        while not self.connected:
            try:
                url = 'http://{}:{}/api/v1'.format(self.host, self.port)
                r = requests.head(url)
                if r.ok:
                    self.connected = True
            except requests.ConnectionError as err:
                time.sleep(.2)
Example #18
class Tileset(models.Model):
    created = models.DateTimeField(auto_now_add=True)
    uuid = models.CharField(max_length=100,
                            unique=True,
                            default=lambda: slugid.nice().decode('utf-8'))
    # processed_file = models.TextField()
    datafile = models.FileField(upload_to='uploads')
    filetype = models.TextField()
    datatype = models.TextField(default='unknown')

    coordSystem = models.TextField()
    coordSystem2 = models.TextField(default='')

    owner = models.ForeignKey(
        'auth.User',
        related_name='tilesets',
        on_delete=models.CASCADE,
        blank=True,
        null=True  # Allow anonymous owner
    )
    private = models.BooleanField(default=False)
    name = models.TextField(blank=True)

    class Meta:
        ordering = ('created', )
        permissions = (('view_tileset', "View tileset"), )

    def __str__(self):
        '''
        Get a string representation of this model. Hopefully useful for the
        admin interface.
        '''
        return "Tileset [name: " + self.name + '] [ft: ' + self.filetype + ']'
Example #19
def build_config(override, basedir):
    randstring = slugid.nice()[0:6].decode('utf-8')
    config = dict(deepcopy(DEFAULT_CONFIG))
    GPG_HOME = os.path.join(os.path.dirname(__file__), "data", "gpg")
    config.update({
        'log_dir': os.path.join(basedir, "log"),
        'artifact_dir': os.path.join(basedir, "artifact"),
        'task_log_dir': os.path.join(basedir, "artifact", "public", "logs"),
        'work_dir': os.path.join(basedir, "work"),
        "worker_type": "dummy-worker-{}".format(randstring),
        "worker_id": "dummy-worker-{}".format(randstring),
        'artifact_upload_timeout': 60 * 2,
        'artifact_expiration_hours': 1,
        'gpg_home': GPG_HOME,
        "gpg_encoding": 'utf-8',
        "gpg_options": None,
        "gpg_path": os.environ.get("GPG_PATH", None),
        "gpg_public_keyring": os.path.join(GPG_HOME, "pubring.gpg"),
        "gpg_secret_keyring": os.path.join(GPG_HOME, "secring.gpg"),
        "gpg_use_agent": None,
        'reclaim_interval': 5,
        'credential_update_interval': .1,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 9 && exit 1'),
        'task_max_timeout': 60,
    })
    creds = read_integration_creds()
    del(config['credentials'])
    if isinstance(override, dict):
        config.update(override)
    with open(os.path.join(basedir, "config.json"), "w") as fh:
        json.dump(config, fh, indent=2, sort_keys=True)
    return config, creds
async def test_cancel_task():
    task_id = slugid.nice()
    partial_config = {
        "invalid_reclaim_status": 19,
        "task_script": ("bash", "-c", ">&2 echo bar && echo foo && sleep 30 && exit 1"),
    }
    # Don't use temporary credentials from claimTask, since they don't allow us
    # to cancel the created task.
    async with get_context(partial_config) as context:
        result = await create_task(context, task_id, task_id)
        assert result["status"]["state"] == "pending"
        cancel_fut = asyncio.ensure_future(do_cancel(context, task_id))
        task_fut = asyncio.ensure_future(run_task_until_stopped(context))
        await utils.raise_future_exceptions([cancel_fut, task_fut])
        status = await context.queue.status(task_id)
        assert len(status["status"]["runs"]) == 1
        assert status["status"]["state"] == "exception"
        assert status["status"]["runs"][0]["reasonResolved"] == "canceled"
        log_url = context.queue.buildUrl("getLatestArtifact", task_id,
                                         "public/logs/live_backing.log")
        log_path = os.path.join(context.config["work_dir"], "log")
        await utils.download_file(context, log_url, log_path)
        with open(log_path) as fh:
            contents = fh.read()
        assert contents.rstrip() == "bar\nfoo\nAutomation Error: python exited with signal -15"
Example #21
def mozharness_on_buildbot_bridge(config, job, taskdesc):
    run = job['run']
    worker = taskdesc['worker']
    branch = config.params['project']
    product = run.get('index', {}).get('product', 'firefox')

    worker.pop('env', None)

    if 'devedition' in job['attributes']['build_platform']:
        buildername = 'OS X 10.7 {} devedition build'.format(branch)
    else:
        buildername = 'OS X 10.7 {} build'.format(branch)

    worker.update({
        'buildername': buildername,
        'sourcestamp': {
            'branch': branch,
            'repository': config.params['head_repository'],
            'revision': config.params['head_rev'],
        },
        'properties': {
            'product': product,
            'who': config.params['owner'],
            'upload_to_task_id': slugid.nice(),
        }
    })
async def test_cancel_task():
    task_id = slugid.nice()
    partial_config = {
        'invalid_reclaim_status': 19,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 30 && exit 1'),
    }
    # Don't use temporary credentials from claimTask, since they don't allow us
    # to cancel the created task.
    async with get_context(partial_config) as context:
        result = await create_task(context, task_id, task_id)
        assert result['status']['state'] == 'pending'
        cancel_fut = asyncio.ensure_future(do_cancel(context, task_id))
        task_fut = asyncio.ensure_future(run_task_until_stopped(context))
        await utils.raise_future_exceptions([cancel_fut, task_fut])
        status = await context.queue.status(task_id)
        assert len(status['status']['runs']) == 1
        assert status['status']['state'] == 'exception'
        assert status['status']['runs'][0]['reasonResolved'] == 'canceled'
        log_url = context.queue.buildUrl('getLatestArtifact', task_id,
                                         'public/logs/live_backing.log')
        log_path = os.path.join(context.config['work_dir'], 'log')
        await utils.download_file(context, log_url, log_path)
        with open(log_path) as fh:
            contents = fh.read()
        assert contents.rstrip() == "bar\nfoo\nAutomation Error: python exited with signal -15"
Example #23
    def __init__(
        self,
        tileset_info=None,
        tiles=None,
        chromsizes=lambda: None,
        uuid=None,
        private=False,
        name="",
        datatype="",
        track_type=None,
        track_position=None,
    ):
        """
        Parameters
        ----------
        tileset_info: function
            A function returning the information (min_pos, max_pos, max_width, max_zoom),
            for this tileset.
        tiles: function
            A function returning tile data for this tileset
        """
        self.name = name
        self.datatype = datatype
        self.tileset_info_fn = tileset_info
        self.tiles_fn = tiles
        self.chromsizes_fn = chromsizes
        self.private = private
        self.track_type = track_type
        self.track_position = track_position

        if uuid is not None:
            self.uuid = uuid
        else:
            self.uuid = slugid.nice()
Example #24
def build_config(override):
    cwd = os.getcwd()
    basedir = os.path.join(cwd, "integration")
    if not os.path.exists(basedir):
        os.makedirs(basedir)
    randstring = slugid.nice()[0:6].decode('utf-8')
    config = deepcopy(DEFAULT_CONFIG)
    config.update({
        'log_dir': os.path.join(basedir, "log"),
        'artifact_dir': os.path.join(basedir, "artifact"),
        'work_dir': os.path.join(basedir, "work"),
        "worker_type": "dummy-worker-{}".format(randstring),
        "worker_id": "dummy-worker-{}".format(randstring),
        'artifact_upload_timeout': 60 * 2,
        'artifact_expiration_hours': 1,
        'reclaim_interval': 5,
        'credential_update_interval': .1,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 9 && exit 1'),
        'task_max_timeout': 60,
    })
    creds = read_integration_creds()
    del(config['credentials'])
    if isinstance(override, dict):
        config.update(override)
    with open(os.path.join(basedir, "config.json"), "w") as fh:
        json.dump(config, fh, indent=2, sort_keys=True)
    return config, creds
Example #25
def invalidate_cloudfront_cache(paths):
    sanitized_paths = [
        path if path.startswith('/') else '/{}'.format(path) for path in paths
    ]
    number_of_paths = len(sanitized_paths)
    print('Invalidating {} CloudFront paths: {}'.format(
        number_of_paths, sanitized_paths))

    distribution_id = os.environ.get('CLOUDFRONT_DISTRIBUTION_ID', None)
    if distribution_id:
        request_id = slugid.nice()

        try:
            cloudfront.create_invalidation(DistributionId=distribution_id,
                                           InvalidationBatch={
                                               'Paths': {
                                                   'Quantity': number_of_paths,
                                                   'Items': sanitized_paths,
                                               },
                                               'CallerReference': request_id,
                                           })
        except ClientError as e:
            print('WARN: Could not invalidate cache. Reason: {}'.format(e))
    else:
        print('CLOUDFRONT_DISTRIBUTION_ID not set. No cache to invalidate.')
def build_config(override, basedir):
    randstring = slugid.nice()[0:6]
    config = get_unfrozen_copy(DEFAULT_CONFIG)
    GPG_HOME = os.path.join(os.path.dirname(__file__), "data", "gpg")
    ED25519_DIR = os.path.join(os.path.dirname(__file__), "data", "ed25519")
    config.update({
        'log_dir': os.path.join(basedir, "log"),
        'artifact_dir': os.path.join(basedir, "artifact"),
        'task_log_dir': os.path.join(basedir, "artifact", "public", "logs"),
        'work_dir': os.path.join(basedir, "work"),
        "worker_type": "dummy-worker-{}".format(randstring),
        "worker_id": "dummy-worker-{}".format(randstring),
        'artifact_upload_timeout': 60 * 2,
        'gpg_home': GPG_HOME,
        "gpg_encoding": 'utf-8',
        "gpg_options": None,
        "gpg_path": os.environ.get("GPG_PATH", None),
        "gpg_public_keyring": os.path.join(GPG_HOME, "pubring.gpg"),
        "gpg_secret_keyring": os.path.join(GPG_HOME, "secring.gpg"),
        "gpg_use_agent": None,
        'poll_interval': 5,
        'reclaim_interval': 5,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 9 && exit 1'),
        'task_max_timeout': 60,
        'cot_product': 'firefox',
        'ed25519_private_key_path': os.path.join(ED25519_DIR, 'scriptworker_private_key'),
        'ed25519_public_keys': {
            'docker-worker': ['8dBv4bbnZ3RsDzQiPKTJ18uo3hq5Rjm94JG6HXzAcBM='],
            'generic-worker': ['PkI5NslA78wSsYaKNzKq7iD7MLQy7W6wYO/0WFd4tWM='],
            'scriptworker': ['KxYrV3XAJ3uOyAUX0Wcl1Oeu6GSMrI/5hOn39q8Lf0I='],
        },
    })
    creds = read_integration_creds()
    del (config['credentials'])
    if isinstance(override, dict):
        config.update(override)
    with open(os.path.join(basedir, "config.json"), "w") as fh:
        json.dump(config, fh, indent=2, sort_keys=True)
    config = apply_product_config(config)
    return config, creds
Example #27
def slugid_nice():
    """ Returns a new, random utf-8 slug (based on uuid4).

    :return: slug representation of a new uuid4, as a utf-8 string
    :rtype: str
    """
    return slugid.nice().decode('utf-8')
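The examples in this listing disagree on whether slugid.nice() needs a .decode('utf-8') call: older slugid releases returned bytes, while newer ones return str. A minimal sketch of a version-agnostic wrapper (the helper name nice_str is ours, not from any of the projects above):

import slugid

def nice_str():
    # normalize slugid.nice() to str regardless of the installed slugid version
    slug = slugid.nice()
    return slug.decode('utf-8') if isinstance(slug, bytes) else slug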
slugids = {}  # module-level cache (added so the snippet is self-contained)

def as_slugid(name):
    if name not in slugids:
        slugids[name] = slugid.nice().decode()
        print('cache miss', name, slugids[name])
    else:
        print('cache hit', name, slugids[name])
    return slugids[name]
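A brief usage sketch of the cache above: repeated calls with the same name return the same slug, so a name can stand in for a stable taskId (presumably the same role as the {{#as_slugid}}..{{/as_slugid}} helper referenced in the decision-task example at the end of this listing).

build_id = as_slugid('build')           # cache miss: a new slug is minted and stored
assert as_slugid('build') == build_id   # cache hit: the same slug is returned
assert as_slugid('test') != build_id    # a different name gets its own slug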
Example #29
    def __init__(
        self,
        tilesets,
        host="localhost",
        port=None,
        name=None,
        fuse=True,
        tmp_dir=OS_TEMPDIR,
        log_level=logging.INFO,
        log_file=None,
    ):
        self.name = name or __name__.split(".")[0] + '-' + slugid.nice()[:8]
        self.tilesets = tilesets
        self.host = host
        self.port = port
        if fuse:
            self.fuse_process = FuseProcess(op.join(tmp_dir, 'higlass-python'))
            self.fuse_process.setup()
        else:
            self.fuse_process = None

        self.app = create_app(self.name, self.tilesets, fuse=self.fuse_process)
        if log_file:
            self.log = None
            handler = logging.handlers.RotatingFileHandler(log_file,
                                                           maxBytes=100000,
                                                           backupCount=1)
        else:
            self.log = StringIO()
            handler = logging.StreamHandler(self.log)

        handler.setLevel(log_level)
        self.app.logger.addHandler(handler)
def ingest(filename=None, datatype=None, filetype=None, coordSystem='', coordSystem2='', uid=None, name=None, no_upload=False, project_name='', temporary=False, **ignored):
    uid = uid or slugid.nice().decode('utf-8')
    name = name or op.split(filename)[1]

    if not filetype:
        raise CommandError('Filetype has to be specified')

    django_file = None

    # if we're ingesting a url, place it relative to the httpfs directories
    # and append two dots at the end
    if filename[:7] == 'http://':
        filename = "{}..".format(filename.replace('http:/', 'http'))
        no_upload=True
    if filename[:8] == 'https://':
        filename = "{}..".format(filename.replace('https:/', 'https'))
        no_upload=True
    if filename[:6] == 'ftp://':
        filename = "{}..".format(filename.replace('ftp:/', 'ftp'))
        no_upload=True

    # it's a regular file on the filesystem, not a file being entered as a url
    if no_upload:
        if (not op.isfile(op.join(settings.MEDIA_ROOT, filename)) and
            not op.islink(op.join(settings.MEDIA_ROOT, filename)) and
            not any([filename.startswith('http/'), filename.startswith('https/'), filename.startswith('ftp/')])):
            raise CommandError('File does not exist under media root')
        filename = op.join(settings.MEDIA_ROOT, filename)
        django_file = filename
    else:
        if os.path.islink(filename):
            django_file = File(open(os.readlink(filename),'rb'))
        else:
            django_file = File(open(filename,'rb'))

        # remove the filepath of the filename
        django_file.name = op.split(django_file.name)[1]

    if filetype.lower() == 'bigwig':
        coordSystem = check_for_chromsizes(filename, coordSystem)

    try:
        project_obj = tm.Project.objects.get(name=project_name)
    except dce.ObjectDoesNotExist:
        project_obj = tm.Project.objects.create(
            name=project_name
        )

    return tm.Tileset.objects.create(
        datafile=django_file,
        filetype=filetype,
        datatype=datatype,
        coordSystem=coordSystem,
        coordSystem2=coordSystem2,
        owner=None,
        project=project_obj,
        uuid=uid,
        temporary=temporary,
        name=name)
Example #31
    def __init__(self,
                 tracks=[],
                 x=0,
                 y=0,
                 width=12,
                 height=6,
                 initialXDomain=None,
                 initialYDomain=None,
                 uid=None):
        '''
        Add a new view

        Parameters
        --------
        tracks: []
            A list of Tracks to include in this view
        x: int
            The position of this view on the grid
        y: int
            The position of this view on the grid
        width: int
            The width of this of view on a 12 unit grid
        height: int
            The height of this view. The height is proportional
            to the height of all the views present.
        initialXDomain: [int, int]
            The initial x range of the view
        initialYDomain: [int, int]
            The initial y range of the view
        uid: string
            The uid of the new view
        '''
        if uid is None:
            uid = slugid.nice().decode('utf8')

        self.tracks = tracks
        self.uid = uid

        self.viewconf = {
            'uid': uid,
            'tracks': {
                'top': [],
                'center': [],
                'left': [],
                'right': [],
                'bottom': []
            },
            "layout": {
                "w": width,
                "h": height,
                "x": x,
                "y": y
            },
        }

        if initialXDomain is not None:
            self.viewconf['initialXDomain'] = initialXDomain
        if initialYDomain is not None:
            self.viewconf['initialYDomain'] = initialYDomain
Example #32
 def phase_signoffs(self, phase):
     return [
         Signoff(uid=slugid.nice(),
                 name=req["name"],
                 description=req["description"],
                 permissions=req["permissions"]) for req in SIGNOFFS.get(
                     self.branch, {}).get(self.product, {}).get(phase, [])
     ]
Example #33
    def add_sync(self, locks_name, views_to_sync):
        lock_id = slugid.nice()
        for view_uid in [v.uid for v in views_to_sync]:
            if lock_id not in self.viewconf[locks_name]['locksDict']:
                self.viewconf[locks_name]['locksDict'][lock_id] = {}

            self.viewconf[locks_name]["locksDict"][lock_id][view_uid] = (1, 1, 1)
            self.viewconf[locks_name]["locksByViewUid"][view_uid] = lock_id
Example #34
 def phase_signoffs(self, phase):
     return [
         XPISignoff(uid=slugid.nice(),
                    name=req["name"],
                    description=req["description"],
                    permissions=req["permissions"]) for req in SIGNOFFS.get(
                        "xpi", {}).get(self.xpi_type, {}).get(phase, [])
     ]
 def phase_signoffs(branch, product, phase):
     return [
         Signoff(uid=slugid.nice(),
                 name=req['name'],
                 description=req['description'],
                 permissions=req['permissions'])
         for req in shipit_api.config.SIGNOFFS.get(branch, {}).get(
             product, {}).get(phase, [])
     ]
Example #36
async def bam_register(watcher, event, bam_index_path):
    if bam_index_path.exists():
        uuid = slugid.nice()
        await watcher.tilesets.update({
            uuid: TileSet(
                uuid=uuid,
                datafile=str(event.path),
                datatype="reads",
                filetype="bam",
                indexfile=str(bam_index_path),
            ).todict()
        })
Example #37
 def phase_signoffs(branch, product, phase):
     return [
         Signoff(
             uid=slugid.nice(),
             name=req['name'],
             description=req['description'],
             permissions=req['permissions']
         )
         for req in
         shipit_api.config.SIGNOFFS.get(branch, {}).get(product, {}).get(phase, [])
     ]
async def test_run_maxtimeout(context_function):
    task_id = slugid.nice()
    partial_config = {
        'task_max_timeout': 2,
        'task_script': ('bash', '-c', '>&2 echo bar && echo foo && sleep 30 && exit 1'),
    }
    async with context_function(partial_config) as context:
        result = await create_task(context, task_id, task_id)
        assert result['status']['state'] == 'pending'
        async with remember_cwd():
            os.chdir(os.path.dirname(context.config['work_dir']))
            status = await worker.run_tasks(context)
            assert status == context.config['task_max_timeout_status']
Example #39
 def test_run_maxtimeout(self, event_loop, context_function):
     task_id = slugid.nice().decode('utf-8')
     task_group_id = slugid.nice().decode('utf-8')
     partial_config = {
         'task_max_timeout': 2,
     }
     with context_function(partial_config) as context:
         result = event_loop.run_until_complete(
             create_task(context, task_id, task_group_id)
         )
         assert result['status']['state'] == 'pending'
         with remember_cwd():
             os.chdir("integration")
             with pytest.raises(RuntimeError):
                 event_loop.run_until_complete(
                     worker.run_loop(context, creds_key="integration_credentials")
                 )
                 # Because we're using asyncio to kill tasks in the loop,
                 # we're going to hit a RuntimeError
         result = event_loop.run_until_complete(task_status(context, task_id))
         # TODO We need to be able to ensure this is 'failed'.
         assert result['status']['state'] in ('failed', 'running')
Example #40
def generate_action_task(decision_task_id, action_name, input_, actions):
    target_action = find_action(action_name, actions)
    context = copy.deepcopy(actions['variables'])  # parameters
    action_task_id = slugid.nice()
    context.update({
        'input': input_,
        'taskGroupId': decision_task_id,
        'ownTaskId': action_task_id,
        'taskId': None,
        'task': None,
    })
    action_task = copy.deepcopy(target_action['task'])
    log.info('TASK: %s', action_task)
    return action_task_id, action_task, context
Example #41
def generate_action_task(decision_task_id, action_task_input):
    actions = fetch_actions_json(decision_task_id)
    relpro = find_action("release-promotion", actions)
    context = copy.deepcopy(actions["variables"])  # parameters
    action_task_id = slugid.nice()
    context.update({
        "input": action_task_input,
        "ownTaskId": action_task_id,
        "taskId": None,
        "task": None,
        "taskGroupId": decision_task_id,
    })
    action_task = jsone.render(relpro["task"], context)
    return action_task_id, action_task
async def test_shutdown():
    task_id = slugid.nice()
    partial_config = {
        'task_script': ('bash', '-c', '>&2 echo running task script && sleep 30 && exit 1'),
    }
    # Don't use temporary credentials from claimTask, since they don't allow us
    # to cancel the created task.
    async with get_context(partial_config) as context:
        result = await create_task(context, task_id, task_id)
        assert result['status']['state'] == 'pending'
        fake_cot_log = os.path.join(context.config['artifact_dir'], 'public', 'logs', 'chain_of_trust.log')
        fake_other_artifact = os.path.join(context.config['artifact_dir'], 'public', 'artifact.apk')

        with open(fake_cot_log, 'w') as file:
            file.write('CoT logs')
        with open(fake_other_artifact, 'w') as file:
            file.write('unrelated artifact')
        cancel_fut = asyncio.ensure_future(do_shutdown(context))
        task_fut = asyncio.ensure_future(run_task_until_stopped(context))
        await utils.raise_future_exceptions([cancel_fut, task_fut])
        status = await context.queue.status(task_id)
        assert len(status['status']['runs']) == 2  # Taskcluster should create a replacement task
        assert status['status']['runs'][0]['state'] == 'exception'
        assert status['status']['runs'][0]['reasonResolved'] == 'worker-shutdown'
        log_url = context.queue.buildUrl(
            'getArtifact', task_id, 0, 'public/logs/live_backing.log'
        )
        cot_log_url = context.queue.buildUrl(
            'getArtifact', task_id, 0, 'public/logs/chain_of_trust.log'
        )
        other_artifact_url = context.queue.buildUrl(
            'getArtifact', task_id, 0, 'public/artifact.apk'
        )
        log_path = os.path.join(context.config['work_dir'], 'log')
        cot_log_path = os.path.join(context.config['work_dir'], 'cot_log')
        other_artifact_path = os.path.join(context.config['work_dir'], 'artifact.apk')
        await utils.download_file(context, log_url, log_path)
        await utils.download_file(context, cot_log_url, cot_log_path)
        with pytest.raises(Download404):
            await utils.download_file(context, other_artifact_url, other_artifact_path)

        with open(log_path) as fh:
            contents = fh.read()
        assert contents.rstrip() == "running task script\nAutomation Error: python exited with signal -15"

        with open(cot_log_path) as fh:
            contents = fh.read()
        assert contents.rstrip() == "CoT logs"
Example #43
def mozharness_on_buildbot_bridge(config, job, taskdesc):
    run = job['run']
    worker = taskdesc['worker']
    branch = config.params['project']
    product = run.get('index', {}).get('product', 'firefox')

    worker.pop('env', None)

    worker.update({
        'buildername': 'OS X 10.7 {} build'.format(branch),
        'sourcestamp': {
            'branch': branch,
            'repository': config.params['head_repository'],
            'revision': config.params['head_rev'],
        },
        'properties': {
            'product': product,
            'who': config.params['owner'],
            'upload_to_task_id': slugid.nice(),
        }
    })
Example #44
def bb_ci_worker(config, worker):
    worker['properties'].update({
        'who': config.params['owner'],
        'upload_to_task_id': slugid.nice(),
    })
Example #45
def cmd(ctx,
        github_commit,
        channel,
        owner,
        pull_request,
        task_id,
        cache_urls,
        nix_instantiate,
        taskcluster_client_id,
        taskcluster_access_token,
        dry_run,
        ):
    '''A tool to be run on each commit.
    '''

    taskcluster_secret = 'repo:github.com/mozilla-releng/services:branch:' + channel
    if pull_request is not None:
        taskcluster_secret = 'repo:github.com/mozilla-releng/services:pull-request'

    taskcluster_queue = cli_common.taskcluster.get_service('queue')
    taskcluster_notify = cli_common.taskcluster.get_service('notify')

    click.echo(' => Retrieving taskGroupId ... ', nl=False)
    with click_spinner.spinner():
        task = taskcluster_queue.task(task_id)
        if 'taskGroupId' not in task:
            please_cli.utils.check_result(1, 'taskGroupId does not exist in task: {}'.format(json.dumps(task)))
        task_group_id = task['taskGroupId']
        please_cli.utils.check_result(0, '')
        click.echo('    taskGroupId: ' + task_group_id)

    if channel in please_cli.config.DEPLOY_CHANNELS:
        taskcluster_notify.irc(dict(channel='#release-services',
                                    message=f'New deployment on {channel} is about to start: https://tools.taskcluster.net/groups/{task_group_id}'))

    message = ('release-services team is about to release a new version of mozilla/release-services '
               '(*.mozilla-releng.net, *.moz.tools). Any alerts coming up soon will be best directed '
               'to #release-services IRC channel. Automated message (such as this) will be sent '
               'once deployment is done. Thank you.')

    # This message is only sent when the channel is production.
    if channel == 'production':
        for msgChannel in ['#ci', '#moc']:
            taskcluster_notify.irc(dict(channel=msgChannel, message=message))

    click.echo(' => Checking cache which project needs to be rebuilt')
    build_projects = []
    project_hashes = dict()
    for project in sorted(PROJECTS):
        click.echo('     => ' + project)
        project_exists_in_cache, project_hash = ctx.invoke(
            please_cli.check_cache.cmd,
            project=project,
            cache_urls=cache_urls,
            nix_instantiate=nix_instantiate,
            channel=channel,
            indent=8,
            interactive=False,
        )
        project_hashes[project] = project_hash
        if not project_exists_in_cache:
            build_projects.append(project)

    projects_to_deploy = []

    if channel in please_cli.config.DEPLOY_CHANNELS:
        click.echo(' => Checking which project needs to be redeployed')

        # TODO: get status for our index branch
        deployed_projects = {}

        for project_name in sorted(PROJECTS):
            deployed_projects.get(project_name)

            # update hook for each project
            if please_cli.config.PROJECTS_CONFIG[project_name]['update'] is True:

                if channel == 'production':
                    update_hook_nix_path_atttribute = f'updateHook.{channel}.scheduled'
                else:
                    update_hook_nix_path_atttribute = f'updateHook.{channel}.notScheduled'

                projects_to_deploy.append((
                    project_name,
                    [],
                    'TASKCLUSTER_HOOK',
                    {
                        'enable': True,
                        'docker_registry': 'index.docker.io',
                        'docker_repo': 'mozillareleng/services',
                        'name-suffix': '-update-dependencies',
                        'nix_path_attribute': update_hook_nix_path_atttribute,
                    },
                ))

            if deployed_projects == project_hashes[project_name]:
                continue

            if 'deploys' not in please_cli.config.PROJECTS_CONFIG[project_name]:
                continue

            for deploy in please_cli.config.PROJECTS_CONFIG[project_name]['deploys']:
                for deploy_channel in deploy['options']:
                    if channel == deploy_channel:
                        projects_to_deploy.append((
                            project_name,
                            please_cli.config.PROJECTS_CONFIG[project_name].get('requires', []),
                            deploy['target'],
                            deploy['options'][channel],
                        ))

    click.echo(' => Creating taskcluster tasks definitions')
    tasks = []

    # 1. build tasks
    build_tasks = {}
    for index, project in enumerate(sorted(build_projects)):
        project_uuid = slugid.nice().decode('utf-8')
        required = []
        if pull_request is not None:
            required += [
                'CACHE_BUCKET',
                'CACHE_REGION',
            ]
        secrets = cli_common.taskcluster.get_secrets(
            taskcluster_secret,
            project,
            required=required,
            taskcluster_client_id=taskcluster_client_id,
            taskcluster_access_token=taskcluster_access_token,
        )
        build_tasks[project_uuid] = get_build_task(
            index,
            project,
            task_group_id,
            task_id,
            github_commit,
            owner,
            channel,
            taskcluster_secret,
            pull_request is None and secrets.get('CACHE_BUCKET') or None,
            pull_request is None and secrets.get('CACHE_REGION') or None,
        )
        tasks.append((project_uuid, build_tasks[project_uuid]))

    if projects_to_deploy:

        # 2. maintanance on task
        maintanance_on_uuid = slugid.nice().decode('utf-8')
        if len(build_tasks.keys()) == 0:
            maintanance_on_dependencies = [task_id]
        else:
            maintanance_on_dependencies = [i for i in build_tasks.keys()]
        maintanance_on_task = get_task(
            task_group_id,
            maintanance_on_dependencies,
            github_commit,
            channel,
            taskcluster_secret,
            './please -vv tools maintanance:on ' + ' '.join(list(set([i[0] for i in projects_to_deploy]))),
            {
                'name': '2. Maintanance ON',
                'description': '',
                'owner': owner,
                'source': 'https://github.com/mozilla/release-services/tree/' + channel,

            },
        )
        tasks.append((maintanance_on_uuid, maintanance_on_task))

        # 3. deploy tasks (if on production/staging)
        deploy_tasks = {}
        for index, (project, project_requires, deploy_target, deploy_options) in \
                enumerate(sorted(projects_to_deploy, key=lambda x: x[0])):
            try:
                enable = deploy_options['enable']
            except KeyError:
                raise click.ClickException(f'Missing "enable" in project {project} and channel {channel} deploy options')

            if not enable:
                continue

            project_uuid = slugid.nice().decode('utf-8')
            project_task = get_deploy_task(
                index,
                project,
                project_requires,
                deploy_target,
                deploy_options,
                task_group_id,
                maintanance_on_uuid,
                github_commit,
                owner,
                channel,
                taskcluster_secret,
            )
            if project_task:
                deploy_tasks[project_uuid] = project_task
                tasks.append((project_uuid, deploy_tasks[project_uuid]))

        # 4. maintanance off task
        maintanance_off_uuid = slugid.nice().decode('utf-8')
        maintanance_off_task = get_task(
            task_group_id,
            [i for i in deploy_tasks.keys()],
            github_commit,
            channel,
            taskcluster_secret,
            './please -vv tools maintanance:off ' + ' '.join(list(set([i[0] for i in projects_to_deploy]))),
            {
                'name': '4. Maintanance OFF',
                'description': '',
                'owner': owner,
                'source': 'https://github.com/mozilla/release-services/tree/' + channel,

            },
        )
        maintanance_off_task['requires'] = 'all-resolved'
        tasks.append((maintanance_off_uuid, maintanance_off_task))

    click.echo(' => Submitting taskcluster definitions to taskcluster')
    if dry_run:
        tasks2 = {task_id: task for task_id, task in tasks}
        for task_id, task in tasks:
            click.echo(' => %s [taskId: %s]' % (task['metadata']['name'], task_id))
            click.echo('    dependencies:')
            deps = []
            for dep in task['dependencies']:
                depName = '0. Decision task'
                if dep in tasks2:
                    depName = tasks2[dep]['metadata']['name']
                    deps.append('      - %s [taskId: %s]' % (depName, dep))
            for dep in sorted(deps):
                click.echo(dep)
    else:
        for task_id, task in tasks:
            taskcluster_queue.createTask(task_id, task)
Example #46
def makeTaskId():
    """Used in testing to generate task ids without talking to TaskCluster."""
    return slugid.nice()
def slugId():
    """ Generate a taskcluster slugid.  This is a V4 UUID encoded into
    URL-Safe Base64 (RFC 4648, sec 5) with '=' padding removed """
    return slugid.nice()
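To make the two docstrings above concrete: a nice slug is a 22-character, URL-safe base64 rendering of a v4 UUID, and the slugid package also exposes decode()/encode() to convert back and forth. A small sketch (the isinstance check covers slugid releases that return bytes instead of str):

import uuid
import slugid

slug = slugid.nice()
if isinstance(slug, bytes):   # older slugid releases return bytes
    slug = slug.decode('utf-8')

assert len(slug) == 22                             # '=' padding removed per RFC 4648, sec. 5
assert isinstance(slugid.decode(slug), uuid.UUID)  # decodes back to a uuid.UUID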
Example #48
def make_decision_task(params, symbol, arguments=[], head_rev=None):
    """Generate a basic decision task, based on the root
    .taskcluster.yml"""
    with open('.taskcluster.yml') as f:
        taskcluster_yml = f.read()

    if not head_rev:
        head_rev = params['head_rev']

    # do a cheap and dirty job of the template substitution that mozilla-taskcluster
    # does when it reads .taskcluster.yml
    comment = '"no push -- cron task \'{job_name}\'"'.format(**params),
    replacements = {
        '\'{{{?now}}}?\'': "{'relative-datestamp': '0 seconds'}",
        '{{{?owner}}}?': '*****@*****.**',
        '{{#shellquote}}{{{comment}}}{{/shellquote}}': comment,
        '{{{?source}}}?': params['head_repository'],
        '{{{?url}}}?': params['head_repository'],
        '{{{?project}}}?': params['project'],
        '{{{?level}}}?': params['level'],
        '{{{?revision}}}?': head_rev,
        '\'{{#from_now}}([^{]*){{/from_now}}\'': "{'relative-datestamp': '\\1'}",
        '{{{?pushdate}}}?': '0',
        # treeherder ignores pushlog_id, so set it to -1
        '{{{?pushlog_id}}}?': '-1',
        # omitted as unnecessary
        # {{#as_slugid}}..{{/as_slugid}}
    }
    for pattern, replacement in replacements.iteritems():
        taskcluster_yml = re.sub(pattern, replacement, taskcluster_yml)

    task = yaml.load(taskcluster_yml)['tasks'][0]['task']

    # set some metadata
    task['metadata']['name'] = 'Decision task for cron job ' + params['job_name']
    cron_task_id = os.environ.get('TASK_ID', '<cron task id>')
    descr_md = 'Created by a [cron task](https://tools.taskcluster.net/task-inspector/#{}/)'
    task['metadata']['description'] = descr_md.format(cron_task_id)

    th = task['extra']['treeherder']
    th['groupSymbol'] = 'cron'
    th['symbol'] = symbol

    # add a scope based on the repository, with a cron:<job_name> suffix
    match = re.match(r'https://(hg.mozilla.org)/(.*?)/?$', params['head_repository'])
    if not match:
        raise Exception('Unrecognized head_repository')
    repo_scope = 'assume:repo:{}/{}:cron:{}'.format(
        match.group(1), match.group(2), params['job_name'])
    task.setdefault('scopes', []).append(repo_scope)

    # append arguments, quoted, to the decision task command
    shellcmd = task['payload']['command']
    shellcmd[-1] = shellcmd[-1].rstrip('\n')  # strip yaml artifact
    for arg in arguments:
        shellcmd[-1] += ' ' + pipes.quote(arg)

    task_id = slugid.nice()

    # set taskGroupId = taskId, as expected of decision tasks by other systems.
    # This creates a new taskGroup for this graph.
    task['taskGroupId'] = task_id

    # set the schedulerId based on the level
    task['schedulerId'] = 'gecko-level-{}'.format(params['level'])

    return (task_id, task)