Example #1
    def get(self, *args, **kwargs):
        if 'user' not in kwargs:
            self.raise401()

        user = kwargs['user']

        if args:
            path = parse_path(args[0])
            task = Task.objects(id=path[0]).first()
            if not task:
                self.raise404()
            if user not in task.project.members:
                self.raise401()
            task_data = document_to_json(task, filter_set=_FILTER)
        else:
            project_name = self.get_argument('project', None)
            limit = self.get_argument('limit', None)
            start = self.get_argument('start', None)
            try:
                limit = int(limit)
            except:
                limit = None
            try:
                start = int(start)
            except:
                start = None
            try:
                project_name = parse_path(project_name)[0]
            except IndexError:
                project_name = None
            if project_name:
                project = Project.objects(name=project_name).first()
                if not project:
                    self.raise404()
                if user not in project.members:
                    self.raise403()
                tasks = Task.objects(project=project)
            else:
                projects = Project.objects(members__in=[user]).all()
                tasks = []
                for project in projects:
                    ts = Task.objects(project=project).all()
                    tasks += list(ts)
            if limit and start:
                tasks = tasks[start:start + limit]
            elif limit:
                tasks = tasks[:limit]
            elif start:
                tasks = tasks[start:]
            task_data = query_to_json(tasks, filter_set=_FILTER)
        self.write(task_data)
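Note: the limit/start handling above recurs in several of the handlers below. As a side note, a small helper like the following (hypothetical, not part of the original project) captures the same slicing while treating start=0 as a real offset rather than falling through the `if limit and start` test:

def paginate(queryset, start=None, limit=None):
    # Mirrors the slicing pattern used in these handlers.
    if start is not None and limit is not None:
        return queryset[start:start + limit]
    if limit is not None:
        return queryset[:limit]
    if start is not None:
        return queryset[start:]
    return queryset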
Example #2
    def get(self, *args, **kwargs):
        if 'user' not in kwargs:
            self.raise401()

        user = kwargs['user']

        if args:
            path = parse_path(args[0])
            task = Task.objects(id=path[0]).first()
            if not task:
                self.raise404()
            if user not in task.project.members:
                self.raise401()
            task_data = document_to_json(task, filter_set=_FILTER)
        else:
            project_name = self.get_argument('project', None)
            limit = self.get_argument('limit', None)
            start = self.get_argument('start', None)
            try:
                limit = int(limit)
            except:
                limit = None
            try:
                start = int(start)
            except:
                start = None
            try:
                project_name = parse_path(project_name)[0]
            except IndexError:
                project_name = None
            if project_name:
                project = Project.objects(name=project_name).first()
                if not project:
                    self.raise404()
                if user not in project.members:
                    self.raise403()
                tasks = Task.objects(project=project)
            else:
                projects = Project.objects(members__in=[user]).all()
                tasks = []
                for project in projects:
                    ts = Task.objects(project=project).all()
                    tasks += list(ts)
            if limit and start:
                tasks = tasks[start: start+limit]
            elif limit:
                tasks = tasks[:limit]
            elif start:
                tasks = tasks[start:]
            task_data = query_to_json(tasks, filter_set=_FILTER)
        self.write(task_data)
Example #3
 def get(self, *args, **kwargs):
     if 'user' not in kwargs:
         self.raise401()
     if not args:
         self.raise404()
     path = parse_path(args[0])
     task = Task.objects(id=path[0]).first()
     if not task:
         self.raise404()
     user = kwargs['user']
     if user not in task.project.members:
         self.raise401()
     limit = self.get_argument('limit', None)
     start = self.get_argument('start', None)
     try:
         limit = int(limit)
     except:
         limit = None
     try:
         start = int(start)
     except:
         start = None
     comments = task.comments
     if limit and start:
         comments = task.comments[start:start+limit]
     elif limit:
         comments = task.comments[:limit]
     elif start:
         comments = task.comments[start:]
     else:
         comments = task.comments
     comment_data = query_to_json(comments, filter_set=_COMMENT_FILTER)
     self.write(comment_data)
Example #4
 def get(self, *args, **kwargs):
     if 'user' not in kwargs:
         self.raise401()
     if not args:
         self.raise404()
     path = parse_path(args[0])
     task = Task.objects(id=path[0]).first()
     if not task:
         self.raise404()
     user = kwargs['user']
     if user not in task.project.members:
         self.raise401()
     limit = self.get_argument('limit', None)
     start = self.get_argument('start', None)
     try:
         limit = int(limit)
     except:
         limit = None
     try:
         start = int(start)
     except:
         start = None
     comments = task.comments
     if limit and start:
         comments = task.comments[start:start + limit]
     elif limit:
         comments = task.comments[:limit]
     elif start:
         comments = task.comments[start:]
     else:
         comments = task.comments
     comment_data = query_to_json(comments, filter_set=_COMMENT_FILTER)
     self.write(comment_data)
Example #5
def deploy_single(path):
    """
    Deploy a single project to S3 and, if configured, to our servers.
    """
    require('settings', provided_by=[production, staging])
    slug, abspath = utils.parse_path(path)
    graphic_root = '%s/%s' % (abspath, slug)
    s3_root = '%s/graphics/%s' % (app_config.PROJECT_SLUG, slug)
    graphic_assets = '%s/assets' % graphic_root
    s3_assets = '%s/assets' % s3_root
    graphic_node_modules = '%s/node_modules' % graphic_root

    graphic_config = load_graphic_config(graphic_root)

    use_assets = getattr(graphic_config, 'USE_ASSETS', True)
    default_max_age = getattr(graphic_config, 'DEFAULT_MAX_AGE',
                              None) or app_config.DEFAULT_MAX_AGE
    assets_max_age = getattr(graphic_config, 'ASSETS_MAX_AGE',
                             None) or app_config.ASSETS_MAX_AGE
    update_copy(path)
    if use_assets:
        error = assets.sync(path)
        if error:
            return

    render.render(path)
    flat.deploy_folder(
        graphic_root,
        s3_root,
        headers={'Cache-Control': 'max-age=%i' % default_max_age},
        ignore=[
            '%s/*' % graphic_assets,
            '%s/*' % graphic_node_modules,
            # Ignore files unused on static S3 server
            '*.xls',
            '*.xlsx',
            '*.pyc',
            '*.py',
            '*.less',
            '*.bak',
            '%s/base_template.html' % graphic_root,
            '%s/child_template.html' % graphic_root,
            '%s/README.md' % graphic_root
        ])

    if use_assets:
        flat.deploy_folder(
            graphic_assets,
            s3_assets,
            headers={'Cache-Control': 'max-age=%i' % assets_max_age},
            ignore=['%s/private/*' % graphic_assets])

    # Need to explicitly point to index.html for the AWS staging link
    file_suffix = ''
    if env.settings == 'staging':
        file_suffix = 'index.html'

    print ''
    print '%s URL: %s/graphics/%s/%s' % (
        env.settings.capitalize(), app_config.S3_BASE_URL, slug, file_suffix)
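In the fabfile examples here, utils.parse_path(path) returns a (slug, abspath) pair that is then recombined as graphic_root. A minimal stand-in, assuming the slug is simply the last path component and abspath its parent directory, would be:

import os

def parse_path_sketch(path):
    # Hypothetical stand-in for utils.parse_path as used in deploy_single:
    # split a graphic path into (slug, abspath).
    path = os.path.abspath(path.rstrip('/'))
    return os.path.basename(path), os.path.dirname(path)

# e.g. parse_path_sketch('graphics/spring-budget')
#   -> ('spring-budget', '/abs/cwd/graphics')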
Example #6
    def get(self, *args, **kwargs):
        # /clients
        # /clients/:app_name
        if 'user' not in kwargs:
            self.raise401()
        user = kwargs['user']

        if args:
            path = parse_path(args[0])
            client = Client.objects(user=user, app_name=path[0]).first()
            if not client:
                self.raise404()
            client_data = document_to_json(client, filter_set=_FILTER)
        else:
            limit = self.get_argument('limit', None)
            start = self.get_argument('start', None)
            try:
                limit = int(limit)
            except:
                limit = None
            try:
                start = int(start)
            except:
                start = None
            clients = Client.objects(user=user)
            if limit and start:
                clients = clients[start: start+limit]
            elif limit:
                clients = clients[:limit]
            elif start:
                clients = clients[start:]
            client_data = query_to_json(clients, filter_set=_FILTER)
        self.write(client_data)
Example #7
 def put(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     name = self.get_argument('name', None)
     description = self.get_argument('description', None)
     path = self.get_argument('path', None)
     scm = self.get_argument('scm', None)
     team = self.get_argument('team', None)
     tags = self.get_argument('tags', None)
     user = kwargs['user']
     update = {}
     if name:
         update['set__name'] = name
     if description:
         update['set__description'] = description
     if path:
         update['set__path'] = path
     if scm:
         update['set__scm'] = scm
     if team:
         update['set__team'] = team
     if tags:
         tags_list = parse_listed_strs(tags)
         update['set__tags'] = tags_list
     try:
         path = parse_path(args[0])
         Repo.objects(owner=user, name=path[0]).update_one(**update)
         repo = Repo.objects(owner=user, name=name or path[0]).first()
         repo_data = document_to_json(repo, filter_set=_FILTER)
         self.set_status(201)
         self.write(repo_data)
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #8
    def put(self, *args, **kwargs):
        if 'user' not in kwargs or not args:
            self.raise401()

        # redirect_uris = self.get_argument('redirect_uris', None)
        app_name = self.get_argument('app_name', None)
        description = self.get_argument('description', None)
        website = self.get_argument('website', None)
        update = {}
        if app_name:
            update['set__app_name'] = app_name
        if description:
            update['set__description'] = description
        if website:
            update['set__website'] = website
        # if redirect_uris:
        #     update['set_redirect_uris'] = parse_listed_strs(redirect_uris)
        user = kwargs['user']
        path = parse_path(args[0])
        client = Client.objects(app_name=path[0]).first()
        if not client or user != client.user:
            self.raise401()
        try:
            Client.objects(app_name=path[0]).update_one(**update)
            client = Client.objects(app_name=app_name or path[0]).first()
            client_data = document_to_json(client, filter_set=_FILTER)
            self.set_status(201)
            self.write(client_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #9
    def put(self, *args, **kwargs):
        if 'user' not in kwargs or not args:
            self.raise401()
        update = {}
        user = kwargs['user']
        task_id = parse_path(args[0])[0]
        task = Task.objects(id=task_id).first()
        project = task.project
        if not project or user not in project.members:
            self.raise401()
        category = self.get_argument('category', None)
        description = self.get_argument('description', None)
        project_name = self.get_argument('project', None)
        status = self.get_argument('status', None)
        priority = self.get_argument('priority', None)
        assign_to = self.get_argument('assign_to', None)
        due = self.get_argument('due', None)
        tags = self.get_argument('tags', None)

        if category:
            update['set__category'] = category
        if description:
            update['set__description'] = description
        if project_name:
            project = Project.objects(name=project_name).first()
            if not project or user not in project.members:
                self.raise401()
            update['set__project'] = project
        if assign_to:
            assign_to_list = []
            for member in parse_listed_strs(assign_to):
                u = User.objects(username=member).first()
                if not u:
                    continue
                assign_to_list.append(u)
            update['set__assign_to'] = assign_to_list
        if status:
            update['set__status'] = status
        if priority:
            update['set__priority'] = priority
        if due:
            try:
                due_day = int(due)
            except ValueError:
                due_day = 0
            due_time = get_utc_time(due_day * 24 * 3600)
            update['set__due'] = due_time
        if tags:
            tags_list = parse_listed_strs(tags)
            update['set__tags'] = tags_list
        try:
            Task.objects(id=task_id).update_one(**update)
            task = Task.objects(id=task_id).first()
            task_data = document_to_json(task, filter_set=_FILTER)
            self.set_status(201)
            self.write(task_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
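The update dict assembled above relies on MongoEngine's set__<field> keyword syntax; once expanded, the dynamic call is equivalent to an explicit update such as the following (illustrative values, reusing the Task model and task_id from the handler above):

# Equivalent explicit form of Task.objects(id=task_id).update_one(**update):
Task.objects(id=task_id).update_one(
    set__status='in-progress',
    set__priority='high',
    set__tags=['api', 'backend'],
)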
Example #10
    def put(self, *args, **kwargs):
        if 'user' not in kwargs or not args:
            self.raise401()
        update = {}
        user = kwargs['user']
        task_id = parse_path(args[0])[0]
        task = Task.objects(id=task_id).first()
        project = task.project
        if not project or user not in project.members:
            self.raise401()
        category = self.get_argument('category', None)
        description = self.get_argument('description', None)
        project_name = self.get_argument('project', None)
        status = self.get_argument('status', None)
        priority = self.get_argument('priority', None)
        assign_to = self.get_argument('assign_to', None)
        due = self.get_argument('due', None)
        tags = self.get_argument('tags', None)

        if category:
            update['set__category'] = category
        if description:
            update['set__description'] = description
        if project_name:
            project = Project.objects(name=project_name).first()
            if not project or user not in project.members:
                self.raise401()
            update['set__project'] = project
        if assign_to:
            assign_to_list = []
            for member in parse_listed_strs(assign_to):
                u = User.objects(username=member).first()
                if not u:
                    continue
                assign_to_list.append(u)
            update['set__assign_to'] = assign_to_list
        if status:
            update['set__status'] = status
        if priority:
            update['set__priority'] = priority
        if due:
            try:
                due_day = int(due)
            except ValueError:
                due_day = 0
            due_time = get_utc_time(due_day * 24 * 3600)
            update['set__due'] = due_time
        if tags:
            tags_list = parse_listed_strs(tags)
            update['set__tags'] = tags_list
        try:
            Task.objects(id=task_id).update_one(**update)
            task = Task.objects(id=task_id).first()
            task_data = document_to_json(task, filter_set=_FILTER)
            self.set_status(201)
            self.write(task_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #11
def main(argv):
    # Parse arguments
    args = parse_args(argv)
    print("Args: %s" % str(args))

    # Prepare paths
    image = utils.parse_path(args.image, utils.REPO)
    atlas_image = utils.parse_path(ATLAS_IMAGE, utils.REPO)
    atlas_mask = utils.parse_path(ATLAS_MASK, utils.REPO)
    brain_script = utils.parse_path(BRAIN_SCRIPT, utils.REPO)
    out_path = utils.parse_path(args.output, utils.REPO)

    # Brain preprocessing
    brain_process(image, out_path, atlas_image, atlas_mask, brain_script,
                  N_CPUS)

    print("Done!")
Example #12
def main(argv):
    # Parse arguments
    args = parse_args(argv)
    print("Args: %s" % str(args))

    # Prepare paths
    image = utils.parse_path(args.image, utils.REPO)
    model = utils.parse_path(PATH_ADNET, utils.REPO)
    mean_std = utils.parse_path(PATH_MEAN_STD, utils.REPO)
    out_path = utils.parse_path(args.output, utils.REPO)

    # CNN processing
    out_data = cnn_process(image, model, mean_std, LAYER_NAME)

    # Output
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    numpy.savetxt(out_path, out_data, header=OUTPUT_HEADER)

    print("Done!")
Example #13
def deploy_single(path):
    """
    Deploy a single project to S3 and, if configured, to our servers.
    """
    require('settings', provided_by=[production, staging])
    slug, abspath = utils.parse_path(path)
    graphic_root = '%s/%s' % (abspath, slug)
    s3_root = '%s/graphics/%s' % (app_config.PROJECT_SLUG, slug)
    graphic_assets = '%s/assets' % graphic_root
    s3_assets = '%s/assets' % s3_root
    graphic_node_modules = '%s/node_modules' % graphic_root

    graphic_config = load_graphic_config(graphic_root)

    use_assets = getattr(graphic_config, 'USE_ASSETS', True)
    default_max_age = getattr(graphic_config, 'DEFAULT_MAX_AGE', None) or app_config.DEFAULT_MAX_AGE
    assets_max_age = getattr(graphic_config, 'ASSETS_MAX_AGE', None) or app_config.ASSETS_MAX_AGE
    update_copy(path)
    if use_assets:
        error = assets.sync(path)
        if error:
            return

    render.render(path)
    flat.deploy_folder(
        graphic_root,
        s3_root,
        headers={
            'Cache-Control': 'max-age=%i' % default_max_age
        },
        ignore=['%s/*' % graphic_assets, '%s/*' % graphic_node_modules,
                # Ignore files unused on static S3 server
                '*.xls', '*.xlsx', '*.pyc', '*.py', '*.less', '*.bak',
                '%s/base_template.html' % graphic_root,
                '%s/child_template.html' % graphic_root,
                '%s/README.md' % graphic_root]
    )

    if use_assets:
        flat.deploy_folder(
            graphic_assets,
            s3_assets,
            headers={
                'Cache-Control': 'max-age=%i' % assets_max_age
            },
            ignore=['%s/private/*' % graphic_assets]
        )

    # Need to explicitly point to index.html for the AWS staging link
    file_suffix = ''
    if env.settings == 'staging':
        file_suffix = 'index.html'

    print ''
    print '%s URL: %s/graphics/%s/%s' % (env.settings.capitalize(), app_config.S3_BASE_URL, slug, file_suffix)
Example #14
    def get(self, *args, **kwargs):
        # need project/team information
        if 'user' not in kwargs:
            self.raise401()

        user = kwargs['user']
        if args:
            path = parse_path(args[0])
            if user.username != path[0]:
                user = User.objects(username=path[0]).first()
        self.write(document_to_json(user, filter_set=_FILTER))
Example #15
 def delete(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     try:
         user = kwargs['user']
         path = parse_path(args[0])
         Repo.objects(owner=user, name=path[0]).delete()
         self.set_status(204)
         self.finish()
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #16
def render(path=''):
    """
    Render HTML templates and compile assets.
    """
    custom_location = False
    if path:
        slug, abspath = utils.parse_path(path)
        if abspath != app_config.GRAPHICS_PATH:
            custom_location = True
        _render_graphics(['%s/%s' % (abspath, slug)], custom_location)
    else:
        _render_graphics(glob('%s/*' % app_config.GRAPHICS_PATH))
Example #17
def render(path=''):
    """
    Render HTML templates and compile assets.
    """
    custom_location = False
    if path:
        slug, abspath = utils.parse_path(path)
        if abspath != app_config.GRAPHICS_PATH:
            custom_location = True
        _render_graphics(['%s/%s' % (abspath, slug)], custom_location)
    else:
        _render_graphics(glob('%s/*' % app_config.GRAPHICS_PATH))
Example #18
 def delete(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     user = kwargs['user']
     path = parse_path(args[0])
     team = Team.objects(name=path[0]).first()
     if not team or user not in team.members:
         self.raise401()
     try:
         Team.objects(name=path[0]).delete()
         self.set_status(204)
         self.finish()
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #19
 def delete(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     user = kwargs['user']
     path = parse_path(args[0])
     client = Client.objects(app_name=path[0]).first()
     if not client or user != client.user:
         self.raise401()
     try:
         Client.objects(app_name=path[0]).delete()
         self.set_status(204)
         self.finish()
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #20
 def delete(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     user = kwargs['user']
     task_id = parse_path(args[0])[0]
     task = Task.objects(id=task_id).first()
     project = task.project
     if not project or user not in project.members:
         self.raise401()
     try:
         Task.objects(id=task_id).delete()
         self.set_status(204)
         self.finish()
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #21
 def delete(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     user = kwargs['user']
     task_id = parse_path(args[0])[0]
     task = Task.objects(id=task_id).first()
     project = task.project
     if not project or user not in project.members:
         self.raise401()
     try:
         Task.objects(id=task_id).delete()
         self.set_status(204)
         self.finish()
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #22
 def put(self, *args, **kwargs):
     if 'user' not in kwargs or not args:
         self.raise401()
     user = kwargs['user']
     path = parse_path(args[0])
     team = Team.objects(name=path[0]).first()
     if not team or user not in team.members:
         self.raise401()
     team_leader = team.leader
     update = {}
     name = self.get_argument('name', None)
     description = self.get_argument('description', None)
     url = self.get_argument('url', None)
     leader = self.get_argument('leader', None)
     members = self.get_argument('members', None)
     tags = self.get_argument('tags', None)
     if name:
         update['set__name'] = name
     if description:
         update['set__description'] = description
     if url:
         update['set__url'] = url
     if leader:
         team_leader = User.objects(username=leader).first()
         update['set__leader'] = team_leader
     if members:
         members_list = []
         for member in parse_listed_strs(members):
             u = User.objects(username=member).first()
             if not u or u == team_leader:
                 continue
             members_list.append(u)
         members_list.append(team_leader)
         update['set__members'] = members_list
     if tags:
         tags_list = parse_listed_strs(tags)
         update['set__tags'] = tags_list
     try:
         Team.objects(name=path[0]).update_one(**update)
         team = Team.objects(name=name or path[0]).first()
         team_data = document_to_json(team, filter_set=_FILTER)
         self.set_status(201)
         self.write(team_data)
     except Exception as e:
         reason = e.message
         self.raise400(reason=reason)
Example #23
    def delete(self, *args, **kwargs):
        """Delete a project by its name provided in URL.
        """
        if 'user' not in kwargs or not args:
            self.raise401()

        user = kwargs['user']
        path = parse_path(args[0])
        project = Project.objects(name=path[0], members__in=[user])
        if not project:
            self.raise401()
        try:
            project.delete()
            self.set_status(204)
            self.finish()
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #24
    def delete(self, *args, **kwargs):
        """Delete a project by its name provided in URL.
        """
        if 'user' not in kwargs or not args:
            self.raise401()

        user = kwargs['user']
        path = parse_path(args[0])
        project = Project.objects(name=path[0], members__in=[user])
        if not project:
            self.raise401()
        try:
            project.delete()
            self.set_status(204)
            self.finish()
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #25
def download_copy(path):
    """
    Downloads a Google Doc as an .xlsx file.
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)

    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        print '%s/graphic_config.py does not exist.' % slug
        return

    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return

    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)
Example #26
def main():
    parser = argparse.ArgumentParser(description="yumi_push_act_dist")
    parser.add_argument("--out",
                        type=str,
                        default="",
                        help="outs directory that should be evaluated")
    parser.add_argument("--policy",
                        type=str,
                        default="",
                        help="policy directory that should be evaluated")
    parser.add_argument("--tag",
                        type=str,
                        default="",
                        help="image names to look for while creating video")
    parser.add_argument("--video_name", type=str, default="")
    args = parser.parse_args()
    if not ".avi" in args.video_name:
        args.video_name = args.video_name + ".avi"

    directory, path = parse_path(args)
    images_to_video(path, args.tag, args.video_name)
Example #27
def download_copy(path):
    """
    Downloads a Google Doc as an .xlsx file.
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)

    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        print '%s/graphic_config.py does not exist.' % slug
        return

    if not hasattr(
            graphic_config,
            'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return

    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)
Example #28
def get_graphic_template_variables(path, graphic_number):
    """
    Generates the template variables for each graphic
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)

    ## Get Spreadsheet Path
    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        print '%s/graphic_config.py does not exist.' % slug
        return

    if not hasattr(
            graphic_config,
            'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return

    ## Generate Links From Slug
    spreadsheet_id = graphic_config.COPY_GOOGLE_DOC_KEY
    app_id = slug

    ## Update Spreadsheet
    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)

    ## Get Sheet Data
    copy = copytext.Copy(filename=copy_path)
    sheet = copy['labels']

    note = {
        "spreadsheet_id": spreadsheet_id,
        "app_id": app_id,
        "graphic_number": graphic_number + 1,
        "sheet": sheet,
    }

    return note
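download_copy and get_graphic_template_variables both expect a graphic_config.py inside the graphic folder. Drawing only on the attributes these examples read, a minimal config might look like this (the key and the max-age values are placeholders):

# graphic_config.py -- illustrative only
COPY_GOOGLE_DOC_KEY = 'your-google-doc-key-here'  # required by download_copy
USE_ASSETS = True        # read by deploy_single (Example #5)
DEFAULT_MAX_AGE = 20     # seconds; app_config.DEFAULT_MAX_AGE is used when absent
ASSETS_MAX_AGE = 86400   # seconds; app_config.ASSETS_MAX_AGE is used when absent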
Example #29
    def get(self, *args, **kwargs):
        if 'user' not in kwargs:
            self.raise401()

        user = kwargs['user']
        if args:
            path = parse_path(args[0])
            team = Team.objects(name=path[0]).first()
            if not team:
                self.raise404()
            team_data = document_to_json(team, filter_set=_FILTER)
        else:
            # username = self.get_argument('username', None)
            # try:
            #     username = parse_path(username)[0]
            # except IndexError:
            #     username = None
            # if username:
            #     user = User.objects(username=username).first()
            limit = self.get_argument('limit', None)
            start = self.get_argument('start', None)
            try:
                limit = int(limit)
            except:
                limit = None
            try:
                start = int(start)
            except:
                start = None
            teams = Team.objects(members__in=[user])
            if limit and start:
                teams = teams[start: start+limit]
            elif limit:
                teams = teams[:limit]
            elif start:
                teams = teams[start:]
            team_data = query_to_json(teams, filter_set=_FILTER)
        self.write(team_data)
Example #30
def get_graphic_template_variables(path, graphic_number):
    """
    Generates the template variables for each graphic
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)

    ## Get Spreadsheet Path
    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        print '%s/graphic_config.py does not exist.' % slug
        return

    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return

    ## Generate Links From Slug
    spreadsheet_id = graphic_config.COPY_GOOGLE_DOC_KEY
    app_id = slug

    ## Update Spreadsheet
    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)

    ## Get Sheet Data
    copy = copytext.Copy(filename=copy_path)
    sheet = copy['labels']

    note = {
        "spreadsheet_id": spreadsheet_id,
        "app_id": app_id,
        "graphic_number": graphic_number + 1,
        "sheet": sheet,
    }

    return note
Example #31
    def post(self, *args, **kwargs):
        if 'user' not in kwargs:
            self.raise401()
        if not args:
            self.raise404()

        path = parse_path(args[0])
        task = Task.objects(id=path[0]).first()
        if not task:
            self.raise404()
        user = kwargs['user']
        if user not in task.project.members:
            self.raise401()
        content = self.get_argument('content', None)
        try:
            comment = TaskComment(content=content, author=user)
            Task.objects(id=path[0]).update_one(push__comments=comment)
            comment_data = document_to_json(comment,
                                            filter_set=_COMMENT_FILTER)
            self.set_status(201)
            self.write(comment_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #32
    def post(self, *args, **kwargs):
        if 'user' not in kwargs:
            self.raise401()
        if not args:
            self.raise404()

        path = parse_path(args[0])
        task = Task.objects(id=path[0]).first()
        if not task:
            self.raise404()
        user = kwargs['user']
        if user not in task.project.members:
            self.raise401()
        content = self.get_argument('content', None)
        try:
            comment = TaskComment(content=content, author=user)
            Task.objects(id=path[0]).update_one(push__comments=comment)
            comment_data = document_to_json(
                comment, filter_set=_COMMENT_FILTER)
            self.set_status(201)
            self.write(comment_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #33
def deploy_single(path):
    """
    Deploy a single project to S3 and, if configured, to our servers.
    """
    require('settings', provided_by=[production, staging])
    SLACK_TOKEN = os.environ.get('SLACK_TOKEN')

    if SLACK_TOKEN == None:
        print "Can't find the Slack Token. Source your bash."
        return

    slack_client = SlackClient(SLACK_TOKEN)
    slug, abspath = utils.parse_path(path)
    graphic_root = '%s/%s' % (abspath, slug)
    s3_root = '%s/%s' % (app_config.PROJECT_SLUG, slug)
    graphic_assets = '%s/assets' % graphic_root
    s3_assets = '%s/assets' % s3_root
    graphic_node_modules = '%s/node_modules' % graphic_root

    graphic_config = load_graphic_config(graphic_root)

    use_assets = getattr(graphic_config, 'USE_ASSETS', True)
    default_max_age = getattr(graphic_config, 'DEFAULT_MAX_AGE', None) or app_config.DEFAULT_MAX_AGE
    assets_max_age = getattr(graphic_config, 'ASSETS_MAX_AGE', None) or app_config.ASSETS_MAX_AGE
    update_copy(path)
    if use_assets:
        error = assets.sync(path)
        if error:
            return

    render.render(path)
    flat.deploy_folder(
        graphic_root,
        s3_root,
        headers={
            'Cache-Control': 'max-age=%i' % default_max_age
        },
        ignore=['%s/*' % graphic_assets, '%s/*' % graphic_node_modules,
                # Ignore files unused on static S3 server
                '*.xls', '*.xlsx', '*.pyc', '*.py', '*.less', '*.bak',
                '%s/base_template.html' % graphic_root,
                '%s/child_template.html' % graphic_root]
    )

    if use_assets:
        flat.deploy_folder(
            graphic_assets,
            s3_assets,
            headers={
                'Cache-Control': 'max-age=%i' % assets_max_age
            },
            ignore=['%s/private/*' % graphic_assets]
        )
    def get_start_message(slug):
        result = slack_client.api_call(
            "search.messages",
            query= '{0} in:charts'.format(slug),
        )
        if result['messages']['matches'][0].get('attachments') != None:
            print 'found attachments'
            if result['messages']['matches'][0]['attachments'][0]['title'] == slug:
                print 'found it'
                return result['messages']['matches'][0]['ts']
        if result['messages']['matches'][0].get('previous') != None:
            print 'found previous'
            if 'attachments' in result['messages']['matches'][0]['previous']:
                if result['messages']['matches'][0]['previous']['attachments'][0]['title'] == slug:
                    print 'found in previous mention'
                    return result['messages']['matches'][0]['previous']['ts']
        if result['messages']['matches'][0].get('previous_2') != None:
            print "it's in previous_2"
            if 'attachments' in result['messages']['matches'][0]['previous_2']:
                print 'attachments in previous_2'
                if result['messages']['matches'][0]['previous_2']['attachments'][0]['title'] == slug:
                    print 'found in previous_2 mention'
                    return result['messages']['matches'][0]['previous_2']['ts']
        else:
            print('Not found')
            print(result['messages']['matches'][0])
    def send_thread_message(message, ts):
        slack_client.api_call(
            "chat.postMessage",
            channel='#charts',
            text=message,
            username='******',
            icon_emoji=':bowtie:',
            thread_ts=ts,
        )
    if env.settings == 'production':
        message = 'Updated final link'
    else:
        message = 'Updated review link'
    """message_search_results = get_start_message(slug)
    if message_search_results != None:
        send_thread_message(message, message_search_results)
        print 'message sent'"""
    print ''
    print '%s URL: %s/%s/index.html' % (env.settings.capitalize(), app_config.S3_BASE_URL, slug)
Example #34
def build_table_data(conn, images, ignoreFirstFileToken=False, ignoreLastFileToken=False):
    """
    We need to build tagging table data when the page originally loads 
    """

    def listTags(image):
        """ This should be in the BlitzGateway! """
        return [a for a in image.listAnnotations() if a.__class__.__name__ == "TagAnnotationWrapper"]

    # Need to build our table...

    # First go through all images, getting all the tokens
    # Each set of tokens must be separate so that they can be distinguished
    pathTokens = []
    fileTokens = []
    extTokens = []
    # Also record which tokens are in which images to avoid reparsing later per-image
    imagesTokens = {}

    for image in images:
        name = image.getName()
 
        pt, ft, et = parse_path(name)
        
        # Do discards
        #TODO Incredibly primitive, replace with much, much smarter discarding system
        if (ignoreFirstFileToken):
            ft.pop(0)
        if (ignoreLastFileToken):
            ft.pop()

        pathTokens.extend(pt)
        fileTokens.extend(ft)
        extTokens.extend(et)
        imagesTokens[image] = set(pt + et + ft)

    # Remove duplicates from each set
    pathTokens = set(pathTokens)
    fileTokens = set(fileTokens)
    extTokens = set(extTokens)
    # Remove duplicates that exist between sets (from path, then extension)
    pathTokens = pathTokens - fileTokens
    pathTokens = pathTokens - extTokens
    extTokens = extTokens - fileTokens

    # Convert back to list
    pathTokens = list(pathTokens)
    fileTokens = list(fileTokens)
    extTokens = list(extTokens)
    
    # Order the lists by name
    pathTokens.sort(key=lambda name: name.lower())
    fileTokens.sort(key=lambda name: name.lower())
    extTokens.sort(key=lambda name: name.lower())

    tokens = {'pathTokens' : pathTokens, 'fileTokens' : fileTokens, 'extTokens' : extTokens}

    tokenTags = {}
    # find which tokens match existing Tags
    for tokenType in ['pathTokens', 'fileTokens','extTokens']:
        tt = []
        for token in tokens[tokenType]:

            # Skip zero length tokens
            if len(token) == 0:
                continue

            # Skip (at least for now) tokens that are simply numbers
            if token.isdigit():
                continue

            # Get all tags matching the token
            matchingTags = list(conn.getObjects("TagAnnotation", attributes={'textValue':token}))

            tags = []
            # For each of the matching tags
            for matchingTag in matchingTags:
                # Add dictionary of details
                tags.append({'name':matchingTag.getValue(), 'id':matchingTag.getId(), 'desc':matchingTag.getDescription(), 'ownerName':matchingTag.getOwnerFullName()})

            tokenTagMap = {'name':token}

            # Assign the matching tags to the token dictionary (only if there are any)
            if len(tags) > 0:
                tokenTagMap['tags'] = tags

            # Add the token with any tag mappings to the list
            tt.append(tokenTagMap)

        tokenTags[tokenType] = tt

    # Populate the images with details
    imageDetails = []
    imageStates = {}

    for image, allTokens in imagesTokens.iteritems():

        # Create mapping of tags that exist already on this image (tagValue : [ids])
        imageTags = {}
        for tag in listTags(image):
            if tag.getValue() in imageTags:
                imageTags[tag.getValue()].append(tag.getId())
            else:
                imageTags[tag.getValue()] = [tag.getId()]

        imageTokens = []
        imageTokenStates = {}
        # For each token that exists (tokens from all images)
        for tokenType in ['pathTokens', 'fileTokens','extTokens']:
            for token in tokenTags[tokenType]:
                imageToken = {'name':token['name']}
                # If the token is present in the image
                if token['name'] in allTokens:
                    # Get the tags (if any) that are relevant
                    if 'tags' in token:
                        tags = token['tags']
                    # Mark the token for autoselect (Do this even if the token is not matched)
                    imageToken['autoselect'] = True

                # Assign token type
                imageToken['tokenType'] = tokenType

                # Add all the matching tags 
                if token['name'] in imageTags:
                    # Add the tagIds that match to this token
                    imageToken['tags'] = imageTags[token['name']]

                    # If there is just the one matching tag for this column, mark the token selected
                    #TODO This could be removed in favor of a simple filter in django?
                    if len(token['tags']) == 1:
                        imageToken['selected'] = True

                # If the token has no matching tags or more than 1
                if 'tags' not in token or len(token['tags']) != 1:
                    imageToken['disabled'] = True 

                imageTokens.append(imageToken)
                imageTokenStates[token['name']] = imageToken

        imageDetail = {'id':image.getId(), 'name':image.getName(), 'tokens':imageTokens}
        imageStates[image.getId()] = {'name':image.getName(), 'tokens':imageTokenStates}
        imageDetails.append(imageDetail)
    # Sort imageDetails
    imageDetails.sort(key=lambda name: name['name'].lower())

    # How this works:
    # tokenTags is a list of the tokens involved in all the images. These contain details of the tags that match
    # imageDetails is a list of the images, each one has details per-above tokens. e.g. If the token is matched,
    # has a tag already selected or if it should be auto-selected 

    #print 'tokenTags: ', tokenTags          #PRINT
    #print 'imageDetails: ', imageDetails    #PRINT

    return tokenTags, imageDetails, imageStates
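Here parse_path(name) is expected to yield three token lists (path tokens, file-name tokens, extension tokens). A rough stand-in, assuming tokens are split on common separators, could look like:

import re

def parse_path_sketch(name):
    # Hypothetical stand-in: split an image name into
    # (path tokens, file-name tokens, extension tokens).
    directory, _, filename = name.rpartition('/')
    if '.' in filename:
        base, ext = filename.rsplit('.', 1)
    else:
        base, ext = filename, ''

    def split(s):
        return [t for t in re.split(r'[\s_\-./]+', s) if t]

    return split(directory), split(base), split(ext)

# parse_path_sketch('plate1/well_A01_field2.tif')
#   -> (['plate1'], ['well', 'A01', 'field2'], ['tif'])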
Example #35
    def get(self, *args, **kwargs):
        """Retrieve the resources of projects for the current user.

        If `*args` is provided by matching the URL pattern, the first element
        in the args is treated as a project name; the project data is then
        retrieved from the database and sent back to the client/resource
        owner as JSON.
        Otherwise, it responds with a list of projects the user participates
        in. The request can provide three arguments: `team`, `limit` and
        `start`. `team` queries the projects of one team, by name, of which
        the user is a member. `limit` is the maximum number of items sent
        back to the client. `start` is the starting index of the query
        results.

        Only an authenticated user/resource owner can access this by using an
        access_token, and his/her scopes must include `projects`.

        .. todo::
            restrict the response data and add default limits
        """
        if 'user' not in kwargs:
            self.raise401()

        user = kwargs['user']
        if args:
            path = parse_path(args[0])
            project = Project.objects(name=path[0]).first()
            if not project:
                self.raise404()
            if project and user not in project.members:
                self.raise401()
            project_data = document_to_json(project, filter_set=_FILTER)
        else:
            team_name = self.get_argument('team', None)
            limit = self.get_argument('limit', None)
            start = self.get_argument('start', None)
            try:
                team_name = parse_path(team_name)[0]
            except IndexError:
                team_name = None
            try:
                limit = int(limit)
            except Exception:
                limit = None
            try:
                start = int(start)
            except Exception:
                start = None
            if team_name:
                team = Team.objects(name=team_name).first()
                if not team:
                    self.raise404()
                if user not in team.members:
                    self.raise403()
                project = Project.objects(teams__in=[team])
            else:
                project = Project.objects(members__in=[user])
            if limit and start:
                project = project[start:start + limit]
            elif limit:
                project = project[:limit]
            elif start:
                project = project[start:]
            project_data = query_to_json(project, filter_set=_FILTER)
        self.write(project_data)
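The docstring above spells out the query arguments; a hypothetical client call (host and token are placeholders) that exercises the team/limit/start parameters might be:

import requests  # assuming the handler is exposed over plain HTTP

resp = requests.get(
    'https://api.example.com/projects',                   # placeholder URL
    params={'team': 'backend', 'limit': 10, 'start': 0},
    headers={'Authorization': 'Bearer <access_token>'},   # scope must include `projects`
)
projects = resp.json()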
Example #36
def test_single(path, use='Chrome', screenshot=True, pymParent=False):
    """
    Test a graphic looking for browser warnings and errors
    Using selenium & chrome webdriver
    """
    screenshot = utils.prep_bool_arg(screenshot)
    pymParent = utils.prep_bool_arg(pymParent)
    log_content = []
    require('settings', provided_by=['production', 'staging'])
    slug, abspath = utils.parse_path(path)
    # Need to explicitly point to index.html for the AWS staging link
    file_suffix = ''
    if env.settings == 'staging':
        file_suffix = 'index.html'
    url = '%s/graphics/%s/%s' % (app_config.S3_BASE_URL, slug, file_suffix)
    logger.info('url: %s' % url)
    OUTPUT_PATH = os.path.join(cwd, '../test')
    # Create output files folder if needed
    if not os.path.exists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)
    driver = _choose_web_driver(use)
    try:
        if pymParent:
            driver.execute_script(RESET_SCRIPT)
        driver.get(url)
        # Wait for pym to be loaded
        # Execute a script that listens to the child message
        # and sets a global variable on the browser's window
        # Then make an explicit wait until the global var is set to true
        if pymParent:
            try:
                WebDriverWait(driver, app_config.TEST_SCRIPTS_TIMEOUT).until(
                    lambda driver: driver.execute_script(CHECK_PYM_SCRIPT))
            except TimeoutException:
                logger.info("%s - Timeout: PymParent Not found." % (slug))
                line = [slug, 'INFO', 'Timeout: PymParent Not found']
                log_content.append(line)
        # Wait a configurable time for the page to load
        time.sleep(app_config.TESTS_LOAD_WAIT_TIME)
        if pymParent:
            # Force Pym Message communication
            driver.execute_script(COMUNICATION_SCRIPT)
            try:
                WebDriverWait(driver, app_config.TEST_SCRIPTS_TIMEOUT).until(
                    lambda driver: driver.execute_script(VALIDATION_SCRIPT))
            except TimeoutException:
                logger.info("%s - Timeout: No messaging." % (slug))
                line = [slug, 'INFO', 'Timeout: No messaging']
                log_content.append(line)
        log = driver.get_log('browser')
        if not log:
            logger.info("Test was successful")
        else:
            log_content.append(['id', 'level', 'message'])
            for entry in log:
                clean_message = u'%s' % (safe_unicode(
                    safe_str(entry['message'])))
                clean_message = clean_message.replace('\n', '')
                line = [slug, entry['level'], clean_message]
                log_content.append(line)
                if entry['level'] == 'ERROR':
                    logger.error("Reason %s" % clean_message)
                elif entry['level'] == 'WARNING':
                    logger.warning("Reason %s" % clean_message)
                else:
                    logger.info("Found some console.log output %s" %
                                (clean_message))
    finally:
        if screenshot:
            driver.save_screenshot('%s/%s-%s.png' %
                                   (OUTPUT_PATH, env.settings, slug))
        driver.quit()
        if log_content:
            with open('%s/%s-%s.log' % (OUTPUT_PATH, env.settings, slug),
                      'w') as writefile:
                writer = csv.writer(writefile, quoting=csv.QUOTE_MINIMAL)
                writer.writerows(log_content)
Example #37
def parse_request():
    parse_path("/users")
    return jsonify({1: 3})
Example #38
 def get(self, *args, **kwargs):
     if 'user' not in kwargs:
         self.raise401()
     user = kwargs['user']
     repo_type = None
     repo_query = None
     repo_contents = None
     repo_branches = None
     repo_tags = None
     repo_info = None
     limit = self.get_argument('limit', None)
     start = self.get_argument('start', None)
     try:
         limit = int(limit)
     except:
         limit = None
     if args:
         # author = self.get_argument('author', None)
         path = parse_path(args[0])
         if not path:
             self.raise404()
         repo = Repo.objects(owner=user, name=path[0]).first()
         if repo:
             scm_repo = GitRepo(repo.path)
             repo_info = scm_repo.get_info()
             repo_branches, repo_tags = get_repo_branches_tags(scm_repo)
             repo_type, repo_query, repo_contents = get_repo_contents(
                 scm_repo, path[1:], limit=limit, start=start)
         if not repo_contents:
             self.raise404()
         repo_data = document_to_json(repo, filter_set=_FILTER)
     else:
         team_name = self.get_argument('team_name', None)
         try:
             start = int(start)
         except:
             start = None
         try:
             team_name = parse_path(team_name)[0]
         except IndexError:
             team_name = None
         if team_name:
             team = Team.objects(name=team_name).first()
             if not team:
                 self.raise404()
             if user not in team.members:
                 self.raise403()
             repos = Repo.objects(team=team)
         else:
             repos = Repo.objects(owner=user)
         if limit and start:
             repos = repos[start: start+limit]
         elif limit:
             repos = repos[:limit]
         elif start:
             repos = repos[start:]
         repo_data = query_to_json(repos, filter_set=_FILTER)
     if repo_type and repo_contents:
         repo_data['repo_info'] = repo_info
         repo_data['repo_type'] = repo_type
         repo_data['repo_query'] = repo_query
         repo_data['repo_branches'] = repo_branches
         repo_data['repo_tags'] = repo_tags
         repo_data['repo_contents'] = repo_contents
     self.write(repo_data)
Example #39
def bulk_test(csvpath, use='Chrome', screenshot=True, pymParent=False):
    """
    Test graphics browser warnings & errors -- use batch for multiple graphics
    Using selenium & chrome webdriver
    """
    screenshot = utils.prep_bool_arg(screenshot)
    pymParent = utils.prep_bool_arg(pymParent)
    fname = os.path.basename(csvpath)
    url_pattern = re.compile(r'(?:/|storyId=)(\d{9})/?')
    # Assume that a filepath is given read contents and clean them
    with open(csvpath, 'r') as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    # Timestamp of the test
    ts = re.sub(r'\..*', '', str(datetime.datetime.now()))
    ts = re.sub(r'[\s:-]', '_', ts)
    log_content = [['id', 'level', 'message']]
    OUTPUT_PATH = os.path.join(cwd, '../test/%s' % ts)
    # Create output files folder if needed
    if not os.path.exists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)
    driver = _choose_web_driver(use)
    try:
        for ix, item in enumerate(content):
            if re.match(r'^https?://', item):
                m = url_pattern.search(item)
                if m:
                    slug = m.group(1)
                else:
                    slug = 'line%s' % (ix + 1)
                url = item
                env.settings = 'url'
            else:
                require('settings', provided_by=['production', 'staging'])
                slug, _ = utils.parse_path(item)
                # Need to explicitly point to index.html
                # for the AWS staging link
                file_suffix = ''
                if env.settings == 'staging':
                    file_suffix = 'index.html'
                url = '%s/graphics/%s/%s' % (app_config.S3_BASE_URL, slug,
                                             file_suffix)
            logger.info('url: %s' % url)
            if pymParent:
                driver.execute_script(RESET_SCRIPT)
            driver.get(url)
            # Wait for pym to be loaded
            if pymParent:
                try:
                    WebDriverWait(driver,
                                  app_config.TEST_SCRIPTS_TIMEOUT).until(
                                      lambda driver: driver.execute_script(
                                          CHECK_PYM_SCRIPT))
                except TimeoutException:
                    logger.info("%s - Timeout: PymParent Not found." % (slug))
                    line = [slug, 'INFO', 'Timeout: Pym Not found']
                    log_content.append(line)
            # Wait a configurable time for the page to load
            time.sleep(app_config.TESTS_LOAD_WAIT_TIME)
            # Force Pym Message communication
            driver.execute_script(COMUNICATION_SCRIPT)
            if pymParent:
                try:
                    WebDriverWait(driver,
                                  app_config.TEST_SCRIPTS_TIMEOUT).until(
                                      lambda driver: driver.execute_script(
                                          VALIDATION_SCRIPT))
                except TimeoutException:
                    logger.info("%s - Timeout: No messaging." % (slug))
                    line = [slug, 'INFO', 'Timeout: No messaging']
                    log_content.append(line)

            # Get browser log and parse output
            log = driver.get_log('browser')
            if not log:
                logger.info("%s - Test successful" % (slug))
                line = [slug, 'SUCCESS', 'Test successful with no logs']
                log_content.append(line)
            else:
                logger.warning("%s - Test found issues. Check log" % (slug))
                for entry in log:
                    clean_message = u'%s' % (safe_unicode(
                        safe_str(entry['message'])))
                    clean_message = clean_message.replace('\n', '')
                    line = [slug, entry['level'], clean_message]
                    log_content.append(line)

            # Save screenshot
            if screenshot:
                driver.save_screenshot('%s/%s-%s.png' %
                                       (OUTPUT_PATH, env.settings, slug))
    finally:
        driver.quit()
        if log_content:
            with open('%s/test-%s' % (OUTPUT_PATH, fname), 'w') as writefile:
                writer = csv.writer(writefile, quoting=csv.QUOTE_MINIMAL)
                writer.writerows(log_content)
Example #40
    def put(self, *args, **kwargs):
        """Update a project by its name and other information.
        """
        if 'user' not in kwargs or not args:
            self.raise401()
        name = self.get_argument('name', None)
        description = self.get_argument('description', None)
        url = self.get_argument('url', None)
        leader = self.get_argument('leader', None)
        members = self.get_argument('members', None)
        teams = self.get_argument('teams', None)
        repos = self.get_argument('repos', None)
        tags = self.get_argument('tags', None)

        user = kwargs['user']
        path = parse_path(args[0])
        project = Project.objects(name=path[0]).first()
        if not project or user not in project.members:
            self.raise401()
        project_leader = project.leader
        update = {}
        if name:
            update['set__name'] = name
        if description:
            update['set__description'] = description
        if url:
            update['set__url'] = url
        if leader:
            project_leader = User.objects(username=leader).first()
            update['set__leader'] = project_leader
        if members:
            members_list = []
            for member in parse_listed_strs(members):
                u = User.objects(username=member).first()
                if not u or u == project_leader:
                    continue
                members_list.append(u)
            members_list.append(project_leader)
            update['set__members'] = members_list
        if teams:
            teams_list = []
            for team in parse_listed_strs(teams):
                t = Team.objects(name=team).first()
                if not t:
                    continue
                teams_list.append(t)
            update['set__teams'] = teams_list
        if repos:
            repos_list = []
            for repo in parse_listed_strs(repos):
                r = Repo.objects(name=repo).first()
                if not r:
                    continue
                repos_list.append(r)
            update['set__repos'] = repos_list
        if tags:
            tags_list = parse_listed_strs(tags)
            update['set__tags'] = tags_list
        try:
            Project.objects(name=path[0]).update_one(**update)
            project = Project.objects(name=name or path[0]).first()
            project_data = document_to_json(project, filter_set=_FILTER)
            self.set_status(201)
            self.write(project_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
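The handler above leans on a `parse_listed_strs` helper to turn the comma-separated `members`, `teams`, `repos` and `tags` arguments into lists. That helper is defined elsewhere; a minimal sketch of what it is assumed to do (the real implementation may deduplicate or behave differently):

import re

def parse_listed_strs(raw):
    """Sketch only: split a comma/whitespace separated argument string
    into a list of non-empty items."""
    if not raw:
        return []
    return [s for s in re.split(r'[,\s]+', raw.strip()) if s]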
Ejemplo n.º 41
0
    def get(self, *args, **kwargs):
        """Retrieve the resources of projects for the current user.

        If `*args` is provided by matching the URL pattern, the first element
        in the args is considered as a project name, then the project data will
        be retrieved from Database and send back to the client and the source
        owner in the format of JSON.
        Otherwise, it responses with a list of projects parcipated by the
        user. The request can provide three arugments: `team`, `limit` and
        `start`. `team` is used for querying the projects of one team by
        its name, which the user is one of its memebers. `limit` is
        the max number of items sent back to the client. `start` is the
        starting index of the querying results.

        Only authenticated user/resouce owner can access by using access_token,
        and his/her scopes must include `projects`.

        The retrieved resource should always be related to the user, and it is
        not allowed to access others' projects or other teams' projects.

        .. todo::
            restrict the response data and add default limits
        """
        if 'user' not in kwargs:
            self.raise401()

        user = kwargs['user']
        if args:
            path = parse_path(args[0])
            project = Project.objects(name=path[0]).first()
            if not project:
                self.raise404()
            if project and user not in project.members:
                self.raise401()
            project_data = document_to_json(project, filter_set=_FILTER)
        else:
            team_name = self.get_argument('team', None)
            limit = self.get_argument('limit', None)
            start = self.get_argument('start', None)
            try:
                team_name = parse_path(team_name)[0]
            except IndexError:
                team_name = None
            try:
                limit = int(limit)
            except Exception:
                limit = None
            try:
                start = int(start)
            except Exception:
                start = None
            if team_name:
                team = Team.objects(name=team_name).first()
                if not team:
                    self.raise404()
                if user not in team.members:
                    self.raise403()
                project = Project.objects(teams__in=[team])
            else:
                project = Project.objects(members__in=[user])
            if limit and start:
                project = project[start:start + limit]
            elif limit:
                project = project[:limit]
            elif start:
                project = project[start:]
            project_data = query_to_json(project, filter_set=_FILTER)
        self.write(project_data)
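For illustration only, a client call against this handler might look like the sketch below; the `/projects` mount point and the `access_token` parameter name are assumptions based on the docstring, while `team`, `limit` and `start` are the query arguments the handler actually reads.

import requests

resp = requests.get(
    'https://api.example.com/projects',   # hypothetical mount point
    params={'team': 'backend',            # filter by one of the user's teams
            'limit': 10, 'start': 0,      # simple pagination
            'access_token': '<token>'})   # assumed auth parameter
resp.raise_for_status()
projects = resp.json()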
Ejemplo n.º 42
0
def build_table_data(conn, images, ignoreFirstFileToken=False,
                     ignoreLastFileToken=False):
    """
    We need to build tagging table data when the page originally loads 
    """

    def listTags(image):
        """ This should be in the BlitzGateway! """
        return [a for a in image.listAnnotations() if a.__class__.__name__ ==
                "TagAnnotationWrapper"]

    # New all_table data
    table_data = TableData()

    # First go through all images, getting all the tokens

    # Complete list of Tokens. If a Token already exists it is read from here
    # instead of being recreated. If necessary, its tokentype is overridden
    # when the type being added has a higher degree of precedence than before.
    # TODO If we ever need this later, it could be put straight into TableData
    # in place of the all_tokens list that is currently stored there
    all_tokens = {}

    # Process the images to extract tokens only
    for image in images:

        # Create the TableRow for this image
        row = table_data.add_image(image)
        # row = TableRow(table_data, image)

        # Use the full client import path if possible
        name = getImageClientPath(image).strip()
        # If not possible (OMERO 4.4.x), just use the name
        if len(name) > 0:
            # Set the client_path so this can be used in the rendering
            # If this isn't set, then the image name gets used instead
            row.set_client_path(name)
        else:
            name = image.getName()

        pt, ft, et = parse_path(name)

        # Do discards
        # TODO Incredibly primitive, replace with a much, much smarter
        # discarding system
        if ignoreFirstFileToken and len(ft) > 0:
            ft.pop(0)
        if ignoreLastFileToken and len(ft) > 0:
            ft.pop()


        # Convert tokens to Tokens
        # TODO Refactor these into a function

        # Process path tokens (Lowest priority so never override)
        for t in pt:
            # Skip zero length tokens
            if len(t) == 0:
                continue
            if t in all_tokens:
                token = all_tokens[t]
            else:
                token = Token(t, 'path')
                all_tokens[t] = token
            row.add_token(token)

        # Process Extension tokens (Middle priority so only override if 
        # current tokentype is 'path')
        for t in et:
            # Skip zero length tokens
            if len(t) == 0:
                continue
            if t in all_tokens:
                token = all_tokens[t]
                if token.tokentype == 'path':
                    token.set_tokentype('ext')
            else:
                token = Token(t, 'ext')
                all_tokens[t] = token
            row.add_token(token)

        # Process file tokens (highest priority so override all)
        for t in ft:
            # Skip zero length tokens
            if len(t) == 0:
                continue
            if t in all_tokens:
                token = all_tokens[t]
                token.set_tokentype('file')
            else:
                token = Token(t, 'file')
                all_tokens[t] = token
            row.add_token(token)

    # Update table_data with the full list of Tokens
    table_data.set_tokens(all_tokens.values())


    # List of all token details: [{name, tokenType, tagList}, ... ]
    # token_details = []

    # Find which tokens match existing Tags
    for token in table_data.all_tokens[:]:

        # Get all tags matching the token
        # TODO Could I reduce this to one query which takes all the tokens?
        tags = list(conn.getObjects(
            "TagAnnotation", 
            attributes={'textValue':token.value})
        )

        # Any tokens that are simply numbers that are not already tags
        if token.value.isdigit() and len(tags) == 0:
            # these need to be removed from the all_list and the rows
            table_data.remove_token(token)
            # Then continue to the next token
            continue

        # Add the matching tags, discarding any that can not be linked
        # This is when the group is read-only and the querying user is not
        # the owner of the tag
        for tag in tags:
            if tag.canLink():
                # Add the matching tags to this token
                token.add_tags([tag])

                # Update the matched_tags in table_data
                table_data.matched_tags.update([tag])
                # TODO Do I need to update the all_tags in table_data??

    # Find the tags that are pre-existing on these images
    for row in table_data.rows:
        # Get the tags on this image
        tags = listTags(row.image)

        # Add the tags to this image's row and automatically the all_tags list
        row.add_tags(tags)

    return table_data
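`build_table_data` assumes Token objects that carry a value, a promotable tokentype ('path' < 'ext' < 'file') and a growing list of matched tags. A minimal sketch of that interface follows, as an assumption for readability rather than the actual OMERO.webtagging implementation:

class Token(object):
    """Sketch only: the Token interface used by build_table_data above."""

    def __init__(self, value, tokentype):
        self.value = value
        self.tokentype = tokentype  # 'path', 'ext' or 'file'
        self.tags = []

    def set_tokentype(self, tokentype):
        # Callers above only ever promote the type, so simply store it
        self.tokentype = tokentype

    def add_tags(self, tags):
        self.tags.extend(tags)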
Ejemplo n.º 43
0
    def put(self, *args, **kwargs):
        """Update a project by its name and other information.
        """
        if 'user' not in kwargs or not args:
            self.raise401()
        name = self.get_argument('name', None)
        description = self.get_argument('description', None)
        url = self.get_argument('url', None)
        leader = self.get_argument('leader', None)
        members = self.get_argument('members', None)
        teams = self.get_argument('teams', None)
        repos = self.get_argument('repos', None)
        tags = self.get_argument('tags', None)

        user = kwargs['user']
        path = parse_path(args[0])
        project = Project.objects(name=path[0]).first()
        if not project or user not in project.members:
            self.raise401()
        project_leader = project.leader
        update = {}
        if name:
            update['set__name'] = name
        if description:
            update['set__description'] = description
        if url:
            update['set__url'] = url
        if leader:
            project_leader = User.objects(username=leader).first()
            update['set__leader'] = project_leader
        if members:
            members_list = []
            for member in parse_listed_strs(members):
                u = User.objects(username=member).first()
                if not u or u == project_leader:
                    continue
                members_list.append(u)
            members_list.append(project_leader)
            update['set__members'] = members_list
        if teams:
            teams_list = []
            for team in parse_listed_strs(teams):
                t = Team.objects(name=team).first()
                if not t:
                    continue
                teams_list.append(t)
            update['set__teams'] = teams_list
        if repos:
            repos_list = []
            for repo in parse_listed_strs(repos):
                r = Repo.objects(name=repo).first()
                if not r:
                    continue
                repos_list.append(r)
            update['set__repos'] = repos_list
        if tags:
            tags_list = parse_listed_strs(tags)
            update['set__tags'] = tags_list
        try:
            Project.objects(name=path[0]).update_one(**update)
            project = Project.objects(name=name or path[0]).first()
            project_data = document_to_json(project, filter_set=_FILTER)
            self.set_status(201)
            self.write(project_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Ejemplo n.º 44
0
def sync(path):
    """
    Intelligently synchronize assets between S3 and local folder.
    """
    ignore_globs = []
    slug, abspath = utils.parse_path(path)
    if not os.path.exists('%s/%s' % (abspath, slug)):
        print 'Slug "%s" does not exist!' % slug
        return True

    assets_root = '%s/%s/assets' % (abspath, slug)
    s3_root = '%s/%s' % (app_config.ASSETS_SLUG, slug)

    try:
        with open('%s/assetsignore' % assets_root, 'r') as f:
            ignore_globs = [l.strip() for l in f]
    except IOError:
        ignore_globs = []

    local_paths = []
    not_lowercase = []

    for local_path, subdirs, filenames in os.walk(assets_root):
        for name in filenames:
            full_path = os.path.join(local_path, name)
            glob_path = full_path.split(assets_root)[1].strip('/')

            ignore = False

            for ignore_glob in ignore_globs:
                if fnmatch(glob_path, ignore_glob):
                    ignore = True
                    break

            if ignore:
                print 'Ignoring: %s' % full_path
                continue

            if name.lower() != name:
                not_lowercase.append(full_path)

            local_paths.append(full_path)

    # Prevent case sensitivity differences between OSX and S3
    # from screwing us up
    if not_lowercase:
        print 'The following filenames are not lowercase, ' \
            'please change them before running `assets.sync`. '
        for name in not_lowercase:
            print '    %s' % name

        print 'WARNING: This must be fixed before you can deploy.'
        return True

    bucket = _assets_get_bucket()
    keys = bucket.list(s3_root)

    which = None
    always = False

    for key in keys:
        download = False
        upload = False

        local_path = key.name.replace(s3_root, assets_root, 1)

        # Skip root key
        if local_path == '%s/' % assets_root:
            continue

        print local_path

        if local_path in local_paths:
            # A file can only exist once; removing it here speeds up future
            # checks and leaves behind the list of files missing from S3
            local_paths.remove(local_path)

            # We need an actual key, not a "list key"
            # http://stackoverflow.com/a/18981298/24608
            key = bucket.get_key(key.name)

            with open(local_path, 'rb') as f:
                local_md5 = key.compute_md5(f)[0]

            # Hashes are different
            if key.get_metadata('md5') != local_md5:
                if not always:
                    # Ask user which file to take
                    which, always = _assets_confirm(local_path)

                if not which:
                    print 'Cancelling!'

                    return True

                if which == 'remote':
                    download = True
                elif which == 'local':
                    upload = True
        else:
            download = True

        if download:
            _assets_download(key, local_path)

        if upload:
            _assets_upload(local_path, key)

    action = None
    always = False

    # Iterate over files that didn't exist on S3
    for local_path in local_paths:
        key_name = local_path.replace(assets_root, s3_root, 1)
        key = bucket.get_key(key_name, validate=False)

        print local_path

        if not always:
            action, always = _assets_upload_confirm()

        if not action:
            print 'Cancelling!'

            return True

        if action == 'upload':
            _assets_upload(local_path, key)
        elif action == 'delete':
            _assets_delete(local_path, key)

    return False
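`sync` decides between download and upload by comparing `key.get_metadata('md5')` with a locally computed hash, so the upload helper has to record that metadata when it pushes a file. A possible shape for `_assets_upload` under that assumption (boto 2 API; the real helper may also prompt or set different ACLs):

def _assets_upload(local_path, key):
    """Sketch only: upload a local file and store its md5 so that later
    sync runs can compare it via key.get_metadata('md5')."""
    with open(local_path, 'rb') as f:
        md5 = key.compute_md5(f)[0]

    key.set_metadata('md5', md5)
    key.set_contents_from_filename(local_path)
    key.set_acl('public-read')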
Ejemplo n.º 45
0
def test_single(path, use='Chrome', screenshot=True, pymParent=False):
    """
    Test a graphic looking for browser warnings and errors
    Using selenium & chrome webdriver
    """
    screenshot = utils.prep_bool_arg(screenshot)
    pymParent = utils.prep_bool_arg(pymParent)
    log_content = []
    require('settings', provided_by=['production', 'staging'])
    slug, abspath = utils.parse_path(path)
    # Need to explicitly point to index.html for the AWS staging link
    file_suffix = ''
    if env.settings == 'staging':
        file_suffix = 'index.html'
    url = '%s/graphics/%s/%s' % (app_config.S3_BASE_URL, slug, file_suffix)
    logger.info('url: %s' % url)
    OUTPUT_PATH = os.path.join(cwd, '../test')
    # Create output files folder if needed
    if not os.path.exists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)
    driver = _choose_web_driver(use)
    try:
        if pymParent:
            driver.execute_script(RESET_SCRIPT)
        driver.get(url)
        # Wait for pym to be loaded
        # Execute a script that listens to the child message
        # and sets a global variable on the browser's window
        # Then make an explicit wait until the global var is set to true
        if pymParent:
            try:
                WebDriverWait(driver, app_config.TEST_SCRIPTS_TIMEOUT).until(
                    lambda driver: driver.execute_script(CHECK_PYM_SCRIPT)
                )
            except TimeoutException:
                logger.info("%s - Timeout: PymParent Not found." % (slug))
                line = [slug, 'INFO', 'Timeout: PymParent Not found']
                log_content.append(line)
        # Wait a configurable time for the page to load
        time.sleep(app_config.TESTS_LOAD_WAIT_TIME)
        if pymParent:
            # Force Pym Message communication
            driver.execute_script(COMUNICATION_SCRIPT)
            try:
                WebDriverWait(driver, app_config.TEST_SCRIPTS_TIMEOUT).until(
                    lambda driver: driver.execute_script(VALIDATION_SCRIPT)
                )
            except TimeoutException:
                logger.info("%s - Timeout: No messaging." % (
                    slug))
                line = [slug, 'INFO', 'Timeout: No messaging']
                log_content.append(line)
        log = driver.get_log('browser')
        if not log:
            logger.info("Test was successful")
        else:
            log_content.append(['id', 'level', 'message'])
            for entry in log:
                clean_message = u'%s' % (
                    safe_unicode(safe_str(entry['message'])))
                clean_message = clean_message.replace('\n', '')
                line = [slug, entry['level'], clean_message]
                log_content.append(line)
                if entry['level'] == 'ERROR':
                    logger.error("Reason %s" % clean_message)
                elif entry['level'] == 'WARNING':
                    logger.warning("Reason %s" % clean_message)
                else:
                    logger.info("Found some console.log output %s" % (
                        clean_message))
    finally:
        if screenshot:
            driver.save_screenshot('%s/%s-%s.png' % (OUTPUT_PATH,
                                                     env.settings,
                                                     slug))
        driver.quit()
        if log_content:
            with open('%s/%s-%s.log' % (OUTPUT_PATH,
                                        env.settings,
                                        slug), 'w') as writefile:
                writer = csv.writer(writefile, quoting=csv.QUOTE_MINIMAL)
                writer.writerows(log_content)
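`_choose_web_driver` is not shown in this snippet; a plausible sketch, assuming it only switches between Chrome and Firefox (the actual helper may configure headless mode or browser logging preferences):

from selenium import webdriver

def _choose_web_driver(use):
    """Sketch only: return a Selenium driver by name, defaulting to Chrome."""
    if use.lower() == 'firefox':
        return webdriver.Firefox()
    return webdriver.Chrome()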
Ejemplo n.º 46
0
def bulk_test(csvpath, use='Chrome', screenshot=True, pymParent=False):
    """
    Test graphics browser warnings & errors -- use batch for multiple graphics
    Using selenium & chrome webdriver
    """
    screenshot = utils.prep_bool_arg(screenshot)
    pymParent = utils.prep_bool_arg(pymParent)
    fname = os.path.basename(csvpath)
    url_pattern = re.compile(r'(?:/|storyId=)(\d{9})/?')
    # Assume that a filepath is given; read its contents and clean them
    with open(csvpath, 'r') as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    # Timestamp of the test
    ts = re.sub(r'\..*', '', str(datetime.datetime.now()))
    ts = re.sub(r'[\s:-]', '_', ts)
    log_content = [['id', 'level', 'message']]
    OUTPUT_PATH = os.path.join(cwd, '../test/%s' % ts)
    # Create output files folder if needed
    if not os.path.exists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)
    driver = _choose_web_driver(use)
    try:
        for ix, item in enumerate(content):
            if re.match(r'^https?://', item):
                m = url_pattern.search(item)
                if m:
                    slug = m.group(1)
                else:
                    slug = 'line%s' % (ix + 1)
                url = item
                env.settings = 'url'
            else:
                require('settings', provided_by=['production', 'staging'])
                slug, _ = utils.parse_path(item)
                # Need to explicitly point to index.html
                # for the AWS staging link
                file_suffix = ''
                if env.settings == 'staging':
                    file_suffix = 'index.html'
                url = '%s/graphics/%s/%s' % (app_config.S3_BASE_URL,
                                             slug, file_suffix)
            logger.info('url: %s' % url)
            if pymParent:
                driver.execute_script(RESET_SCRIPT)
            driver.get(url)
            # Wait for pym to be loaded
            if pymParent:
                try:
                    WebDriverWait(driver,
                                  app_config.TEST_SCRIPTS_TIMEOUT).until(
                        lambda driver: driver.execute_script(CHECK_PYM_SCRIPT)
                    )
                except TimeoutException:
                    logger.info("%s - Timeout: PymParent Not found." % (slug))
                    line = [slug, 'INFO', 'Timeout: PymParent Not found']
                    log_content.append(line)
            # Wait a configurable time for the page to load
            time.sleep(app_config.TESTS_LOAD_WAIT_TIME)
            if pymParent:
                # Force Pym Message communication
                driver.execute_script(COMUNICATION_SCRIPT)
                try:
                    WebDriverWait(driver,
                                  app_config.TEST_SCRIPTS_TIMEOUT).until(
                        lambda driver: driver.execute_script(VALIDATION_SCRIPT)
                    )
                except TimeoutException:
                    logger.info("%s - Timeout: No messaging." % (slug))
                    line = [slug, 'INFO', 'Timeout: No messaging']
                    log_content.append(line)

            # Get browser log and parse output
            log = driver.get_log('browser')
            if not log:
                logger.info("%s - Test successful" % (slug))
                line = [slug, 'SUCCESS', 'Test successful with no logs']
                log_content.append(line)
            else:
                logger.warning("%s - Test found issues. Check log" % (
                    slug))
                for entry in log:
                    clean_message = u'%s' % (
                        safe_unicode(safe_str(entry['message'])))
                    clean_message = clean_message.replace('\n', '')
                    line = [slug, entry['level'], clean_message]
                    log_content.append(line)

            # Save screenshot
            if screenshot:
                driver.save_screenshot('%s/%s-%s.png' % (OUTPUT_PATH,
                                                         env.settings,
                                                         slug))
    finally:
        driver.quit()
        if log_content:
            with open('%s/test-%s' % (OUTPUT_PATH, fname), 'w') as writefile:
                writer = csv.writer(writefile, quoting=csv.QUOTE_MINIMAL)
                writer.writerows(log_content)
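The explicit waits in both test tasks poll CHECK_PYM_SCRIPT and VALIDATION_SCRIPT until the executed JavaScript returns a truthy value. Those constants are defined elsewhere in the project; purely to illustrate the pattern, the hypothetical stand-ins below show the shape such scripts need (they are not the project's actual scripts):

# Hypothetical stand-ins: WebDriverWait(...).until() re-runs the lambda, so
# each polled script must *return* a boolean for the wait to resolve.
CHECK_PYM_SCRIPT = "return (typeof pym !== 'undefined');"
VALIDATION_SCRIPT = "return window.pymMessageReceived === true;"
# RESET_SCRIPT would clear that flag, and COMUNICATION_SCRIPT would trigger
# the parent/child messaging that eventually sets it.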