Example no. 1
def get_continuation_studies():
    user = data_utils.get_current_user()
    profiles = Profile().get_for_user(user.id)
    output = list()
    for p in profiles:
        output.append({"value": p.title, "label": p._id})
    return output
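The value/label pairs built here are shaped for a select control. A minimal sketch of exposing them from a Django view (the view name is hypothetical; json_util is used, as in Example no. 10, in case the label field holds a BSON ObjectId):

from bson import json_util
from django.http import HttpResponse

def continuation_studies_options(request):
    # hypothetical endpoint returning the value/label pairs for a dropdown
    return HttpResponse(json_util.dumps({"options": get_continuation_studies()}))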
Example no. 2
    def _get_dataverse(self, profile_id):
        u = data_utils.get_current_user()
        # create new dataverse if none exists already
        dv_details = Profile().check_for_dataverse_details(profile_id)
        if not dv_details:
            # dataverse = connection.create_dataverse(dv_alias, '{0} {1}'.format(u.first_name, u.last_name), u.email)
            dv_details = self._create_dataverse(profile_id)
            Profile().add_dataverse_details(profile_id, dv_details)

        return dv_details
Example no. 3
    def save_record(self, auto_fields=None, **kwargs):
        # default to None rather than a shared mutable dict, since auto_fields is mutated below
        if auto_fields is None:
            auto_fields = dict()
        if not kwargs.get("target_id", str()):
            repo = kwargs.pop("repository", str())
            for k, v in dict(repository=repo,
                             status=False,
                             complete='false',
                             is_cg=str(repo == "cg_core"),
                             user_id=data_utils.get_current_user().id,
                             date_created=data_utils.get_datetime()).items():
                auto_fields[self.get_qualified_field(k)] = v

        return super(Submission, self).save_record(auto_fields, **kwargs)
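For illustration, a hedged sketch of a call that takes the branch above: no target_id is supplied, so the repository, status and ownership defaults are injected before delegating to the base class (the repository name is only an example):

# illustrative call; the return value depends on the base class implementation of save_record
submission = Submission().save_record(dict(), repository="figshare")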
Example no. 4
def view_user_info(request):
    user = data_utils.get_current_user()
    # op = Orcid().get_orcid_profile(user)
    d = SocialAccount.objects.get(user_id=user.id)
    # replace hyphens with underscores throughout the serialised ORCID data so that
    # keys such as given-names can be accessed in Django templates
    op = json.loads(json.dumps(d.extra_data).replace("-", "_"))

    repo_ids = user.userdetails.repo_submitter
    repos = Repository().get_by_ids(repo_ids)
    # data_dict = jsonpickle.encode(data_dict)
    data_dict = {'orcid': op, "repos": repos}

    return render(request, 'copo/user_info.html', data_dict)
Example no. 5
    def get_for_user(self, user=None):
        if not user:
            user = data_utils.get_current_user().id
        docs = self.get_collection_handle().find({
            "user_id": user,
            "deleted": data_utils.get_not_deleted_flag()
        }).sort([['_id', -1]])

        if docs:
            return docs
        else:
            return None
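A brief sketch of consuming the returned cursor; the _id and title fields are taken from the other examples here, and the sort above means the newest profiles come first:

profiles = Profile().get_for_user()  # defaults to the current user
if profiles is not None:
    for p in profiles:
        print(p["_id"], p["title"])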
Example no. 6
    def isValidCredentials(self, user_id):

        # check if token exists for user
        token = Figshare().get_token_for_user(
            user_id=data_utils.get_current_user().id)
        if token:
            # now check if token works
            headers = {'Authorization': 'token ' + token['token']}
            r = requests.get('https://api.figshare.com/v2/account/articles',
                             headers=headers)
            if r.status_code == 200:
                return True
            else:
                # we have an invalid token stored, so we should delete it and prompt the user for a new one
                Figshare().delete_tokens_for_user(user_id=user_id)
        return False
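A sketch of how a caller might use this check before attempting a Figshare deposit; FigshareDepositor is a hypothetical stand-in for whatever class defines isValidCredentials:

depositor = FigshareDepositor()  # hypothetical: the class defining isValidCredentials
user_id = data_utils.get_current_user().id
if depositor.isValidCredentials(user_id):
    pass  # a working token is stored; proceed with the Figshare API calls
else:
    pass  # no usable token: send the user back through the Figshare OAuth flow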
Example no. 7
    def get_profiles_status(self):
        # examine all the profiles owned by the current user and report those marked as dirty:
        # returns a dict holding issue ids, issue descriptions and an overall issue count
        issues = {}
        issue_desc = []
        issue_id = []
        issues_count = 0
        try:
            user_id = data_utils.get_current_user().id
            prof = Profiles.find({"user_id": user_id})
        except AttributeError as e:
            prof = []

        # iterate profiles and find collections which are dirty
        for p in prof:
            try:
                collections_ids = p['collections']
            except KeyError:
                # no collections attached to this profile: record the issue and move on to the next profile
                issues_count += 1
                context = {"profile_name": p['title'], "link": reverse('copo:view_copo_profile', args=[p["_id"]])}
                issue_desc.append(STATUS_CODES['PROFILE_EMPTY'].format(**context))
                continue
            # now get the corresponding collection_heads
            collections_heads = Collections.find({'_id': {'$in': collections_ids}},
                                                 {'is_clean': 1, 'collection_details': 1})
            # for c in collections_heads:
            #     try:
            #         if c['is_clean'] == 0:
            #             profile = Profile().get_profile_from_collection_id(c["_id"])
            #             issues_count += 1
            #             context = {}
            #             context["profile_name"] = p['title']
            #             context["link"] = reverse('copo:view_copo_profile', args=[profile["_id"]])
            #
            #             # now work out why the collection is dirty
            #             if False:
            #                 pass
            #             else:
            #                 issue_desc.append(STATUS_CODES['PROFILE_NOT_DEPOSITED'].format(**context))
            #     except:
            #         pass
        issues['issue_id_list'] = issue_id
        issues['num_issues'] = issues_count
        issues['issue_description_list'] = issue_desc
        return issues
Example no. 8
    def get_shared_for_user(self, user=None):
        # get profiles shared with user
        if not user:
            user = data_utils.get_current_user().id
        groups = CopoGroup().Group.find({'member_ids': str(user)})

        p_list = list()
        for g in groups:
            gp = dict(g)
            p_list.extend(gp['shared_profile_ids'])
        # remove duplicates
        # p_list = list(set(p_list))
        docs = self.get_collection_handle().find({
            "_id": {"$in": p_list},
            "deleted": data_utils.get_not_deleted_flag()
        })
        out = list(docs)
        for d in out:
            d['shared'] = True

        return out
Example no. 9
    def create_sra_person(self):
        """
        creates an (SRA) person record and attach to profile
        Returns:
        """

        people = self.get_all_records()
        sra_roles = list()
        for record in people:
            for role in record.get("roles", list()):
                sra_roles.append(role.get("annotationValue", str()))

        # has sra roles?
        has_sra_roles = all(
            x in sra_roles
            for x in ['SRA Inform On Status', 'SRA Inform On Error'])

        if not has_sra_roles:
            try:
                user = data_utils.get_current_user()

                auto_fields = {
                    'copo.person.roles.annotationValue': 'SRA Inform On Status',
                    'copo.person.lastName': user.last_name,
                    'copo.person.firstName': user.first_name,
                    'copo.person.roles.annotationValue___0___1': 'SRA Inform On Error',
                    'copo.person.email': user.email
                }
            except Exception as e:
                pass
            else:
                kwargs = dict()
                self.save_record(auto_fields, **kwargs)
        return
Example no. 10
def get_tokens_for_user(request):
    user = data_utils.get_current_user().id
    # get Figshare Tokens
    t = util.cursor_to_list(Figshare().get_figshare_tokens_for_user(user))
    return HttpResponse(json_util.dumps({'figshare_tokens': t}))
Example no. 11
    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.

        url = request.get_full_path()
        if url.startswith('/copo'):

            doc = Submission().get_incomplete_submissions_for_user(
                request.user.id, figshare)
            data_dict = dict()
            token = None

            if doc.count() > 0:

                if 'code' in request.GET and 'state' in request.GET:

                    token_obtained = True

                    for d in doc:
                        if d.get('token_obtained') == 'false':
                            token_obtained = False
                            break

                    if not token_obtained:

                        # get new token from Figshare
                        code = request.GET.get('code')
                        client_id = FIGSHARE_CREDENTIALS['client_id']
                        token_url = FIGSHARE_API_URLS['authorization_token']

                        # now get token
                        data = {
                            'client_id': client_id,
                            'code': code,
                            'client_secret':
                            FIGSHARE_CREDENTIALS['client_secret'],
                            'grant_type': 'authorization_code',
                            'scope': 'all'
                        }
                        try:
                            r = requests.post(token_url, data)
                            data_dict = ast.literal_eval(
                                r.content.decode('utf-8'))
                            token = data_dict['token']
                            t = Figshare().put_token_for_user(
                                user_id=data_utils.get_current_user().id,
                                token=token)
                            if t:
                                # mark figshare submissions for this user as token obtained
                                Submission().mark_all_token_obtained(
                                    user_id=request.user.id)

                                # if all is well, the access token will be stored in FigshareSubmissionCollection
                        except Exception as e:
                            print(e)

                    else:
                        # retrieve token
                        token = Figshare().get_token_for_user(
                            user_id=data_utils.get_current_user().id)

                        # request.session['partial_submissions'] = doc
            else:
                request.session['partial_submissions'] = None
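The excerpt stops before a response is produced; under Django's standard middleware contract, __call__ would finish by delegating to the next layer. A minimal sketch of that assumed tail (not part of the excerpt above):

        # assumed continuation, following the standard Django middleware pattern
        response = self.get_response(request)
        return response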
Example no. 12
def do_analysis_xml(sub_id):
    sub = Submission().get_record(sub_id)
    dfs = list()
    for d in sub["bundle"]:
        dfs.append(DataFile().get_record(d))
    df = dfs[0]
    p = Profile().get_record(df["profile_id"])
    analysis_set = Element("ANALYSIS_SET")
    analysis = Element("ANALYSIS")
    alias = make_alias(sub)
    analysis.set("alias", alias + "_anaysis")
    center_name = df["description"]["attributes"]["study_type"][
        "study_analysis_center_name"]
    analysis.set("analysis_center", center_name)
    broker_name = df["description"]["attributes"]["study_type"]["study_broker"]
    analysis.set("broker_name", broker_name)
    analysis_date = df["description"]["attributes"]["study_type"][
        "study_analysis_date"]
    # ad = analysis_date.split('/')
    # d = datetime.date(int(ad[2]), int(ad[1]), int(ad[0]))
    # analysis.set("anlalysis_date", d)
    # analysis_set.append(analysis)

    title = Element("TITLE")
    title.text = df["description"]["attributes"]["study_type"]["study_title"]
    analysis.append(title)

    description = Element("DESCRIPTION")
    description.text = df["description"]["attributes"]["study_type"][
        "study_description"]
    analysis.append(description)

    study_ref = Element("STUDY_REF")
    study_ref.set("refname", str(sub["_id"]))
    analysis.append(study_ref)

    # TODO - a sample is not required for annotation submissions; the ENA documentation that says it is required is incorrect. These stages will be removed from the wizard at some point
    s_ref = get_sample_ref(df)
    sample_ref = Element("SAMPLE_REF")
    sample_ref.set("refname", s_ref)
    # analysis.append(sample_ref)

    analysis_type = Element("ANALYSIS_TYPE")
    SubElement(analysis_type, "SEQUENCE_ANNOTATION")
    analysis.append(analysis_type)

    files = Element("FILES")
    file = Element("FILE")
    filename = df["name"]
    file_hash = df["file_hash"]

    fqfn = str(sub_id) + '/' + data_utils.get_current_user().username + '/' + filename

    file.set("filename", fqfn)
    file.set("filetype", "tab")
    file.set("checksum_method", "MD5")
    file.set("checksum", file_hash)
    file.set("unencrypted_checksum", file_hash)
    files.append(file)
    analysis.append(files)

    attrs = Element("ANALYSIS_ATTRIBUTES")
    for a in df["description"]["attributes"]["attach_study_samples"][
            "attributes"]:
        attr = Element("ANALYSIS_ATTRIBUTE")
        tag = Element("TAG")
        tag.text = a["name"]
        value = Element("VALUE")
        value.text = a["value"]
        attr.append(tag)
        attr.append(value)
        attrs.append(attr)

    analysis.append(attrs)

    return prettify(analysis)
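Note that the prettified document is built from the ANALYSIS element on its own, since the append to ANALYSIS_SET is commented out. A minimal usage sketch, assuming a valid submission id; the output filename is illustrative only:

xml_str = do_analysis_xml(sub_id)  # sub_id assumed to reference an existing submission
with open("analysis.xml", "w") as xml_file:
    xml_file.write(xml_str)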
Example no. 13
def generate_filename2(instance, filename):
    partition = datetime.datetime.now().strftime("%H_%M_%S_%f")
    filename = os.path.join(settings.UPLOAD_PATH, str(data_utils.get_current_user().id), partition, instance.filename)
    return filename
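This matches the (instance, filename) signature Django expects from an upload_to callable. A minimal sketch of wiring it to a model field; the model and field names are hypothetical, and the instance must expose the filename attribute referenced above:

from django.db import models

class ChunkedUpload(models.Model):
    # hypothetical model; generate_filename2 reads instance.filename
    filename = models.CharField(max_length=255)
    the_file = models.FileField(upload_to=generate_filename2)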