def end_write(self, with_errors, isnewfile=True):
    """Finish a write session and commit the buffered data.

    If the write completed without errors, the temp file is stored into
    the repo via seafile_api.put_file after checking the repo owner's
    quota (DAVError(HTTP_FORBIDDEN) when exceeded).  On the non-raising
    paths the temp file is then deleted and self.tmpfile_path reset.
    """
    if not with_errors:
        parent_dir, leaf_name = os.path.split(self.rel_path)
        written = os.stat(self.tmpfile_path).st_size
        quota_ok = self.check_repo_owner_quota(isnewfile=isnewfile,
                                               contentlength=written)
        if not quota_ok:
            raise DAVError(HTTP_FORBIDDEN,
                           "The quota of the repo owner is exceeded")
        seafile_api.put_file(self.repo.id, self.tmpfile_path,
                             parent_dir, leaf_name, self.username, None)
    if self.tmpfile_path:
        try:
            os.unlink(self.tmpfile_path)
        finally:
            self.tmpfile_path = None
 def endWrite(self, withErrors, isnewfile=True):
     """Flush a finished WebDAV upload into the library.

     When the upload produced no errors, verify the repo owner's quota
     (raising DAVError(HTTP_FORBIDDEN) when exceeded) and commit the
     temp file through seafile_api.put_file.  On the non-raising paths
     the temp file is then unlinked and self.tmpfile_path cleared.
     """
     if not withErrors:
         target_dir, target_name = os.path.split(self.rel_path)
         uploaded_bytes = os.stat(self.tmpfile_path).st_size
         within_quota = self.check_repo_owner_quota(
             isnewfile=isnewfile, contentlength=uploaded_bytes)
         if not within_quota:
             raise DAVError(HTTP_FORBIDDEN,
                            "The quota of the repo owner is exceeded")
         seafile_api.put_file(self.repo.id, self.tmpfile_path, target_dir,
                              target_name, self.username, None)
     if self.tmpfile_path:
         try:
             os.unlink(self.tmpfile_path)
         finally:
             self.tmpfile_path = None
Example #3
0
    def create_file_with_content(self, file_name, parent_dir='/', content='abc',
                                 username=''):
        """Create *file_name* under *parent_dir* and fill it with *content*.

        Returns the repo path of the new file ("<parent_dir><file_name>").
        """
        seafile_api.post_empty_file(self.repo.id, parent_dir, file_name, username)

        # First dump the content to a tmp file, then update the repo file
        # from it.  NOTE(review): the mkstemp file is never deleted —
        # confirm the test harness cleans its temp dir.
        fd, tmp_file = mkstemp()

        try:
            # os.write() needs bytes on Python 3.  The original wrote the
            # str literal 'junk content', which both ignored *content* and
            # raised TypeError (silently turned into -1 by a bare except,
            # making the assert below fail).
            bytesWritten = os.write(fd, content.encode('utf-8'))
        except OSError:
            # Only OS-level write failures map to the -1 sentinel.
            bytesWritten = -1
        finally:
            os.close(fd)

        assert bytesWritten > 0

        seafile_api.put_file(self.repo.id, tmp_file, parent_dir, file_name,
                             '', None)
        return parent_dir + file_name
Example #4
0
    def create_file_with_content(self, file_name, parent_dir='/', content='junk content',
                                 username=''):
        """Create *file_name* under *parent_dir* and fill it with *content*.

        Returns the repo path of the new file ("<parent_dir><file_name>").
        """
        seafile_api.post_empty_file(self.repo.id, parent_dir, file_name, username)

        # First dump the content to a tmp file, then update the repo file
        # from it.  NOTE(review): the mkstemp file is never deleted —
        # confirm the test harness cleans its temp dir.
        fd, tmp_file = mkstemp()

        try:
            bytesWritten = os.write(fd, content.encode('utf-8'))
        except OSError:
            # Narrowed from a bare except: only OS-level write failures
            # should map to the -1 sentinel checked below; programming
            # errors must propagate.
            bytesWritten = -1
        finally:
            os.close(fd)

        assert bytesWritten > 0

        seafile_api.put_file(self.repo.id, tmp_file, parent_dir, file_name,
                             '', None)
        return parent_dir + file_name
def test_file_operation():
    """Exercise the file API end to end: post, copy (sync/async),
    move (sync/async), rename, put, revisions, delete and trash listing.

    Uses module-level fixtures: file_path, file_name, file_content,
    dir_name, new_file_name, new_file_name_2, empty_file_name,
    new_empty_file_name, USER.
    """
    t_repo_version = 1
    t_repo_id1 = api.create_repo('test_file_operation1', '', USER, passwd=None)

    create_the_file()

    # test post_file
    assert api.post_file(t_repo_id1, file_path, '/', file_name, USER) == 0
    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + file_name)
    t_file_size = len(file_content)
    assert t_file_size == api.get_file_size(t_repo_id1, t_repo_version,
                                            t_file_id)

    # test post_dir
    assert api.post_dir(t_repo_id1, '/', dir_name, USER) == 0

    # test copy_file (synchronous)
    t_copy_file_result1 = api.copy_file(t_repo_id1, '/', file_name, t_repo_id1,
                                        '/', new_file_name, USER, 0, 1)
    assert t_copy_file_result1
    assert t_copy_file_result1.task_id is None
    assert not t_copy_file_result1.background
    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + new_file_name)
    assert t_file_size == api.get_file_size(t_repo_id1, t_repo_version,
                                            t_file_id)

    # test copy_file (asynchronous): with a full quota the task must fail
    t_repo_id2 = api.create_repo('test_file_operation2', '', USER, passwd=None)
    usage = api.get_user_self_usage(USER)
    api.set_user_quota(USER, usage + 1)
    t_copy_file_result2 = api.copy_file(t_repo_id1, '/', file_name, t_repo_id2,
                                        '/', file_name, USER, 1, 0)
    assert t_copy_file_result2
    assert t_copy_file_result2.background
    # Poll until the background task settles (bounded to ~10s).  The
    # original asserted .failed on the very first poll, racing with the
    # copy worker, and its `while True` could spin forever on a bug.
    for _ in range(100):
        time.sleep(0.1)
        t_copy_task = api.get_copy_task(t_copy_file_result2.task_id)
        if t_copy_task.failed:
            break
    assert t_copy_task.failed
    assert t_copy_task.failed_reason == 'Quota is full'

    # lift the quota and retry: the async copy must now succeed
    api.set_user_quota(USER, -1)
    t_copy_file_result2 = api.copy_file(t_repo_id1, '/', file_name, t_repo_id2,
                                        '/', file_name, USER, 1, 0)
    assert t_copy_file_result2
    assert t_copy_file_result2.task_id
    assert t_copy_file_result2.background
    for _ in range(100):
        time.sleep(0.1)
        t_copy_task = api.get_copy_task(t_copy_file_result2.task_id)
        if t_copy_task.successful:
            break
    assert t_copy_task.successful
    t_file_id = api.get_file_id_by_path(t_repo_id2, '/' + file_name)
    assert t_file_size == api.get_file_size(t_repo_id2, t_repo_version,
                                            t_file_id)

    # test move_file (synchronous): mtime must be preserved by the move
    t_move_file_info1 = api.get_dirent_by_path(t_repo_id1, '/' + new_file_name)
    t_move_file_result1 = api.move_file(t_repo_id1, '/', new_file_name,
                                        t_repo_id1, '/' + dir_name,
                                        new_file_name, 1, USER, 0, 1)
    assert t_move_file_result1
    t_move_file_info2 = api.get_dirent_by_path(
        t_repo_id1, '/' + dir_name + '/' + new_file_name)
    assert t_move_file_info1.mtime == t_move_file_info2.mtime
    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + new_file_name)
    assert t_file_id is None

    # test move_file (synchronous) with rename
    t_move_file_result1 = api.move_file(t_repo_id1, '/' + dir_name,
                                        new_file_name, t_repo_id1, '/',
                                        new_file_name_2, 1, USER, 0, 1)
    assert t_move_file_result1
    t_file_id = api.get_file_id_by_path(t_repo_id1,
                                        '/' + dir_name + '/' + new_file_name)
    assert t_file_id is None

    # test move_file (asynchronous): force a quota failure first
    usage = api.get_user_self_usage(USER)
    api.set_user_quota(USER, usage + 1)
    t_move_file_result2 = api.move_file(t_repo_id1, '/', file_name, t_repo_id2,
                                        '/', new_file_name, 1, USER, 1, 0)
    assert t_move_file_result2
    assert t_move_file_result2.task_id
    assert t_move_file_result2.background
    for _ in range(100):
        time.sleep(0.1)
        t_move_task = api.get_copy_task(t_move_file_result2.task_id)
        if t_move_task.failed:
            break
    assert t_move_task.failed
    assert t_move_task.failed_reason == 'Quota is full'

    api.set_user_quota(USER, -1)
    t_move_file_result2 = api.move_file(t_repo_id1, '/', file_name, t_repo_id2,
                                        '/', new_file_name, 1, USER, 1, 0)
    assert t_move_file_result2
    assert t_move_file_result2.task_id
    assert t_move_file_result2.background
    for _ in range(100):
        time.sleep(0.1)
        t_move_task = api.get_copy_task(t_move_file_result2.task_id)
        if t_move_task.successful:
            break
    assert t_move_task.successful
    t_file_id = api.get_file_id_by_path(t_repo_id2, '/' + new_file_name)
    assert t_file_size == api.get_file_size(t_repo_id2, t_repo_version,
                                            t_file_id)

    # test post_empty_file
    assert api.post_empty_file(t_repo_id1, '/' + dir_name, empty_file_name,
                               USER) == 0
    t_file_id = api.get_file_id_by_path(t_repo_id1,
                                        '/' + dir_name + '/' + empty_file_name)
    assert api.get_file_size(t_repo_id1, t_repo_version, t_file_id) == 0

    # test rename_file
    assert api.rename_file(t_repo_id1, '/' + dir_name, empty_file_name,
                           new_empty_file_name, USER) == 0

    # test put_file
    t_new_file_id = api.put_file(t_repo_id1, file_path, '/' + dir_name,
                                 new_empty_file_name, USER, None)
    assert t_new_file_id

    # test get_file_revisions
    t_commit_list = api.get_file_revisions(t_repo_id2, None, '/' + file_name,
                                           2)
    assert t_commit_list
    assert len(t_commit_list) == 2
    assert t_commit_list[0].creator_name == USER

    # test del_file
    assert api.del_file(t_repo_id2, '/', file_name, USER) == 0

    # test get_deleted
    t_deleted_file_list = api.get_deleted(t_repo_id2, 1)
    assert t_deleted_file_list
    assert len(t_deleted_file_list) == 2
    assert t_deleted_file_list[0].obj_name == file_name
    assert t_deleted_file_list[0].basedir == '/'

    # deleting a non-existent file should still return 0
    assert api.del_file(t_repo_id2, '/', file_name, USER) == 0

    assert api.del_file(t_repo_id1, '/' + dir_name, new_empty_file_name,
                        USER) == 0
    assert api.del_file(t_repo_id1, '/' + dir_name, new_file_name, USER) == 0
    assert api.del_file(t_repo_id2, '/', new_file_name, USER) == 0
    assert api.del_file(t_repo_id1, '/', new_file_name_2, USER) == 0

    time.sleep(1)
    # Clean up both repos (t_repo_id2 was previously leaked).
    api.remove_repo(t_repo_id1)
    api.remove_repo(t_repo_id2)
 def endWrite(self, withErrors):
     """Finalize an upload: commit the temp file to the repo, then delete it.

     withErrors: truthy when the upload was aborted; the temp file is
     then discarded without being committed.
     """
     if not withErrors:
         parent, filename = os.path.split(self.rel_path)
         seafile_api.put_file(self.repo.id, self.tmpfile_path, parent, filename,
                              self.username, None)
     # Guard the cleanup (consistent with the other endWrite variants):
     # unlink() on an unset or already-removed path would raise and mask
     # the real outcome of the request.
     if self.tmpfile_path:
         try:
             os.unlink(self.tmpfile_path)
         finally:
             self.tmpfile_path = None
Example #7
0
def _cdc_pdf_names(repo):
    """Names of CDC certificate PDFs currently in the repo root."""
    root = get_root_dir(repo)
    return [
        f.name for f in root.get_files_list()
        if f.name.startswith(CDC_PDF_PREFIX) and f.name.endswith('.pdf')
    ]


def test_cdc_completely(create_tmp_repo):
    """Test CDC generation, complete workflow:
    default-library copy, certificate creation, deletion (no auto
    recreation) and recreation after a metadata change.
    """

    repo = create_tmp_repo

    # TEST DEFAULT LIBRARY: check default library files are available
    kdl = get_keeper_default_library()
    if kdl:
        check_set = {d.obj_name for d in kdl['dirents']}
    else:
        # pytest.fail takes the message positionally; the msg= keyword
        # was removed in modern pytest.
        pytest.fail("Default Library is empty, please install!")

    # copy library default files to lib
    copy_keeper_default_library(repo.id)
    sleep(2)

    # check copied files
    root = get_root_dir(repo)
    for fn in check_set:
        if not root.lookup(fn):
            pytest.fail("Cannot find %s!" % fn)

    # TEST CDC GENERATION: add some txt file
    f = tempfile.NamedTemporaryFile()
    # NamedTemporaryFile defaults to binary mode: write bytes, not str
    # (the original str write raises TypeError on Python 3).
    f.write(b'Text')
    f.flush()
    os.chmod(f.name, 0o666)

    seafile_api.post_file(repo.id, f.name, "/", "some_file.txt", SERVER_EMAIL)
    f.close()

    # update md file with correct fields
    # (MD_GOOD is markdown text -- str, per the .replace() below -- so it
    # must be encoded for the binary-mode temp file)
    f = tempfile.NamedTemporaryFile()
    f.write(MD_GOOD.encode('utf-8'))
    f.flush()
    os.chmod(f.name, 0o666)

    seafile_api.put_file(repo.id, f.name, "/", ARCHIVE_METADATA_TARGET,
                         SERVER_EMAIL, None)
    f.close()

    sleep(10)

    # check cdc pdf exists
    cdc_pdfs = _cdc_pdf_names(repo)
    if not cdc_pdfs:
        pytest.fail("Cannot find cdc pdf in repo %s!" % repo.name)

    # check cdc pdf remove
    seafile_api.del_file(repo.id, "/", cdc_pdfs[0], SERVER_EMAIL)

    sleep(10)

    cdc_pdfs = _cdc_pdf_names(repo)
    assert not cdc_pdfs, "cdc pdf should not be recreated if it has been deleted"

    # update md file with corrected field
    f = tempfile.NamedTemporaryFile()
    f.write(MD_GOOD.replace("2010", "2017").encode('utf-8'))
    f.flush()
    os.chmod(f.name, 0o666)

    seafile_api.put_file(repo.id, f.name, "/", ARCHIVE_METADATA_TARGET,
                         SERVER_EMAIL, None)
    f.close()

    sleep(10)

    cdc_pdfs = _cdc_pdf_names(repo)
    assert cdc_pdfs, "cdc pdf should be recreated if metadata md has been changed"
 def endWrite(self, withErrors):
     """Finalize an upload: commit the temp file to the repo, then delete it.

     withErrors: truthy when the upload was aborted; the temp file is
     then discarded without being committed.
     """
     if not withErrors:
         parent, filename = os.path.split(self.rel_path)
         seafile_api.put_file(self.repo.id, self.tmpfile_path, parent,
                              filename, self.username, None)
     # Guard the cleanup (consistent with the other endWrite variants):
     # unlink() on an unset or already-removed path would raise and mask
     # the real outcome of the request.
     if self.tmpfile_path:
         try:
             os.unlink(self.tmpfile_path)
         finally:
             self.tmpfile_path = None
Example #9
0
def generate_certificate(repo, commit):
    """Generate a Cared Data Certificate (CDC) PDF for *repo*.

    Parses the archive metadata markdown from the repo root, validates
    it, renders the certificate PDF via the external Java generator and
    stores it back into the repo.  Returns False on the early-exit paths
    (encrypted repo, template repo, missing metadata, no creative
    dirents); otherwise True, even when certification failed -- errors
    inside the main block are only logged.
    """
    # exit if repo encrypted
    if repo.encrypted:
        return False

    # exit if repo is system template
    if repo.rep_desc == TEMPLATE_DESC:
        return False

    root_dir = fs_mgr.load_seafdir(repo.id, repo.version, commit.root_id)

    # get latest version of the ARCHIVE_METADATA_TARGET
    md_file = root_dir.lookup(ARCHIVE_METADATA_TARGET)

    # exit if no metadata file exists
    if not md_file:
        return False

    # check whether there is at least one creative dirent
    if not has_at_least_one_creative_dirent(root_dir):
        return False
    logging.info('Repo has creative dirents')

    # Initialize up front so the finally block cannot hit a NameError:
    # the original closed `db` unconditionally even when get_db() raised.
    db = None
    tmp_path = None
    try:
        db = get_db(KEEPER_DB_NAME)
        cur = db.cursor()

        owner = seafile_api.get_repo_owner(repo.id)
        logging.info("Certifying repo id: %s, name: %s, owner: %s ..." % (repo.id, repo.name, owner))
        cdc_dict = parse_markdown(md_file.get_content())
        if validate(cdc_dict):

            cdc_id = register_cdc_in_db(db, cur, repo.id, owner)

            logging.info("Generate CDC PDF...")
            cdc_pdf = CDC_PDF_PREFIX + cdc_id + ".pdf"
            # TODO: specify which url should be in CDC
            # as tmp decision: SERVICE_URL
            repo_share_url = SERVICE_URL
            jars = ":".join(MODULE_PATH + '/' + e for e in CDC_GENERATOR_JARS)
            # NOTE(review): the literal double quotes around each value
            # reach the Java tool verbatim (check_call uses no shell) --
            # confirm the generator strips them itself.
            args = ["java", "-cp", jars, CDC_GENERATOR_MAIN_CLASS,
                    "-i", "\"" + cdc_id + "\"",
                    "-t", "\"" + cdc_dict['Title'] + "\"",
                    "-aa", "\"" + cdc_dict['Author'] + "\"",
                    "-d", "\"" + cdc_dict['Description'] + "\"",
                    "-c", "\"" + owner + "\"",
                    "-u", "\"" + repo_share_url + "\"",
                    cdc_pdf]
            check_call(args)
            tmp_path = os.path.abspath(cdc_pdf)
            logging.info("PDF sucessfully generated")

            logging.info("Add " + cdc_pdf + " to the repo...")
            if UPDATE:
                seafile_api.put_file(repo.id, tmp_path, "/", cdc_pdf, SERVER_EMAIL, None)
                logging.info("Sucessfully updated")
            else:
                seafile_api.post_file(repo.id, tmp_path, "/", cdc_pdf, SERVER_EMAIL)
                logging.info("Sucessfully added")
            if not DEBUG:
                send_email(owner, {'USER_NAME': get_user_name(owner), 'PROJECT_NAME':repo.name, 'PROJECT_URL':get_repo_pivate_url(repo.id) })
                # TODO: Send seafile notification
    except Exception as err:
        # Log at error level: the original used logging.info, which hid
        # real failures in the info stream.
        logging.error(str(err))
    finally:
        if db is not None:
            db.close()
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)

    return True
Example #10
0
def generate_certificate(repo, commit):
    """Generate a Cared Data Certificate (CDC) for *repo*, driven by the
    *commit* that triggered the event.

    Early-exit paths (return False): encrypted repo, system-template
    repo, CDC-PDF-deleted commit (also clears the catalog cert status),
    repo already certified with unchanged metadata, missing metadata
    file, no creative dirents, or the generator failed to produce a PDF.
    Otherwise returns True, even when an exception was caught and only
    logged.  Side effects: registers the CDC in the DB, runs the external
    Java generator through a shell, stores the PDF in the repo, and may
    send email / user notifications.
    """

    event = None

    # exit if repo encrypted
    if repo.encrypted:
        return False

    # exit if repo is system template
    if repo.rep_desc == TEMPLATE_DESC:
        return False

    # TODO: if cdc pdf is deleted: set cert status to False and exit
    # see https://github.com/MPDL/KEEPER/issues/41
    if re.match(EVENT_PATTERNS['CDC_PDF_DELETED'], commit.desc):
        # NOTE(review): this `event` assignment is dead -- the function
        # returns on the next-but-one line and the value is never read.
        event = EVENT.pdf_delete
        Catalog.objects.update_cert_status_by_repo_id(repo.id, False)
        return False

    if re.match(EVENT_PATTERNS['ARCHIVE_METADATA_TARGET_MODIFIED'],
                commit.desc):
        event = EVENT.md_modified

    try:

        cdc_id = get_cdc_id_by_repo(repo.id)

        if cdc_id is not None:
            if re.match(EVENT_PATTERNS['CDC_PDF_DELETED'], commit.desc):
                # if cdc pdf is deleted, add pdf again!
                event = EVENT.pdf_delete
            elif event != EVENT.md_modified:
                # exit if already certified and MD has not been changed
                return False

        dir = fs_mgr.load_seafdir(repo.id, repo.version, commit.root_id)

        # certificate already exists in root
        # file_names = [f.name for f in dir.get_files_list()]
        # if any(file_name.startswith(CDC_PDF_PREFIX) and file_name.endswith('.pdf') for file_name in file_names):
        # return False

        # get latest version of the ARCHIVE_METADATA_TARGET
        file = dir.lookup(ARCHIVE_METADATA_TARGET)

        #exit if no metadata file exists
        if not file:
            return False

        # check whether there is at least one creative dirent
        if not has_at_least_one_creative_dirent(dir):
            return False
        logger.info('Repo has creative dirents')

        owner = seafile_api.get_repo_owner(repo.id)
        logger.info("Certifying repo id: %s, name: %s, owner: %s ..." %
                    (repo.id, repo.name, owner))
        content = file.get_content().decode('utf-8')
        cdc_dict = parse_markdown(content)

        # `status` feeds the user notification payload below.
        status = 'metadata are not valid'
        is_successful = False

        if validate(cdc_dict):

            status = 'metadata are valid'

            if event == EVENT.pdf_delete:
                # only modified update
                cdc_id = register_cdc_in_db(repo.id, owner)[0]
            else:
                # register_cdc_in_db also decides create vs. update here.
                cdc_id, event = register_cdc_in_db(repo.id, owner)

            cdc_id = str(cdc_id)
            logger.info("Generate CDC PDF...")
            cdc_pdf = CDC_PDF_PREFIX + cdc_id + ".pdf"
            jars = ":".join(
                [MODULE_PATH + '/' + e for e in CDC_GENERATOR_JARS])
            tmp_path = tempfile.gettempdir() + "/" + cdc_pdf
            # NOTE(review): "date;" and "1>&2;" are shell fragments -- the
            # joined string runs via shell=True below.  quote_arg
            # presumably escapes the user-supplied Title/Author/
            # Description; confirm it is injection-safe.
            args = [
                "date;",
                "java",
                "-cp",
                jars,
                CDC_GENERATOR_MAIN_CLASS,
                "-i",
                quote_arg(cdc_id),
                "-t",
                quote_arg(cdc_dict['Title']),
                "-aa",
                quote_arg(cdc_dict['Author']),
                "-d",
                quote_arg(cdc_dict['Description']),
                "-c",
                quote_arg(owner),
                "-u",
                quote_arg(SERVICE_URL),
                tmp_path,
                "1>&2;",
            ]
            try:
                call_str = " ".join(args)
                logger.info(call_str)
                # subprocess.call(call_str, shell=True)
                # p = subprocess.Popen(call_str, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                p = subprocess.Popen(call_str,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     shell=True)
                stdout, stderr = p.communicate()
                if stdout:
                    logger.info(stdout)
                if stderr:
                    logger.error(stderr)

            except Exception as err:
                logger.error('Cannot call command')
                logger.error(traceback.format_exc())
                raise err

            # Success of the generator is detected solely by the PDF
            # existing on disk (the shell pipeline hides exit codes).
            if os.path.isfile(tmp_path):
                logger.info("PDF sucessfully generated, tmp_path=%s" %
                            tmp_path)
            else:
                logger.error(
                    "Cannot find generated CDC PDF, tmp_path=%s, exiting..." %
                    tmp_path)
                if event == EVENT.db_create:
                    # TODO: test
                    # Roll back the freshly created DB registration.
                    CDC.objects.get(cdc_id=cdc_id).delete()
                return False

            logger.info("Add " + cdc_pdf + " to the repo...")
            if event == EVENT.db_update:
                # cdc pdf already exists, then put
                if dir.lookup(cdc_pdf):
                    seafile_api.put_file(repo.id, tmp_path, "/", cdc_pdf,
                                         SERVER_EMAIL, None)
                    logger.info("Sucessfully updated")
                    status = 'updated'
                    CDC_MSG.append("Certificate has been updated")
                # post otherwise
                else:
                    seafile_api.post_file(repo.id, tmp_path, "/", cdc_pdf,
                                          SERVER_EMAIL)
                    logger.info("Sucessfully recreated")
                    status = 'recreated'
                    CDC_MSG.append("Certificate has been recreated")
                logging.info(
                    "CDC has been successfully updated for repo %s, id: %s" %
                    (repo.id, cdc_id))
            else:
                seafile_api.post_file(repo.id, tmp_path, "/", cdc_pdf,
                                      SERVER_EMAIL)
                logger.info("Sucessfully created")
                status = 'created'
                # NOTE(review): CDC_MSG looks like a module-level
                # accumulator -- confirm it is reset per run, otherwise
                # messages leak between repos.
                CDC_MSG.append("Certificate has been sucessfully created")
                # if not DEBUG:
                send_email(
                    owner, {
                        'SERVICE_URL': SERVICE_URL,
                        'USER_NAME': get_user_name(owner),
                        'PROJECT_NAME': repo.name,
                        'PROJECT_TITLE': cdc_dict['Title'],
                        'PROJECT_URL': get_repo_pivate_url(repo.id),
                        'AUTHOR_LIST': get_authors_for_email(
                            cdc_dict['Author']),
                        'CDC_PDF_URL': get_file_pivate_url(repo.id, cdc_pdf),
                        'CDC_ID': cdc_id
                    })
                logging.info(
                    "CDC has been successfully created for repo %s, id: %s" %
                    (repo.id, cdc_id))

            is_successful = True

        #send user notification
        logger.info("Commit desc: " + commit.desc)
        logger.info("event: {}".format(event))
        if event in (EVENT.md_modified, EVENT.db_create, EVENT.db_update):
            header = "Certificate processed for library" if is_successful else "Cannot process certificate for library"
            UserNotification.objects._add_user_notification(
                owner, MSG_TYPE_KEEPER_CDC_MSG,
                json.dumps({
                    'header': header,
                    'status': status,
                    'message': ('; '.join(CDC_MSG)),
                    'msg_from': SERVER_EMAIL,
                    'lib': repo.id,
                    'lib_name': repo.name
                }))

    except Exception as err:
        logger.error(traceback.format_exc())
        logging.error(
            "CDC generation for repo %s has been failed, check %s for details. \nTraceback:"
            % (repo.id, CDC_LOG))
        logging.error(traceback.format_exc())
    finally:
        # tmp_path only exists in locals once PDF generation was reached.
        if 'tmp_path' in vars() and os.path.exists(tmp_path):
            os.remove(tmp_path)

    return True
Example #11
0
def _format_person_name(person):
    """Build "Last, First" from a dict with optional lastName/firstName keys.

    Empty or whitespace-only parts are dropped; returns '' when neither
    part is set.
    """
    parts = []
    last = person.get('lastName', '').strip()
    if last:
        parts.append(last)
    first = person.get('firstName', '').strip()
    if first:
        parts.append(first)
    return ", ".join(parts)


def save_archive_metadata(repo_id, md_dict):
    """
    Convert archive metadata dictionary into markdown format
    and put it into repo_id library (creating or updating the
    ARCHIVE_METADATA_TARGET file in the library root).
    """
    # Section template: "## <name>\n<value>\n\n".
    p = "## %s\n%s\n\n"
    md_markdown = ARCHIVE_METADATA_TARGET_HEADER
    md_markdown += p % ('Title', md_dict.get('title', ''))

    # Authors: one "Last, First; Aff1 | Aff2" entry per line.
    al = md_dict.get('authors', '')
    a_res = []
    if al and isinstance(al, list):
        for a in al:
            name = _format_person_name(a)

            # Affiliations, ignoring empty/blank entries.
            affs = a.get('affs')
            affs_str = ''
            if affs:
                affs = [aff for aff in affs if aff and aff.strip()]
                if affs:
                    affs_str = " | ".join(affs)

            a_res.append("; ".join(filter(None, [name, affs_str])))

    md_markdown += p % ('Author', '\n'.join(a_res))
    md_markdown += p % ('Publisher', md_dict.get('publisher', ''))
    md_markdown += p % ('Description', md_dict.get('description', ''))
    md_markdown += p % ('Year', md_dict.get('year', ''))

    # Institute: "institute; department; Director1 | Director2".
    dl = md_dict.get('directors', '')
    d_tmp = []
    if dl and isinstance(dl, list):
        for d in dl:
            name = _format_person_name(d)
            if name:
                d_tmp.append(name)

    md_markdown += p % (
        'Institute',
        "; ".join([
            md_dict.get('institute', ''),
            md_dict.get('department', ''),
            # TODO: multiple directors revision!!!!
            " | ".join(d_tmp)
        ]))

    md_markdown += p % ('Resource Type', md_dict.get('resourceType', ''))
    md_markdown += p % ('License', md_dict.get('license', ''))
    # Drop the trailing blank line left by the final section template.
    md_markdown = md_markdown[:-2]

    try:
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(md_markdown.encode())
            fp.flush()
            if seafile_api.get_dirent_by_path(repo_id,
                                              ARCHIVE_METADATA_TARGET):
                # file exists, update
                seafile_api.put_file(repo_id, fp.name, "/",
                                     ARCHIVE_METADATA_TARGET, SERVER_EMAIL,
                                     None)
            else:
                # create file
                seafile_api.post_file(repo_id, fp.name, "/",
                                      ARCHIVE_METADATA_TARGET, SERVER_EMAIL)
    except Exception as e:
        logger.error('Cannot save file %s: %r', ARCHIVE_METADATA_TARGET, e)