Example #1
    def output_path(self):
        return os.path.join(
            djangui_settings.DJANGUI_FILE_DIR,
            get_valid_filename(self.user.username if self.user is not None else ""),
            get_valid_filename(self.script.slug if not self.script.save_path else self.script.save_path),
            str(self.pk),
        )
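Every snippet on this page leans on the same Django helper, so a quick sketch of its behavior is useful up front: get_valid_filename strips leading/trailing whitespace, replaces interior spaces with underscores, and removes any character that is not alphanumeric, a dash, an underscore, or a dot. The first input below is Django's own documented example; the second is made up for illustration.

from django.utils.text import get_valid_filename

print(get_valid_filename("john's portrait in 2004.jpg"))  # johns_portrait_in_2004.jpg
print(get_valid_filename("  my report (final).pdf "))     # my_report_final.pdf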
Example #2
    def _calculate_remote_path(self, job_exe, input_file_ids):
        '''Returns the remote path for storing the products

        :param job_exe: The job execution model (with related job and job_type fields) that is storing the files
        :type job_exe: :class:`job.models.JobExecution`
        :param input_file_ids: Set of input file IDs
        :type input_file_ids: set of int
        :returns: The remote path for storing the products
        :rtype: str
        '''

        job_type_path = get_valid_filename(job_exe.job.job_type.name)
        job_version_path = get_valid_filename(job_exe.job.job_type.version)
        remote_path = os.path.join(job_type_path, job_version_path)

        # Try to use data start time from earliest ancestor source file
        the_date = None
        for source_file in FileAncestryLink.objects.get_source_ancestors(list(input_file_ids)):
            if source_file.data_started:
                if not the_date or source_file.data_started < the_date:
                    the_date = source_file.data_started

        # No data start time populated, use current time
        if not the_date:
            remote_path = os.path.join(remote_path, 'unknown_source_data_time')
            the_date = now()

        year_dir = str(the_date.year)
        month_dir = u'%02d' % the_date.month
        day_dir = u'%02d' % the_date.day
        return os.path.join(remote_path, year_dir, month_dir, day_dir, 'job_exe_%i' % job_exe.id)
Example #3
def content_filename(instance, filename):
    return os.path.join(
        settings.MEDIA_ROOT,
        'auth_app',
        'files',
        get_valid_filename(type(instance).__name__.lower()),
        get_valid_filename(filename),
    )
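content_filename follows the protocol Django expects from an upload_to callable: it receives the model instance and the original filename and returns the destination path. A minimal sketch of how it might be wired up (the Document model and its field name are hypothetical):

from django.db import models

class Document(models.Model):
    # Django calls content_filename(instance, filename) at save time
    # to compute where the uploaded file is stored.
    attachment = models.FileField(upload_to=content_filename)

Note that joining settings.MEDIA_ROOT into the returned path, as this example does, is atypical: Django treats an upload_to result as relative to the storage root, so most upload_to callables return a relative path.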
Example #4
def webcam_upload(request, run_uuid):
    """
    Receives upload requests for video/audio chunks during the experiment and
    merges these chunks into a file.
    """

    fs = FileSystemStorage(location=settings.WEBCAM_ROOT)

    # Upload request
    if request.method == 'POST' and request.FILES.get('file'):
        webcam_file = request.FILES.get('file')
        # webcam_file_type = request.POST.get('type')

        # Delete existing file
        if fs.exists(webcam_file.name):
            fs.delete(webcam_file.name)

        fs.save(get_valid_filename(webcam_file.name), webcam_file)
        logger.info('Received upload request of %s.' % webcam_file.name)
        return HttpResponse(status=204)

    # Merge request
    elif request.method == 'POST' and request.POST.get('trialResultId'):
        # Get base filename, by removing chunk number at the end
        base_filename = request.POST.get('filename')
        base_filename = get_valid_filename(base_filename)
        logger.info('Received last file of %s, merge files.' % base_filename)

        # Find and merge individual chunks
        webcam_files = find_files(base_filename)
        merge_files(base_filename + '.webm', webcam_files)

        # Delete chunks
        for webcam_file in webcam_files:
            fs.delete(webcam_file)

        # Add filename to trial result
        trial_result_id = 0
        try:
            trial_result_id = int(request.POST.get('trialResultId'))
        except ValueError as e:
            logger.exception('Failed to retrieve trial result ID: ' + str(e))
            raise Http404('Invalid trialResultId.')
        trial_result = get_object_or_404(TrialResult,
                                         pk=trial_result_id,
                                         subject=run_uuid)
        trial_result.webcam_file = base_filename + '.webm'
        trial_result.save()
        logger.info('Successfully saved webcam file to trial result.')
        return HttpResponse(status=204)

    else:
        logger.error('Failed to upload webcam file.')
        raise Http404('Page not found.')
Example #5
    def output_path(self):
        return os.path.join(
            wooey_settings.WOOEY_FILE_DIR,
            get_valid_filename(self.user.username if self.user is not None else ""),
            get_valid_filename(
                self.script_version.script.slug
                if not self.script_version.script.save_path
                else self.script_version.script.save_path
            ),
            str(self.uuid),
        )
Example #6
def sdrfile_name(instance, filename):
    resourcetype = instance.__class__.__name__.lower()
    if resourcetype == Feature.__name__.lower():
        resourcetype = get_valid_filename(
            Truncator(instance.featuretype.name).words(4, truncate=''))
    humanname = get_valid_filename(
        Truncator(instance.sdr.name_short).words(4, truncate=''))
    root, ext = os.path.splitext(filename)
    instancefile = '%s-%s-%s-%s%s' % (instance.sdr_id, humanname, resourcetype,
                                      instance.pk, ext)
    return instancefile
Example #7
    def test_successful_recipe_path(self, mock_upload_files, mock_create_file_ancestry_links):
        """Tests calling ProductDataFileType.store_files() successfully with a job that is in a recipe"""

        job_exe_in_recipe = job_utils.create_job_exe(status='RUNNING')
        recipe = recipe_utils.create_recipe()
        _recipe_job = recipe_utils.create_recipe_job(recipe=recipe, job_name='My Job', job=job_exe_in_recipe.job)
        remote_base_path_with_recipe = os.path.join('recipes', get_valid_filename(recipe.recipe_type.name),
                                                    get_valid_filename(recipe.recipe_type.version), 'jobs',
                                                    get_valid_filename(job_exe_in_recipe.job.job_type.name),
                                                    get_valid_filename(job_exe_in_recipe.job.job_type.version))

        local_path_1 = os.path.join('my', 'path', 'one', 'my_test.txt')
        media_type_1 = 'text/plain'
        local_path_2 = os.path.join('my', 'path', 'one', 'my_test.json')
        media_type_2 = 'application/json'
        local_path_3 = os.path.join('my', 'path', 'three', 'my_test.png')
        media_type_3 = 'image/png'
        local_path_4 = os.path.join('my', 'path', 'four', 'my_test.xml')
        media_type_4 = None

        # Set up mocks
        def new_upload_files(upload_dir, work_dir, file_entries, input_file_ids, job_exe, workspace):
            results = []
            for file_entry in file_entries:
                # Check base remote path for recipe type and job type information
                self.assertTrue(file_entry[1].startswith(remote_base_path_with_recipe))
                if file_entry[0] == local_path_1:
                    mock_1 = MagicMock()
                    mock_1.id = 1
                    results.append(mock_1)
                elif file_entry[0] == local_path_2:
                    mock_2 = MagicMock()
                    mock_2.id = 2
                    results.append(mock_2)
                elif file_entry[0] == local_path_3:
                    mock_3 = MagicMock()
                    mock_3.id = 3
                    results.append(mock_3)
                elif file_entry[0] == local_path_4:
                    mock_4 = MagicMock()
                    mock_4.id = 4
                    results.append(mock_4)
            return results
        mock_upload_files.side_effect = new_upload_files

        data_files = {self.workspace_1.id: [(local_path_1, media_type_1), (local_path_2, media_type_2)],
                      self.workspace_2.id: [(local_path_3, media_type_3), (local_path_4, media_type_4)]}

        parent_ids = {98, 99}  # Dummy values

        upload_dir = 'upload_dir'
        ProductDataFileStore().store_files(upload_dir, 'work_dir', data_files, parent_ids, job_exe_in_recipe)
Example #8
    def test_successful_recipe_path(self, mock_upload_files, mock_create_file_ancestry_links):
        """Tests calling ProductDataFileType.store_files() successfully with a job that is in a recipe"""

        job_exe_in_recipe = job_utils.create_job_exe(status='RUNNING')
        recipe = recipe_utils.create_recipe()
        _recipe_job = recipe_utils.create_recipe_job(recipe=recipe, job_name='My Job', job=job_exe_in_recipe.job)
        remote_base_path_with_recipe = os.path.join('recipes', get_valid_filename(recipe.recipe_type.name),
                                                    get_valid_filename(recipe.recipe_type.version), 'jobs',
                                                    get_valid_filename(job_exe_in_recipe.job.job_type.name),
                                                    get_valid_filename(job_exe_in_recipe.job.job_type.version))

        local_path_1 = os.path.join('my', 'path', 'one', 'my_test.txt')
        media_type_1 = 'text/plain'
        local_path_2 = os.path.join('my', 'path', 'one', 'my_test.json')
        media_type_2 = 'application/json'
        local_path_3 = os.path.join('my', 'path', 'three', 'my_test.png')
        media_type_3 = 'image/png'
        local_path_4 = os.path.join('my', 'path', 'four', 'my_test.xml')
        media_type_4 = None

        # Set up mocks
        def new_upload_files(file_entries, input_file_ids, job_exe, workspace):
            results = []
            for file_entry in file_entries:
                # Check base remote path for recipe type and job type information
                self.assertTrue(file_entry[1].startswith(remote_base_path_with_recipe))
                if file_entry[0] == local_path_1:
                    mock_1 = MagicMock()
                    mock_1.id = 1
                    results.append(mock_1)
                elif file_entry[0] == local_path_2:
                    mock_2 = MagicMock()
                    mock_2.id = 2
                    results.append(mock_2)
                elif file_entry[0] == local_path_3:
                    mock_3 = MagicMock()
                    mock_3.id = 3
                    results.append(mock_3)
                elif file_entry[0] == local_path_4:
                    mock_4 = MagicMock()
                    mock_4.id = 4
                    results.append(mock_4)
            return results
        mock_upload_files.side_effect = new_upload_files

        data_files = {self.workspace_1.id: [(local_path_1, media_type_1), (local_path_2, media_type_2)],
                      self.workspace_2.id: [(local_path_3, media_type_3), (local_path_4, media_type_4)]}

        parent_ids = {98, 99}  # Dummy values

        ProductDataFileStore().store_files(data_files, parent_ids, job_exe_in_recipe)
Example #9
def make_upload_path(instance, filename):
    """Return a string like pdf/2010/08/13/foo_v._var.pdf, with the date set
    as the date_filed for the case."""
    # this code is NOT cross-platform; use os.path.join or similar to fix.
    mimetype = filename.split('.')[-1] + '/'

    try:
        path = mimetype + instance.date_filed.strftime("%Y/%m/%d/") + \
               get_valid_filename(filename)
    except AttributeError:
        # The date is unknown for the case. Use today's date.
        path = mimetype + instance.time_retrieved.strftime("%Y/%m/%d/") + \
               get_valid_filename(filename)
    return path
Example #10
    def _calculate_remote_path(self, job_exe, input_file_ids):
        """Returns the remote path for storing the products

        :param job_exe: The job execution model (with related job and job_type fields) that is storing the files
        :type job_exe: :class:`job.models.JobExecution`
        :param input_file_ids: Set of input file IDs
        :type input_file_ids: set of int
        :returns: The remote path for storing the products
        :rtype: str
        """

        remote_path = ''
        job_recipe = Recipe.objects.get_recipe_for_job(job_exe.job_id)
        if job_recipe:
            recipe = job_recipe.recipe
            recipe_type_path = get_valid_filename(recipe.recipe_type.name)
            recipe_revision = RecipeTypeRevision.objects.get_revision(
                recipe.recipe_type.name,
                recipe.recipe_type.revision_num).revision_num
            recipe_version_path = get_valid_filename(
                'revision_%i' % recipe.recipe_type.revision_num)
            remote_path = os.path.join(remote_path, 'recipes',
                                       recipe_type_path, recipe_version_path)
        job_type_path = get_valid_filename(job_exe.job.job_type.name)
        job_version_path = get_valid_filename(job_exe.job.job_type.version)
        remote_path = os.path.join(remote_path, 'jobs', job_type_path,
                                   job_version_path)

        # Try to use source start time from the job
        the_date = job_exe.job.source_started

        if not the_date:
            # Try to grab source started the old way through the source ancestor file
            for source_file in FileAncestryLink.objects.get_source_ancestors(
                    list(input_file_ids)):
                if source_file.data_started:
                    if not the_date or source_file.data_started < the_date:
                        the_date = source_file.data_started

        # No data start time populated, use current time
        if not the_date:
            remote_path = os.path.join(remote_path, 'unknown_source_data_time')
            the_date = now()

        year_dir = str(the_date.year)
        month_dir = '%02d' % the_date.month
        day_dir = '%02d' % the_date.day
        return os.path.join(remote_path, year_dir, month_dir, day_dir,
                            'job_exe_%i' % job_exe.id)
Example #11
    def setUp(self):
        django.setup()

        self.workspace_1 = Workspace.objects.create(name='Test workspace 1')
        self.workspace_2 = Workspace.objects.create(name='Test workspace 2', is_active=False)

        interface = {'version': '1.0', 'command': 'my command', 'command_arguments': 'command arguments'}

        job_type = job_utils.create_job_type(name='Type 1', version='1.0', interface=interface)

        event = TriggerEvent.objects.create_trigger_event('TEST', None, {}, now())
        self.job = job_utils.create_job(job_type=job_type, event=event, status='RUNNING', last_status_change=now())
        self.job_exe = job_utils.create_job_exe(job=self.job, status='RUNNING', timeout=1, queued=now())
        self.remote_base_path = os.path.join('jobs', get_valid_filename(self.job.job_type.name),
                                             get_valid_filename(self.job.job_type.version))
Example #12
    def setUp(self):
        django.setup()

        self.workspace_1 = Workspace.objects.create(name='Test workspace 1')
        self.workspace_2 = Workspace.objects.create(name='Test workspace 2', is_active=False)

        interface = {'version': '1.0', 'command': 'my command'}  

        job_type = job_utils.create_job_type(name='Type 1', version='1.0', interface=interface)

        event = TriggerEvent.objects.create_trigger_event('TEST', None, {}, now())
        self.job = job_utils.create_job(job_type=job_type, event=event, status='RUNNING', last_status_change=now())
        self.job_exe = job_utils.create_job_exe(job=self.job, status='RUNNING', timeout=1, queued=now())
        self.remote_base_path = os.path.join('jobs', get_valid_filename(self.job.job_type.name),
                                             get_valid_filename(self.job.job_type.version))
Example #13
    async def upload_file(self, request, user, directory):
        if request.method != "POST" or request.content_type != "multipart/form-data":
            raise AccessError(400, "invalid")

        reader = await request.multipart()
        field = await reader.next()
        if field.name != "file":
            raise AccessError(400, "invalid")
        fname = text.get_valid_filename(os.path.basename(field.filename))
        name = os.path.join(directory, fname)
        fullname = os.path.join(config.get("global", "basedir"), name)
        if not fname:
            self.redirect(request, user, directory, "invalid filename")
        if os.access(fullname, os.R_OK):
            raise AccessError(403, "duplicate")
        with open(fullname, 'wb') as f:
            while True:
                chunk = await field.read_chunk()  # 8192 bytes by default.
                if not chunk:
                    break
                f.write(chunk)
        log(__name__).info("%s - - File %s uploaded by %s",
                           self.peername(request), name, user)
        size = os.lstat(fullname).st_size
        self.redirect(request, user, directory,
                      "%d bytes saved as %s" % (size, fname))
Example #14
    def zip_file(self, request, *args, **kwargs):
        socket = self.get_object()
        real_file_list = {
            f_key:
            request.build_absolute_uri(Socket.get_storage().url(f_val['file']))
            for f_key, f_val in socket.file_list.items()
            if not f_key.startswith('<')
        }

        # File list with full urls can get quite big so we pass it through tempfile
        with tempfile.NamedTemporaryFile(delete=False,
                                         suffix='.file_list',
                                         mode="w") as list_file:
            json.dump(real_file_list, list_file)

        try:
            propagate_uwsgi_params(get_tracing_attrs())

            uwsgi.add_var('OFFLOAD_HANDLER',
                          'apps.sockets.handlers.SocketZipHandler')
            uwsgi.add_var('LIST_FILE', list_file.name)
            uwsgi.add_var(
                'FILE_NAME',
                get_valid_filename('{}_{}'.format(socket.name,
                                                  socket.version)))
        except ValueError:
            os.unlink(list_file.name)
            raise UwsgiValueError()
        return HttpResponse()
Example #15
    def set_file_name(self, file_name):
        path = os.path.join("contents", file_name)
        # get file size if this content was saved individually
        if self.content_file:
            self.filesize = self.content_file.size
            self.file_name = get_valid_filename(file_name)
        return path
Example #16
    def __init__(self,
                 input_image_file,
                 input_image_filename,
                 cache_folder=None):
        """
        :Args:
          - self (:class:`BkImageEditor`): BkImageEditor instance.
        """

        self._input_image_file = input_image_file
        self._input_image_filename = get_valid_filename(input_image_filename)
        self._cache_folder = cache_folder

        self._image_width = None
        self._image_height = None
        self._image_translate_x = None
        self._image_translate_y = None
        self._image_flip_x = None
        self._image_flip_y = None
        self._image_rotate_degree = 0
        self._image_contrast = None
        self._image_brightness = None
        self._image_blur = None
        self._image_saturate = None
        self._image_opacity = None

        self._frame_width = None
        self._frame_height = None
Example #17
def generate_upload_filename(instance, filename):
    return os.path.join(
        settings.JQFILEUPLOAD_UPLOAD_SUBIDRECTORY,
        f"{instance.file_id}",
        get_valid_filename(
            f"{instance.start_byte}-{instance.end_byte}-{filename}"),
    )
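For illustration (byte range and filename made up), the chunk name survives get_valid_filename mostly intact, since digits, dashes, and dots are all in its allowed set:

# f"{instance.start_byte}-{instance.end_byte}-{filename}" -> '0-1048575-My File.bin'
# get_valid_filename('0-1048575-My File.bin') -> '0-1048575-My_File.bin'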
Example #18
    def __init__(self, input_image_file, input_image_filename, cache_folder=None):
        """
        :Args:
          - self (:class:`BkImageEditor`): BkImageEditor instance.
        """

        self._input_image_file = input_image_file
        self._input_image_filename = get_valid_filename(input_image_filename)
        self._cache_folder = cache_folder

        self._image_width = None
        self._image_height = None
        self._image_translate_x = None
        self._image_translate_y = None
        self._image_flip_x = None
        self._image_flip_y = None
        self._image_rotate_degree = 0
        self._image_contrast = None
        self._image_brightness = None
        self._image_blur = None
        self._image_saturate = None
        self._image_opacity = None

        self._frame_width = None
        self._frame_height = None
Example #19
def open_school(name, records):
    school = find_school(name, records)
    school = school[0]
    filename = 'data/schools/%s.html' % get_valid_filename(school['school_name'])
    with open(filename) as f:
        s1 = BS(f.read())
    return s1
Example #20
def upload_action(request):
    if request.method == "POST":
        for key in request.FILES.keys():
            new_photo = Photo(photo=request.FILES[key], owner=request.user)
            new_photo.save()
        message = {"status": "success"}
        return HttpResponse(json.dumps(message),
                            content_type="application/json")
    elif request.method == "DELETE":
        try:
            req_data = json.loads(request.body)
            if "filename" in req_data:
                f = get_valid_filename(req_data["filename"])
                files = Photo.objects.filter(photo=f)
                for fileobj in files:
                    if not fileobj.display:
                        fileobj.delete()
                message = {"status": "success"}
                return HttpResponse(json.dumps(message),
                                    content_type="application/json")
            else:
                return error()
        except ValueError:
            return error()
    return error()
Example #21
def handle_attachment(message, content, related=False):
#    r = ''
#    if related:
#        r = '(r)'

    filename, encoding = decode_header(content.get_filename())[0]
    if encoding:
        filename = filename.decode(encoding, errors='replace')

    #if not related:
    #    print "saving attachment [%s] of type %s from message %d %s" % (filename, content.get_content_type(), message.id, r)

    a = Attachment()
    a.filename = filename  # TODO need to parse weird strings from this
    if not a.filename:
        a.filename = str(uuid.uuid4())
    a.content_type = content.get_content_type()
    a.stored_location = os.path.join(files_dir, str(message.id), get_valid_filename(a.filename))
    # probably want to fix this too
    a.mime_related = related
    # load the file
    file_content = content.get_payload(decode=1)
    a.file_md5 = hashlib.md5(file_content).hexdigest()  # again, probably a better way to do this than all in memory
    # actually write it do disk - should wrap this in a try except too
    if not os.path.exists(os.path.join(files_dir, str(message.id))):
        os.makedirs(os.path.join(files_dir, str(message.id)))
    with open(a.stored_location, 'wb') as fp:
        fp.write(file_content)
    a.message = message
    a.save()
Example #22
    def generate_document(self, tempdirectory):
        return pdflatex(base_filename=get_valid_filename(self.description),
                        template=self.template,
                        context=self.__dict__,
                        tempdirectory=tempdirectory,
                        additional_files=self.additional_files
                        )
Example #23
def pdflatex(base_filename, template, context, tempdirectory, additional_files=None):
    base_filename = get_valid_filename(base_filename)
    tex_file = path.join(tempdirectory, "{}.tex".format(base_filename))
    with open(tex_file, 'w') as tex:
        tex.write(get_template(template).render(Context(context)))

    if additional_files:
        for additional_file in additional_files:
            needed_dir = path.join(tempdirectory, path.dirname(additional_file[1]))
            if not path.isdir(needed_dir):
                mkdir(needed_dir)
            copyfile(path.join(settings.BASE_DIR, additional_file[0]),
                     path.join(tempdirectory, additional_file[1]))

    call(['pdflatex', '-interaction', 'nonstopmode', tex_file], cwd=tempdirectory)

    # return path.join(tempdirectory, "{}.tex".format(base_filename))

    pdf_file = path.join(tempdirectory, "{}.pdf".format(base_filename))
    if path.isfile(pdf_file):
        return pdf_file

    log_file = path.join(tempdirectory, "{}.log".format(base_filename))
    if path.isfile(log_file):
        return log_file
    return None
Example #24
def parse_rankings(category):
    # open each school file, extract data from html table
    # add to record dict and save all info to new csv file
    list_file = os.path.join('data', category, 'list_%s.csv' % category)
    records = get_records(list_file)

    print('PARSING RANKINGS FOR %s' % category)

    for i, rec in enumerate(records):
        schoolfile = get_valid_filename(rec['school_name'])
        filename = 'data/%s/schools/%s.html' % (category, schoolfile)
                                                
        print('processing %s. %i of %i' % (filename, i+1, len(records)))
        with open(filename) as f:
            s1 = BS(f.read())
        trs = s1.find_all('tr')
        for row in trs[5:]:
            tds = row.find_all('td')
            key = slugify(tds[0].text.strip()).replace('-', '_')
            value = tds[1].text.strip().split('\n')[0]
            rec[key] = value

    with open(list_file, 'w') as outfile:
        writer = csv.DictWriter(outfile, fieldnames=records[0].keys())
        writer.writeheader()
        for rec in records:
            writer.writerow(rec)
Example #25
def download_song_from_url(self, song_pk):
    Song = apps.get_model('songs.Song')
    SongTask = apps.get_model('songs.SongTask')

    song = Song.objects.get(pk=song_pk)
    task = SongTask.objects.create(task_id=self.request.id,
                                   description="Download song from URL",
                                   song=song)

    outdir = tempfile.mkdtemp()

    opts = dict(**settings.YOUTUBE_DL_OPTS,
                outtmpl=f"{outdir}/%(title)s-%(id)s.%(ext)s")

    with YoutubeDL(opts) as ydl:
        data = ydl.extract_info(song.url)
        filename = f"{data['title']}-{data['id']}.mp3"
        output = os.path.join(outdir, filename)

        with open(output, 'rb') as fd:
            song.file = File(fd, name=get_valid_filename(filename))
            song.name = data['title']
            song.save()

    shutil.rmtree(outdir)
Example #26
def srt_to_ass_process(video_id, srt_file_dir):
    """
    将中英字幕合并成的srt字幕文件转换为ass格式字幕文件

    :param video_id:
    :param srt_file_dir:
    :return:
    """
    video = Video.objects.get(video_id=video_id)
    ass_filename = '%s-%s.zh-Hans.en.ass' % (get_valid_filename(
        video.title), video_id)

    ass_subs_dir = os.path.join(YOUTUBE_DOWNLOAD_DIR, ass_filename)

    convert_subtilte_format(srt_file_dir, ass_subs_dir)

    # If the ass subtitle file was generated successfully, return its path
    if os.path.isfile(ass_subs_dir):
        video.subtitle_merge = ass_subs_dir
        video.save(update_fields=[
            'subtitle_merge',
        ])
        return ass_subs_dir
    else:
        return False
Example #27
def _make_package_filename(instance, filename):
    if instance.contest:
        contest_name = instance.contest.id
    else:
        contest_name = 'no_contest'
    return 'package/%s/%s' % (contest_name,
            get_valid_filename(os.path.basename(filename)))
Example #28
def get_download_filename(title, preset, extension):
    """
    Return a valid filename to be downloaded as.
    """
    filename = "{} ({}).{}".format(title, preset, extension)
    valid_filename = get_valid_filename(filename)
    return valid_filename
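A quick illustration with made-up arguments; the parentheses and spaces are normalized away by get_valid_filename:

# get_download_filename("Intro to Biology", "High Resolution", "mp4")
# -> 'Intro_to_Biology_High_Resolution.mp4'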
Example #29
    def test_success_new(self, mock_mkdir, mock_getsize, mock_execute):
        """Tests calling SourceFileManager.store_file() successfully with a new source file"""
        def new_getsize(path):
            return 100
        mock_getsize.side_effect = new_getsize

        work_dir = 'work'
        workspace = storage_utils.create_workspace()
        remote_path = u'my/remote/path/file.txt'
        local_path = u'my/local/path/file.txt'
        workspace.cleanup_upload_dir = MagicMock()
        workspace.upload_files = MagicMock()
        workspace.setup_upload_dir = MagicMock()
        workspace.delete_files = MagicMock()
        
        wksp_upload_dir = os.path.join(work_dir, 'upload')
        wksp_work_dir = os.path.join(work_dir, 'work', 'workspaces', get_valid_filename(workspace.name))

        src_file = SourceFile.objects.store_file(work_dir, local_path, [], workspace, remote_path)

        workspace.upload_files.assert_called_once_with(wksp_upload_dir, wksp_work_dir, [('file.txt', remote_path)])
        self.assertListEqual(workspace.delete_files.call_args_list, [])

        self.assertEqual(u'file.txt', src_file.file_name)
        self.assertEqual(u'3d8e577bddb17db339eae0b3d9bcf180', src_file.uuid)
        self.assertEqual(remote_path, src_file.file_path)
        self.assertEqual(u'text/plain', src_file.media_type)
        self.assertEqual(workspace.id, src_file.workspace_id)
Example #30
    def _save(self, name, content):
        """
        Lifted partially from django/core/files/storage.py
        """
        full_path = self.path(name)

        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        elif not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # This file has a file path that we can move.
        if hasattr(content, 'temporary_file_path'):
            temp_data_location = content.temporary_file_path()
        else:
            tmp_prefix = "tmp_%s" % (get_valid_filename(name), )
            temp_data_location = tempfile.mktemp(prefix=tmp_prefix,
                                                 dir=self.location)
            try:
                # This is a normal uploadedfile that we can stream.
                # This fun binary flag incantation makes os.open throw an
                # OSError if the file already exists before we open it.
                fd = os.open(
                    temp_data_location, os.O_WRONLY | os.O_CREAT | os.O_EXCL
                    | getattr(os, 'O_BINARY', 0))
                locks.lock(fd, locks.LOCK_EX)
                for chunk in content.chunks():
                    os.write(fd, chunk)
                locks.unlock(fd)
                os.close(fd)
            except Exception:
                if os.path.exists(temp_data_location):
                    os.remove(temp_data_location)
                raise
Example #31
def make_recap_path(instance, filename):
    """Make a path to a good location on the local system for RECAP files.

    This dumps them all into the same directory, which seems to be OK, at least
    so far.
    """
    return "recap/%s" % get_valid_filename(filename)
Example #32
def _make_package_filename(instance, filename):
    if instance.contest:
        contest_name = instance.contest.id
    else:
        contest_name = 'no_contest'
    return 'package/%s/%s' % (contest_name,
                              get_valid_filename(os.path.basename(filename)))
Example #33
def make_recap_path(instance, filename):
    """Make a path to a good location on the local system for RECAP files.

    This dumps them all into the same directory, which seems to be OK, at least
    so far.
    """
    return "recap/%s" % get_valid_filename(filename)
Example #34
def _backup_handler(packet, encryption_passphrase=None):
    """Performs backup operation on packet."""
    if not os.path.exists(MANIFESTS_FOLDER):
        os.makedirs(MANIFESTS_FOLDER)

    manifest_path = os.path.join(MANIFESTS_FOLDER,
                                 get_valid_filename(packet.path) + '.json')
    manifests = {
        'apps': [{
            'name': component.app.app_id,
            'version': component.app.info.version,
            'backup': component.manifest
        } for component in packet.components]
    }
    with open(manifest_path, 'w') as manifest_file:
        json.dump(manifests, manifest_file)

    paths = packet.directories + packet.files
    paths.append(manifest_path)
    arguments = ['create-archive', '--path', packet.path]
    if packet.archive_comment:
        arguments += ['--comment', packet.archive_comment]

    arguments += ['--paths'] + paths
    input_data = ''
    if encryption_passphrase:
        input_data = json.dumps(
            {'encryption_passphrase': encryption_passphrase})

    actions.superuser_run('backups', arguments, input=input_data.encode())
Example #35
    def test_fails(self, mock_makedirs, mock_getsize):
        """Tests calling ScaleFileManager.upload_files() when Workspace.upload_files() fails"""
        def new_getsize(path):
            return 100
        mock_getsize.side_effect = new_getsize

        upload_dir = os.path.join('upload', 'dir')
        work_dir = os.path.join('work', 'dir')

        workspace = storage_test_utils.create_workspace()
        file_1 = ScaleFile()
        file_1.media_type = None  # Scale should auto-detect text/plain
        remote_path_1 = 'my/remote/path/file.txt'
        local_path_1 = 'my/local/path/file.txt'
        file_2 = ScaleFile()
        file_2.media_type = 'application/json'
        remote_path_2 = 'my/remote/path/2/file.json'
        local_path_2 = 'my/local/path/2/file.json'
        workspace.upload_files = MagicMock()
        workspace.upload_files.side_effect = Exception
        workspace.delete_files = MagicMock()
        delete_work_dir = os.path.join(work_dir, 'delete', get_valid_filename(workspace.name))

        files = [(file_1, local_path_1, remote_path_1), (file_2, local_path_2, remote_path_2)]
        self.assertRaises(Exception, ScaleFile.objects.upload_files, upload_dir, work_dir, workspace, files)
Example #36
def create_zipname(user, query):
    """
    Returns a (valid) filename for the zipfile containing the exported data.
    """
    date_created = query.date_created.strftime('%Y.%m.%d-%H.%M.%S')
    return get_valid_filename('_'.join(
        [user.username, query.title, date_created]))
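For illustration, with made-up values:

# user.username = 'alice', query.title = 'Monthly Sales', created 2024-01-05 09:30:00
# '_'.join(...) -> 'alice_Monthly Sales_2024.01.05-09.30.00'
# get_valid_filename -> 'alice_Monthly_Sales_2024.01.05-09.30.00'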
Example #37
def pdflatex(base_filename,
             template,
             context,
             tempdirectory,
             additional_files=None):
    base_filename = get_valid_filename(base_filename)
    tex_file = path.join(tempdirectory, "{}.tex".format(base_filename))
    with open(tex_file, 'w') as tex:
        tex.write(get_template(template).render(Context(context)))

    if additional_files:
        for additional_file in additional_files:
            needed_dir = path.join(tempdirectory,
                                   path.dirname(additional_file[1]))
            if not path.isdir(needed_dir):
                mkdir(needed_dir)
            copyfile(path.join(settings.BASE_DIR, additional_file[0]),
                     path.join(tempdirectory, additional_file[1]))

    call(['pdflatex', '-interaction', 'nonstopmode', tex_file],
         cwd=tempdirectory)

    # return path.join(tempdirectory, "{}.tex".format(base_filename))

    pdf_file = path.join(tempdirectory, "{}.pdf".format(base_filename))
    if path.isfile(pdf_file):
        return pdf_file

    log_file = path.join(tempdirectory, "{}.log".format(base_filename))
    if path.isfile(log_file):
        return log_file
    return None
Example #38
    def _save(self, name, content):
        """
        Lifted partially from django/core/files/storage.py
        """
        full_path = self.path(name)

        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        elif not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # This file has a file path that we can move.
        if hasattr(content, 'temporary_file_path'):
            temp_data_location = content.temporary_file_path()
        else:
            tmp_prefix = "tmp_%s" % (get_valid_filename(name), )
            temp_data_location = tempfile.mktemp(prefix=tmp_prefix,
                                                 dir=self.location)
            try:
                # This is a normal uploadedfile that we can stream.
                # This fun binary flag incantation makes os.open throw an
                # OSError if the file already exists before we open it.
                fd = os.open(temp_data_location,
                             os.O_WRONLY | os.O_CREAT |
                             os.O_EXCL | getattr(os, 'O_BINARY', 0))
                locks.lock(fd, locks.LOCK_EX)
                for chunk in content.chunks():
                    os.write(fd, chunk)
                locks.unlock(fd)
                os.close(fd)
            except Exception:
                if os.path.exists(temp_data_location):
                    os.remove(temp_data_location)
                raise
Example #39
    def save(self, request, obj, *args, **kwargs):
        self.instance.creator = request.user
        self.instance.content_type = ContentType.objects.get_for_model(obj)
        self.instance.object_id = obj.pk
        self.instance.name = get_valid_filename(
            self.instance.attachment_file.name)
        super(AttachmentForm, self).save(*args, **kwargs)
Example #40
    def do(self, options):
        self.directory = options.get("directory")
        self.mode = options.get("mode")

        if self.mode == "export" and not os.path.isdir(self.directory):
            os.mkdir(self.directory)
            self.logger.info('Directory created: %s' % self.directory)
        elif self.mode == "import" and os.path.isdir(self.directory):
            files = {
                os.path.splitext(get_valid_filename(f))[0].upper(): f
                for f in os.listdir(self.directory)
                if os.path.isfile(os.path.join(self.directory, f))
            }
            self.logger.info('Directory found: %s' % self.directory)
        else:
            raise CommandError('Please check mode (%s) or directory (%s)' %
                               (self.mode, self.directory))

        self.totalrows = self.model.objects.all().count()
        self.inf = 0
        for obj in self.model.objects.all():
            self.current_row += 1
            if self.pbar:
                self.progressBar(self.current_row, self.totalrows)
            else:
                self.logger.info(
                    'Update object %s/%s "%s"' %
                    (self.current_row, self.totalrows, obj.display))

            if self.mode == "export" and obj.valid_imagename not in self.excludes:
                self.export_image(obj)
            else:
                self.import_image(obj, files)
        print(self.inf)
Example #41
def download_latest_forecasts(request, pk):
    """
    :return: `latest_forecast_cols_for_project()` output as CSV. For now this just returns a list of the 2-tuples:
        (Forecast.id, Forecast.source), but later it may be generalized to allow passing specific columns in `request`
    """
    project = get_object_or_404(Project, pk=pk)
    if (not request.user.is_authenticated) or not is_user_ok_view_project(request.user, project):
        return HttpResponseForbidden()

    response = HttpResponse(content_type='text/csv')
    csv_filename = get_valid_filename(f"project-{project.name}-latest-forecasts.csv")
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(str(csv_filename))

    # for now this just returns a list of the 2-tuples: (Forecast.id, Forecast.source)
    rows = latest_forecast_cols_for_project(project, is_incl_fm_id=False, is_incl_tz_id=False,
                                            is_incl_issued_at=False, is_incl_created_at=False,
                                            is_incl_source=True, is_incl_notes=False)
    writer = csv.writer(response)
    writer.writerow(['forecast_id', 'source'])  # header

    # process rows, cleaning up for csv:
    # - [maybe later] render date and datetime objects as strings: 'issued_at', 'created_at'
    # - remove \n from free form text: 'source', [maybe later] 'notes'
    for f_id, source in rows:
        writer.writerow([f_id, source.replace('\n', '_')])

    return response
Example #42
def _download_job_data_request(job):
    """
    :param job: a Job
    :return: the data file corresponding to `job` as a CSV file
    """
    # imported here so that tests can patch via mock:
    from utils.cloud_file import download_file, _file_name_for_object


    with tempfile.TemporaryFile() as cloud_file_fp:  # <class '_io.BufferedRandom'>
        try:
            download_file(job, cloud_file_fp)
            cloud_file_fp.seek(0)  # yes you have to do this!

            # https://stackoverflow.com/questions/16538210/downloading-files-from-amazon-s3-using-django
            csv_filename = get_valid_filename(f'job-{_file_name_for_object(job)}-data.csv')
            wrapper = FileWrapper(cloud_file_fp)
            response = HttpResponse(wrapper, content_type='text/csv')
            # response['Content-Length'] = os.path.getsize('/tmp/'+fname)
            response['Content-Disposition'] = 'attachment; filename="{}"'.format(str(csv_filename))
            return response
        except (BotoCoreError, Boto3Error, ClientError, ConnectionClosedError) as aws_exc:
            logger.debug(f"download_job_data(): AWS error: {aws_exc!r}. job={job}")
            return HttpResponseNotFound(f"AWS error: {aws_exc!r}, job={job}")
        except Exception as ex:
            logger.debug(f"download_job_data(): error: {ex!r}. job={job}")
            return HttpResponseNotFound(f"error downloading job data. ex={ex!r}, job={job}")
Example #43
def newFolder(parent):
    name = get_valid_filename(
        simpledialog.askstring("Input",
                               "Please enter the file name:",
                               parent=parent))
    if not os.path.exists(name):
        os.makedirs(name)
Example #44
    def import_image(self, obj, files):
        found = 0
        sfile = get_valid_filename(obj)
        if sfile in files:
            ffile = files[sfile]
        elif sfile.split("_")[0] in files:
            ffile = files[sfile.split("_")[0]]
        elif "".join(sfile.split("_")) in files:
            ffile = files["".join(sfile.split("_"))]
        else:
            found = 1
            splitfile = re.split(r"[, \-!?:_]+", sfile)
            for sf in splitfile:
                if len(sf) > 3:
                    for f in files:
                        if sf in f or self.similar_text(sf, f) > 80:
                            ffile = files[f]
                            found = 0
                            break

        if found:
            self.error.add('Logo', 'not found for: %s' % sfile)
            self.inf += found
        else:
            tfile = File(open("%s%s" % (self.directory, ffile), 'rb'))
            obj.image.save(ffile, tfile)
Example #45
def add_subtitle_to_video_process(video_id, mode, sub_lang_type='zh-Hans'):
    """
    Convert the vtt subtitles of the video identified by video_id to ass format,
    then write them into the corresponding video.

    :param video_id:
    :param sub_lang_type: (en, zh-Hans, zh-Hans_en)
    :param mode: whether to write the subtitles into the video file in soft or hard mode
    :return:
    """
    video = Video.objects.get(pk=video_id)

    # If Chinese subtitles were requested and the Chinese vtt subtitles exist,
    # convert the vtt subtitles first to srt, then to ass, and apply styling
    if sub_lang_type == 'zh-Hans' and video.subtitle_cn.name:
        subtitle_file = video.subtitle_cn.path

        ass_filename = '%s-%s.zh-Hans.ass' % (
            get_valid_filename(video.title), video_id)

        ass_subs_dir = os.path.join(YOUTUBE_DOWNLOAD_DIR, ass_filename)
        # Convert the Chinese vtt subtitles first to srt, then to ass, and apply styling
        subtitle_file = convert_subtilte_format(subtitle_file, ass_subs_dir)

        subtitle_file = edit_cn_ass_subtitle_style(subtitle_file)
        # English vtt subtitles from YouTube contain formatting, which leaves stray markup
        # after converting to srt and merging with the Chinese srt subtitles;
        # until that is handled, only the Chinese subtitles are merged into the video
    # elif sub_lang_type == 'en' and video.subtitle_en.name:
    #     subtitle_file = video.subtitle_en.path
    elif sub_lang_type == 'zh-Hans_en' and video.subtitle_merge.name:
        # If merged Chinese/English subtitles were requested and the merged subtitles exist
        subtitle_file = video.subtitle_merge.path
    else:
        # If no subtitle_file could be obtained, return False
        return False

    if video.file.name:
        # Get the video file name
        file_basename = os.path.basename(video.file.path)
    else:
        return False

    # Split the file name into base name and extension
    file_basename_list = os.path.splitext(file_basename)
    subtitle_video = file_basename_list[0] + '.' + sub_lang_type + \
                     file_basename_list[1]

    # The subtitled video file is saved under the YOUTUBE_DOWNLOAD_DIR directory
    subtitle_video = os.path.join(YOUTUBE_DOWNLOAD_DIR, subtitle_video)

    result = add_subtitle_to_video(video.file.path, subtitle_file,
                                   subtitle_video, mode)
    if result is True and os.path.exists(subtitle_video):
        # If merging the subtitles into the video succeeded, save the video file path on the Video model
        video.subtitle_video_file = subtitle_video
        video.save(update_fields=['subtitle_video_file'])
        return True
    else:
        print(result)
        return False
Example #46
def make_problem_filename(instance, filename):
    if not isinstance(instance, Problem):
        assert hasattr(instance, 'problem'), 'problem_file_generator used ' \
                'on object %r which does not have \'problem\' attribute' \
                % (instance,)
        instance = getattr(instance, 'problem')
    return 'problems/%d/%s' % (instance.id,
            get_valid_filename(os.path.basename(filename)))
Example #47
def make_contest_filename(instance, filename):
    if not isinstance(instance, Contest):
        assert hasattr(instance, 'contest'), 'contest_file_generator used ' \
                'on object %r which does not have \'contest\' attribute' \
                % (instance,)
        instance = getattr(instance, 'contest')
    return 'contests/%s/%s' % (instance.id,
            get_valid_filename(os.path.basename(filename)))
Example #48
    def get_download_filename(self):
        """
        Return a valid filename to be downloaded as.
        """
        title = self.contentnode.title
        filename = "{} ({}).{}".format(title, self.get_preset(), self.extension)
        valid_filename = get_valid_filename(filename)
        return valid_filename
Example #49
def _get_unique_filename(name, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs"):
    fs = GridFS(get_db(db_alias), collection_name)
    file_root, file_ext = os.path.splitext(get_valid_filename(name))
    count = itertools.count(1)
    while fs.exists(filename=name):
        # file_ext includes the dot.
        name = "%s_%s%s" % (file_root, next(count), file_ext)
    return name
Example #50
def export(test=True):
    data = gather_data_and_update_flags(test)
    if data == []:
        raise Exception(u"%s" % _(u"No data to export!"))
    elif not data:
        logger.error(u"%s" % _(u"Error when fetching data - Data empty."))
        raise Exception(u"%s" % _(u"Error when fetching data"
                                  u" Export cancelled!"))

    # File name
    if test:
        filename = get_valid_filename("test_%s.csv" % date.today())
    else:
        filename = get_valid_filename("%s.csv" % date.today())
    mediafilepath = join("invoices/export/", filename)
    filepath = join(settings.MEDIA_ROOT, mediafilepath)

    # Ensure that we got an available file name
    fss = FileSystemStorage()
    filepath = fss.get_available_name(filepath)

    # If the file name change, we update the media file path
    filename = split(filepath)[1]
    mediafilepath = join("invoices/export/", filename)

    # Write the file on the FS, and create the Export object if we are not in
    # test mode
    with open(filepath, "w") as csvfile:
        exportwriter = csv.writer(csvfile, delimiter=';',
                                  quoting=csv.QUOTE_ALL)

        # We do this because CSV doesn't support directly Unicode and UTF-8
        # http://docs.python.org/2/library/csv.html#examples
        for row in data:
            r = []
            for field in row:
                r.append(("%s" % field).strip().encode("utf-8"))
            exportwriter.writerow(r)

        if not test:
            export = Export(date=datetime.today(),
                            file=mediafilepath)
            export.save()

    return settings.MEDIA_URL + mediafilepath
Example #51
    def _save(self, name, content):
        """
        Lifted partially from django/core/files/storage.py
        """
        full_path = self.path(name)

        # Create any intermediate directories that do not exist.
        # Note that there is a race between os.path.exists and os.makedirs:
        # if os.makedirs fails with EEXIST, the directory was created
        # concurrently, and we can continue normally. Refs #16082.
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.

        # This file has a file path that we can move.
        if hasattr(content, 'temporary_file_path'):
            temp_data_location = content.temporary_file_path()
        else:
            tmp_prefix = "tmp_%s" % (get_valid_filename(name), )
            temp_data_location = tempfile.mktemp(prefix=tmp_prefix,
                                                 dir=self.location)
            try:
                # This is a normal uploadedfile that we can stream.
                # This fun binary flag incantation makes os.open throw an
                # OSError if the file already exists before we open it.
                fd = os.open(temp_data_location,
                             os.O_WRONLY | os.O_CREAT |
                             os.O_EXCL | getattr(os, 'O_BINARY', 0))
                locks.lock(fd, locks.LOCK_EX)
                for chunk in content.chunks():
                    os.write(fd, chunk)
                locks.unlock(fd)
                os.close(fd)
            except Exception:
                if os.path.exists(temp_data_location):
                    os.remove(temp_data_location)
                raise

        file_move_safe(temp_data_location, full_path, allow_overwrite=True)
        content.close()

        if settings.FILE_UPLOAD_PERMISSIONS is not None:
            os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)

        return name
Example #52
    def _get_delete_work_dir(self, work_dir, workspace):
        """Returns a work sub-directory used to delete files from the given workspace

        :param work_dir: Absolute path to a local work directory available to Scale
        :type work_dir: str
        :param workspace: The workspace
        :type workspace: :class:`storage.models.Workspace`
        """

        return os.path.join(self._get_delete_root_dir(work_dir), get_valid_filename(workspace.name))
Example #53
    def _get_workspace_work_dir(self, work_dir, workspace):
        '''Returns a work sub-directory for the given workspace

        :param work_dir: Absolute path to a local work directory available to Scale
        :type work_dir: str
        :param workspace: The workspace
        :type workspace: :class:`storage.models.Workspace`
        '''

        return os.path.join(os.path.normpath(work_dir), 'workspaces', get_valid_filename(workspace.name))
Example #54
    def clean_projectname(self):
        dirname = "%05d_%s" % (self.user.pk, get_valid_filename(self.cleaned_data['projectname']))
        logger.debug(dirname)
        logger.debug(self.user)
        try:
            prj = Project.objects.get(owner=self.user, directory=dirname)
            logger.debug('exists')
            raise forms.ValidationError("project already exists")
        except Project.DoesNotExist:
            return self.cleaned_data['projectname']
Example #55
def _upload_to(filename):
    upload_path = getattr(settings, 'MULTIUPLOADER_FILES_FOLDER', DEFAULTS.MULTIUPLOADER_FILES_FOLDER)

    if upload_path[-1] != '/':
        upload_path += '/'

    filename = get_valid_filename(os.path.basename(filename))
    filename, ext = os.path.splitext(filename)
    hash = sha1(str(time.time()).encode()).hexdigest()
    fullname = os.path.join(upload_path, "%s.%s%s" % (filename, hash, ext))
    return fullname
Example #56
    def _upload_to(instance, filename):
        upload_path = getattr(settings, 'BUCKET_FILES_FOLDER')

        if upload_path[-1] != '/':
            upload_path += '/'

        filename = get_valid_filename(os.path.basename(filename))
        filename, ext = os.path.splitext(filename)
        hash = sha1(str(time.time()).encode()).hexdigest()
        fullname = os.path.join(upload_path, "%s%s" % (hash, ext))
        return fullname
Example #57
def get_attachment_file_path(instance, filename):
    basename = path.basename(filename)
    basename = get_valid_filename(basename)

    hs = hashlib.sha256()
    hs.update(force_bytes(timezone.now().isoformat()))
    hs.update(os.urandom(1024))

    p1, p2, p3, p4, *p5 = split_by_n(hs.hexdigest(), 1)
    hash_part = path.join(p1, p2, p3, p4, "".join(p5))

    return path.join("attachments", hash_part, basename)
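Assuming split_by_n(s, 1) yields the string one character at a time (the helper is not shown here), the first four hex digits of the digest become nested directories, spreading attachments across a shallow tree:

# hs.hexdigest() == '3fa9...'  ->  hash_part == '3/f/a/9/<rest-of-digest>'
# final path: attachments/3/f/a/9/<rest-of-digest>/my_report.pdf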
Example #58
def cumulus_upload_to(self, filename):
    """
    Simple, custom upload_to because Cloud Files doesn't support
    nested containers (directories).

    Actually found this out from @minter:
    @richleland The Cloud Files APIs do support pseudo-subdirectories, by
    creating zero-byte files with type application/directory.

    May implement in a future version.
    """
    return get_valid_filename(filename)