def handle(self, *args, **options):
        ##########
        # 1. Save the email-attachment file to the django model (database)
        ##########
        sticker_email_host = env('STICKER_EMAIL_HOST', '')
        sticker_email_port = env('STICKER_EMAIL_PORT', '')
        sticker_email_username = env('STICKER_EMAIL_USERNAME', '')
        sticker_email_password = env('STICKER_EMAIL_PASSWORD', '')

        if not sticker_email_host or not sticker_email_port or not sticker_email_username or not sticker_email_password:
            msg = 'Configure email settings at .env to process sticker responses'
            logger.error(msg)
            cron_email.info(msg)
            raise Exception(msg)

        context = ssl.create_default_context()
        imapclient = imaplib.IMAP4_SSL(sticker_email_host, sticker_email_port, ssl_context=context)

        # login
        imapclient.login(sticker_email_username, sticker_email_password)

        """
        Retrieve messages
        """
        imapclient.select('INBOX')  # Select mail box
        typ, data = imapclient.search(None, "ALL")  # data = [b"1 2 3 4 ..."]
        datas = data[0].split()
        fetch_num = 5000  # The number of messages to fetch

        if (len(datas) - fetch_num) < 0:
            fetch_num = len(datas)

        for num in datas[len(datas) - fetch_num::]:
            try:
                ##########
                # 2. Save the attached files into the database
                ##########
                typ, data = imapclient.fetch(num, '(RFC822)')
                # typ, data = imapclient.fetch(num, '(BODY[HEADER.FIELDS (MESSAGE-ID)])')
                raw_email = data[0][1]

                # decode the raw bytes into a string
                raw_email_string = raw_email.decode('utf-8')
                email_message = email.message_from_string(raw_email_string)

                email_message_id = str(make_header(decode_header(email_message["Message-ID"])))
                if StickerPrintingResponseEmail.objects.filter(email_message_id=email_message_id).exists():
                    # This email has already been saved in the database; skip it.
                    continue

                email_subject = str(make_header(decode_header(email_message["Subject"])))
                email_from = email_message['From']
                body = get_text(email_message)
                email_body = body.decode()
                email_date = email_message['Date']

                sticker_printing_response_email = StickerPrintingResponseEmail.objects.create(
                    email_subject=email_subject,
                    email_from=email_from,
                    email_body=email_body,
                    email_date=email_date,
                    email_message_id=email_message_id,
                )

                # downloading attachments
                for part in email_message.walk():
                    try:
                        # skip container parts and parts that are not attachments
                        if part.get_content_maintype() == 'multipart':
                            continue
                        if part.get('Content-Disposition') is None:
                            continue
                        fileName = part.get_filename()
                        if bool(fileName) and fileName.lower().endswith('.xlsx'):
                            now = timezone.localtime(timezone.now())

                            # Create sticker_printing_response object. File is not saved yet
                            sticker_printing_response = StickerPrintingResponse.objects.create(
                                sticker_printing_response_email=sticker_printing_response_email,
                                name=fileName,
                            )

                            # Load attachment file
                            my_bytes = part.get_payload(decode=True)
                            content_file = ContentFile(my_bytes)

                            # Save file
                            sticker_printing_response._file.save(fileName, content_file)
                    except Exception as e:
                        logger.exception('Exception has been raised when importing .xlsx file')
                        continue

                # imapclient.store(num, "+FLAGS", "\\Deleted")
                imapclient.copy(num, "Archive")
                imapclient.store(num, "+FLAGS", "\\Deleted")
            except Exception:
                logger.exception('Exception has been raised when processing emails')
                continue

        imapclient.close()
        imapclient.logout()

        ##########
        # 3. Process xlsx file saved in django model
        ##########
        process_summary = {'stickers': [], 'errors': [], 'sticker_printing_responses': []}  # To be used for sticker processed email
        updates, errors = process_sticker_printing_response(process_summary)

        # Send sticker import batch emails
        send_sticker_import_batch_email(process_summary)

        cmd_name = __name__.split('.')[-1].replace('_', ' ').upper()
        # error_count = len(errors) + len(error_filenames)
        # error_count = len(errors)
        # err_str = '<strong style="color: red;">Errors: {}</strong>'.format(error_count) if error_count else '<strong style="color: green;">Errors: 0</strong>'
        # msg = '<p>{} completed. {}. IDs updated: {}.</p>'.format(cmd_name, err_str, updates)
        msg = construct_email_message(cmd_name, errors, updates)
        logger.info(msg)
        cron_email.info(msg)
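The handle() command above relies on a get_text() helper that is not shown in this example. A minimal sketch of what such a helper typically looks like, assuming it returns the first text/plain payload of the message as bytes (which matches the body.decode() call above):

def get_text(msg):
    # Walk the MIME tree and return the first text/plain part that is not an attachment.
    if msg.is_multipart():
        for part in msg.walk():
            if (part.get_content_type() == 'text/plain'
                    and part.get('Content-Disposition') is None):
                return part.get_payload(decode=True)
        return b''
    return msg.get_payload(decode=True)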
Example #2
    def post(self, request, filename, format=None):
        try:
            from data_handler.models import (DataFile, DataHandlerSession)
            data_file = DataFile.objects.get(member=request.user)
            dfile = request.FILES['donor_file']
            base_file_content = ContentFile(dfile.read())
            # make the file name unique
            file_id = uuid.uuid4()
            new_file_name_id = f"{file_id.time_hi_version}-{dfile.name}"

            path = default_storage.save(f"data/{new_file_name_id}",
                                        base_file_content)
            # save the base file as-is, without converting the columns' dtypes
            unique_user_data_file_ids = f"{str(data_file.id)}_{str(data_file.member.id)}"
            signer = Signer(algorithm='md5')
            base_path_name = f"{unique_user_data_file_ids}_{uuid.uuid4().hex[:6].upper()}_{dfile.name}"
            base_path = default_storage.save(f"data/base/{base_path_name}",
                                             base_file_content)
            tmp_base_path = os.path.join(settings.MEDIA_ROOT, base_path)
            tmp_file = os.path.join(settings.MEDIA_ROOT, path)
            row_count = get_row_count(
                tmp_file)  # get total rows of the uploaded file

            # first check if the file empty or not
            if check_empty_df(tmp_file) is True:
                resp = {
                    "is_allowed": False,
                    "row_count": row_count,
                    "msg": "The file is empty please re-upload correct file",
                    "is_empty": True
                }
                delete_data_file(tmp_file)
                delete_data_file(tmp_base_path)
                # delete_all_member_data_file_info(data_file)
                return Response(resp, status=200)
            else:
                # here the file not empty
                save_data_file_rounded(tmp_file)
                remove_spaces_from_columns_names(tmp_base_path)
                columns = extract_all_columns_with_dtypes(
                    tmp_file)  # extract the columns from the uploaded file
                params = request.POST.get('parameters')
                session_label = request.POST.get('session-label')
                file_name = request.POST.get('file_name')
                data_or_num = check_data_or_num(params)
                if isinstance(data_or_num, int) is True:
                    member_data_session = DataHandlerSession.objects.get(
                        data_handler_id=data_file, pk=data_or_num)
                    member_data_session.data_file_path = tmp_file
                    member_data_session.base_data_file_path = tmp_base_path
                    member_data_session.file_name = file_name
                    member_data_session.data_handler_session_label = session_label
                    member_data_session.save()
                else:
                    all_main_columns_dtypes = extract_all_columns_with_dtypes(
                        tmp_file)
                    all_main_cols_str = ""
                    for key, value in all_main_columns_dtypes.items():
                        all_main_cols_str += f"{key}:{value}|"
                    member_data_session = DataHandlerSession.objects.create(
                        data_handler_id=data_file,
                        data_file_path=tmp_file,
                        file_upload_procedure="local_file",
                        all_records_count=row_count,
                        data_handler_session_label=session_label,
                        file_name=file_name,
                        all_columns_with_dtypes=all_main_cols_str,
                        base_data_file_path=tmp_base_path)
                    data_file.last_uploaded_session = member_data_session.pk
                    data_file.save()

                # cprint(member_data_session.pk, 'yellow')
                # check whether the member already has a previous session

                # save the file path to the db after the upload

                if row_count > data_file.allowed_records_count:
                    # return Response("Columns count bigger than the allowed")
                    resp = {"is_allowed": False, "row_count": row_count}
                    return Response(resp, status=200)
                else:
                    resp = {
                        "is_allowed": True,
                        "columns": columns,
                        "row_count": row_count
                    }
                    # print(columns)
                    return Response(resp, status=200)

        except Exception as ex:
            cprint(traceback.format_exc(), 'red')
            log_exception(traceback.format_exc())
            # Without an explicit return the view would respond with None; report the failure.
            return Response({"is_allowed": False, "msg": str(ex)}, status=500)
Example #3
    def setUp(self):
        cfg_file = ConfigurationFile()
        cfg_file.slug = cfg_file.LOGO
        # FileField.save() also saves the model instance
        cfg_file.content.save("sample.svg", ContentFile(sample_svg))
        self.cfg_file = cfg_file
Example #4
def get_test_file(file_name):
    log_file = File(
        open(normpath(join(dirname(abspath(__file__)), 'files/' + file_name)),
             'rb'))

    return ContentFile(log_file.read(), file_name)
Example #5
    def test_content_file_custom_name(self):
        """
        The constructor of ContentFile accepts 'name' (#16590).
        """
        name = "I can have a name too!"
        self.assertEqual(ContentFile(b"content", name=name).name, name)
Example #6
def get(request, experiment_id=0):
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index:index'))

    if experiment_id:
        try:
            experiment = Experiment.objects.get(id=experiment_id)
        except Exception:
            # raise Http404("Experiment does not exist")
            return HttpResponseRedirect(reverse('experiment:experiment_stock'))
    else:
        # without an experiment id there is nothing to process
        return HttpResponseRedirect(reverse('experiment:experiment_stock'))

    file_item = File.objects.filter(experiment_id=experiment.id, file__regex=SAMPLE_INFO_PATTERN)
    if not file_item:
        sample_info = ContentFile(makeSampleinfo(experiment))
        sample_info.name = SAMPLE_INFO_FILENAME
        sample_info_file = File(experiment_id=experiment.id, file=sample_info)
        sample_info_file.save()

    file_item = File.objects.filter(experiment_id=experiment.id).first()

    input_path = SITE_PATH + re.sub(r'/[^/]*$', '/', file_item.file.url)
    print(input_path)
    output_path = input_path + re.sub(r'/$', '', pipelne_settings.OUT_DIRNAME)
    log_file = output_path + '/' + pipelne_settings.LOG_FILE
    compressed_file = input_path + pipelne_settings.OUT_FILE
    cmd_array = [pipelne_settings.PYTHON_PATH,
                 pipelne_settings.PIPER_PATH,
                 '-m', pipelne_settings.MAX_MEMORY,
                 '-i', input_path,
                 '-o', output_path,
                 '-l', log_file,
                 '-z', compressed_file,
                 '-p', str(pipelne_settings.PORT),
                 ]

    if request.POST.get('overseqThreshold', False):
        cmd_array.append('-f')
        cmd_array.append(request.POST['overseqThresholdValue'])

    if request.POST.get('collisionFilter', False):
        cmd_array.append('-c')

    if request.POST.get('rmSequences', False):  # remove sequences from the output
        cmd_array.append('-r')

    cmd_string = ' '.join(cmd_array)

    taskRemover()  # remove completed tasks
    new_task = None
    for k in range(pipelne_settings.THREADS):  # find an empty slot for the new task
        task_in_process = TaskQueue.objects.filter(thread=k).first()
        if not task_in_process:
            try:
                new_task = TaskQueue(experiment_id=experiment.id,
                                     thread=k,
                                     cmd=cmd_string,
                                     output_file=compressed_file
                                     )
                new_task.save()
            except Exception:
                continue
            else:
                break
    if new_task:
        if os.path.isfile(experiment.output_file):
            try:
                os.remove(experiment.output_file)  # remove the old output file
            except Exception:
                raise Http404("Can't remove the old output file!\n")
        try:
            process = subprocess.Popen(cmd_array, stdin=None, stdout=None, stderr=None, shell=False, close_fds=True)
        except Exception:
            raise Http404("Can't run a new process!")
        else:
            try:
                new_task.pid = process.pid
                new_task.save()
            except Exception:
                raise Http404("Can't update the process status!")
            else:
                try:
                    experiment.output_file = compressed_file
                    experiment.output_dir = output_path
                    experiment.output_status = 'ongoing'
                    experiment.save()
                except Exception:
                    raise Http404("Can't modify the experiment status!")
    else:
        try:
            experiment.output_file = compressed_file
            experiment.output_dir = output_path
            experiment.output_status = 'waiting'
            experiment.save()
        except Exception:
            raise Http404("Can't modify the experiment status!")

    return HttpResponseRedirect(reverse('experiment:experiment_stock'))
Example #7
    def _get_dehydrated_message(self, msg, record):
        settings = utils.get_settings()

        new = EmailMessage()
        if msg.is_multipart():
            for header, value in msg.items():
                new[header] = value
            for part in msg.get_payload():
                new.attach(self._get_dehydrated_message(part, record))
        elif (settings['strip_unallowed_mimetypes']
              and not msg.get_content_type() in settings['allowed_mimetypes']):
            for header, value in msg.items():
                new[header] = value
            # Delete the header; otherwise, when attempting to deserialize the
            # payload, it will be expecting a body for this part.
            del new['Content-Transfer-Encoding']
            new[settings['altered_message_header']] = (
                'Stripped; Content type %s not allowed' %
                (msg.get_content_type()))
            new.set_payload('')
        elif ((msg.get_content_type() not in settings['text_stored_mimetypes'])
              or ('attachment' in msg.get('Content-Disposition', ''))):
            filename = None
            raw_filename = msg.get_filename()
            if raw_filename:
                filename = utils.convert_header_to_unicode(raw_filename)
            if not filename:
                extension = mimetypes.guess_extension(msg.get_content_type())
            else:
                _, extension = os.path.splitext(filename)
            if not extension:
                extension = '.bin'

            attachment = MessageAttachment()

            attachment.document.save(
                uuid.uuid4().hex + extension,
                ContentFile(
                    six.BytesIO(msg.get_payload(decode=True)).getvalue()))
            attachment.message = record
            for key, value in msg.items():
                attachment[key] = value
            attachment.save()

            placeholder = EmailMessage()
            placeholder[settings['attachment_interpolation_header']] = str(
                attachment.pk)
            new = placeholder
        else:
            content_charset = msg.get_content_charset()
            if not content_charset:
                content_charset = 'ascii'
            try:
                # Make sure that the payload can be properly decoded in the
                # defined charset, if it can't, let's mash some things
                # inside the payload :-\
                msg.get_payload(decode=True).decode(content_charset)
            except LookupError:
                logger.warning("Unknown encoding %s; interpreting as ASCII!",
                               content_charset)
                msg.set_payload(
                    msg.get_payload(decode=True).decode('ascii', 'ignore'))
            except ValueError:
                logger.warning(
                    "Decoding error encountered; interpreting %s as ASCII!",
                    content_charset)
                msg.set_payload(
                    msg.get_payload(decode=True).decode('ascii', 'ignore'))
            new = msg
        return new
Example #8
def fetch_recording(file_field, url):
    """ Download a recording from the given URL as a file field value. """
    response = urlopen(url)
    response_buffer = ContentFile(response.read())
    file_field.save(os.path.basename(url), response_buffer, save=True)
Example #9
def draw_graph(G, pos, measures, measure_name, node_label=None):
    nodes = nx.draw_networkx_nodes(G,
                                   pos,
                                   cmap=plt.cm.plasma,
                                   node_color=list(measures.values()),
                                   nodelist=measures.keys(),
                                   node_size=180,
                                   edgecolors='black',
                                   linewidths=0.7)

    # reversed_measures = {k: k for k, v in measures.items()}
    # labels = nx.draw_networkx_labels(G, pos, labels=reversed_measures, font_size = 3)

    edges = nx.draw_networkx_edges(G, pos, width=0.3, arrowsize=7)
    # edge_labels = nx.get_edge_attributes(G,'weight')
    # edge_labels=nx.draw_networkx_edge_labels(G ,pos, edge_labels, font_size=2)

    nodes.set_norm(mcolors.SymLogNorm(linthresh=0.01, linscale=1))

    if node_label is not None:
        labels = {}
        node_color = {}
        for node in G.nodes():
            if node == node_label:
                labels[node] = node
                node_color[node] = float(measures.get(node, ''))

        nx.draw_networkx_labels(G,
                                pos,
                                labels,
                                font_size=8,
                                font_color='black')

        label_nodes = nx.draw_networkx_nodes(G,
                                             pos,
                                             cmap=plt.cm.plasma,
                                             node_color=list(
                                                 node_color.values()),
                                             nodelist=labels,
                                             node_size=320,
                                             edgecolors='black',
                                             linewidths=0.8)
        label_nodes.set_norm(mcolors.SymLogNorm(linthresh=0.01, linscale=1))

        for group in sorted(nx.connected_components(G.to_undirected())):
            if node_label in group:
                measures = {k: v for k, v in measures.items() if k in group}
                pairs_to_remove = []
                for pair in G.edges():
                    if pair[0] not in group or pair[1] not in group:
                        pairs_to_remove.append(pair)
                for pair in pairs_to_remove:
                    G.remove_edge(pair[0], pair[1])
                break

    plt.title(measure_name)
    plt.colorbar(nodes, shrink=0.6)
    plt.axis('off')

    f = BytesIO()
    plt.savefig(f)
    content_file = ContentFile(f.getvalue())

    if node_label is None:
        model_object = NetworkVisualization.objects.all()

        if len(model_object) == 0:
            model_object = NetworkVisualization.objects.create()
        else:
            model_object = model_object[0]

    else:
        model_object = MyUser.objects.filter(profile_name=node_label)
        if len(model_object) == 0:
            return
        else:
            model_object = model_object[0]

    if measure_name == 'In Degree Centrality':
        model_object.in_degree_centrality.delete()
        model_object.in_degree_centrality.save(measure_name + '.png',
                                               content_file)
    if measure_name == 'Out Degree Centrality':
        model_object.out_degree_centrality.delete()
        model_object.out_degree_centrality.save(measure_name + '.png',
                                                content_file)
    if measure_name == 'Betweenness Centrality':
        model_object.betweenness_centrality.delete()
        model_object.betweenness_centrality.save(measure_name + '.png',
                                                 content_file)
    if measure_name == 'Closeness Centrality':
        model_object.closeness_centrality.delete()
        model_object.closeness_centrality.save(measure_name + '.png',
                                               content_file)
    if measure_name == 'Eigenvector Centrality':
        model_object.eigenvector_centrality.delete()
        model_object.eigenvector_centrality.save(measure_name + '.png',
                                                 content_file)

    model_object.save()
    plt.close()
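A usage sketch for draw_graph() above. The graph here is synthetic and the centrality measure comes straight from networkx; the only assumption is that the Django models referenced above (NetworkVisualization / MyUser) exist and are migrated.

import networkx as nx

G = nx.erdos_renyi_graph(30, 0.1, seed=1, directed=True)
pos = nx.spring_layout(G, seed=1)
draw_graph(G, pos, nx.in_degree_centrality(G), 'In Degree Centrality')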
Example #10
    def setUp(self):
        super(ImageTransparencyTest, self).setUp()
        self.png = PhotoFactory()
        self.png.image.save(
            'trans.png', ContentFile(raw_image('RGBA', 'PNG').read()))
Example #11
def update_submission(request, challenge_pk):
    """
    API endpoint to update submission related attributes

    Query Parameters:

     - ``challenge_phase``: challenge phase id, e.g. 123 (**required**)
     - ``submission``: submission id, e.g. 123 (**required**)
     - ``stdout``: Stdout after evaluation, e.g. "Evaluation completed in 2 minutes" (**required**)
     - ``stderr``: Stderr after evaluation, e.g. "Failed due to incorrect file format" (**required**)
     - ``submission_status``: Status of submission after evaluation
        (can take one of the following values: `FINISHED`/`CANCELLED`/`FAILED`), e.g. FINISHED (**required**)
     - ``result``: contains accuracies for each metric, (**required**) e.g.
            [
                {
                    "split": "split1-codename",
                    "show_to_participant": True,
                    "accuracies": {
                    "metric1": 90
                    }
                },
                {
                    "split": "split2-codename",
                    "show_to_participant": False,
                    "accuracies": {
                    "metric1": 50,
                    "metric2": 40
                    }
                }
            ]
     - ``metadata``: Contains the metadata related to submission (only visible to challenge hosts) e.g:
            {
                "average-evaluation-time": "5 sec",
                "foo": "bar"
            }
    """
    if not is_user_a_host_of_challenge(request.user, challenge_pk):
        response_data = {
            'error': 'Sorry, you are not authorized to make this request!'
        }
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    challenge_phase_pk = request.data.get('challenge_phase')
    submission_pk = request.data.get('submission')
    submission_status = request.data.get('submission_status', '').lower()
    stdout_content = request.data.get('stdout', '')
    stderr_content = request.data.get('stderr', '')
    submission_result = request.data.get('result', '')
    metadata = request.data.get('metadata', '')
    submission = get_submission_model(submission_pk)

    public_results = []
    successful_submission = submission_status == Submission.FINISHED
    if submission_status not in [
            Submission.FAILED, Submission.CANCELLED, Submission.FINISHED
    ]:
        response_data = {'error': 'Sorry, submission status is invalid'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    if successful_submission:
        try:
            results = json.loads(submission_result)
        except ValueError:
            response_data = {
                'error':
                '`result` key contains invalid data. Please try again with correct format!'
            }
            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

        leaderboard_data_list = []
        for phase_result in results:
            split = phase_result.get('split')
            accuracies = phase_result.get('accuracies')
            show_to_participant = phase_result.get('show_to_participant',
                                                   False)
            try:
                challenge_phase_split = ChallengePhaseSplit.objects.get(
                    challenge_phase__pk=challenge_phase_pk,
                    dataset_split__codename=split)
            except ChallengePhaseSplit.DoesNotExist:
                response_data = {
                    'error':
                    'Challenge Phase Split does not exist with phase_id: {} and '
                    'split codename: {}'.format(challenge_phase_pk, split)
                }
                return Response(response_data,
                                status=status.HTTP_400_BAD_REQUEST)

            leaderboard_metrics = challenge_phase_split.leaderboard.schema.get(
                'labels')
            missing_metrics = []
            malformed_metrics = []
            for metric, value in accuracies.items():
                if metric not in leaderboard_metrics:
                    missing_metrics.append(metric)

                if not (isinstance(value, float) or isinstance(value, int)):
                    malformed_metrics.append((metric, type(value)))

            if len(missing_metrics):
                response_data = {
                    'error':
                    'Following metrics are missing in the '
                    'leaderboard data: {}'.format(missing_metrics)
                }
                return Response(response_data,
                                status=status.HTTP_400_BAD_REQUEST)

            if len(malformed_metrics):
                response_data = {
                    'error':
                    'Values for following metrics are not of '
                    'float/int: {}'.format(malformed_metrics)
                }
                return Response(response_data,
                                status=status.HTTP_400_BAD_REQUEST)

            data = {'result': accuracies}
            serializer = CreateLeaderboardDataSerializer(
                data=data,
                context={
                    'challenge_phase_split': challenge_phase_split,
                    'submission': submission,
                    'request': request,
                })
            if serializer.is_valid():
                leaderboard_data_list.append(serializer)
            else:
                return Response(serializer.errors,
                                status=status.HTTP_400_BAD_REQUEST)

            # Only after checking if the serializer is valid, append the public split results to results file
            if show_to_participant:
                public_results.append(accuracies)

        try:
            with transaction.atomic():
                for serializer in leaderboard_data_list:
                    serializer.save()
        except IntegrityError:
            logger.exception(
                'Failed to update submission_id {} related metadata'.format(
                    submission_pk))
            response_data = {
                'error':
                'Failed to update submission_id {} related metadata'.format(
                    submission_pk)
            }
            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    submission.status = submission_status
    submission.completed_at = timezone.now()
    submission.stdout_file.save('stdout.txt', ContentFile(stdout_content))
    submission.stderr_file.save('stderr.txt', ContentFile(stderr_content))
    submission.submission_result_file.save('submission_result.json',
                                           ContentFile(str(public_results)))
    submission.submission_metadata_file.save('submission_metadata_file.json',
                                             ContentFile(str(metadata)))
    submission.save()
    response_data = {
        'success': 'Submission result has been successfully updated'
    }
    return Response(response_data, status=status.HTTP_200_OK)
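A sketch of the request a challenge worker might send to this endpoint, following the parameter list in the docstring above. The URL, HTTP method, and auth header are assumptions here; note that `result` is sent as a JSON string because the view calls json.loads() on it.

import json
import requests

payload = {
    'challenge_phase': 123,
    'submission': 456,
    'submission_status': 'FINISHED',
    'stdout': 'Evaluation completed in 2 minutes',
    'stderr': '',
    'result': json.dumps([
        {'split': 'split1-codename', 'show_to_participant': True,
         'accuracies': {'metric1': 90}},
    ]),
    'metadata': json.dumps({'average-evaluation-time': '5 sec'}),
}
requests.put('https://evalai.example.org/api/jobs/challenge/1/update_submission/',
             headers={'Authorization': 'Token <host-token>'},
             data=payload)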
Example #12
def process(request, id):
    plt_md = PlotMetadata.objects.get(pk=id)
    processed_data = process_raw_data(plt_md)
    plt_md.processed_data.save('processed_' + get_random_string(length=6) + '_' + plt_md.orig_raw_data_filename, ContentFile(processed_data.to_csv()))
    plt_md.save()
    return redirect('/data')
Example #13
    def generate_document(self, invoice, **kwargs):
        """
        Create and save invoice document (as *.html file).
        """
        return ContentFile(self.render_document(invoice, **kwargs))
Example #14
def decode_image(data):
    format, imgstr = data.split(';base64,')
    ext = format.split('/')[-1]
    data = ContentFile(base64.b64decode(imgstr), name='temp.' + ext)  # You can save this as a file instance.
    return data
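A small round trip for decode_image() above, assuming only the standard base64 module. The payload mimics the data-URI format produced by browser canvases and JavaScript croppers.

import base64

payload = 'data:image/png;base64,' + base64.b64encode(b'<png bytes>').decode()
image_file = decode_image(payload)
assert image_file.name == 'temp.png'
# The ContentFile can then be assigned to an ImageField, e.g.
# instance.image.save(image_file.name, image_file, save=True)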
Example #15
    def _post_process(self, paths, adjustable_paths, hashed_files):
        # Sort the files by directory level
        def path_level(name):
            return len(name.split(os.sep))

        for name in sorted(paths, key=path_level, reverse=True):
            substitutions = True
            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:
                cleaned_name = self.clean_name(name)
                hash_key = self.hash_key(cleaned_name)

                # generate the hash with the original content, even for
                # adjustable files.
                if hash_key not in hashed_files:
                    hashed_name = self.hashed_name(name, original_file)
                else:
                    hashed_name = hashed_files[hash_key]

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    old_hashed_name = hashed_name
                    content = original_file.read().decode(
                        settings.FILE_CHARSET)
                    for extension, patterns in self._patterns.items():
                        if matches_patterns(path, (extension, )):
                            for pattern, template in patterns:
                                converter = self.url_converter(
                                    name, hashed_files, template)
                                try:
                                    content = pattern.sub(converter, content)
                                except ValueError as exc:
                                    yield name, None, exc, False
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(content.encode())
                    if self.keep_intermediate_files:
                        # Save intermediate file for reference
                        self._save(hashed_name, content_file)
                    hashed_name = self.hashed_name(name, content_file)

                    if self.exists(hashed_name):
                        self.delete(hashed_name)

                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = self.clean_name(saved_name)
                    # If the file hash stayed the same, this file didn't change
                    if old_hashed_name == hashed_name:
                        substitutions = False
                    processed = True

                if not processed:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = self.clean_name(saved_name)

                # and then set the cache accordingly
                hashed_files[hash_key] = hashed_name

                yield name, hashed_name, processed, substitutions
Example #16
def get_fake_file(filename):
    fake_file = ContentFile('file data')
    fake_file.name = filename
    return fake_file
Example #17
def load_peps(apps, schema_editor):
    User = apps.get_model("auth", "User")
    Company = apps.get_model("core", "Company")
    Person = apps.get_model("core", "Person")
    Person2Company = apps.get_model("core", "Person2Company")
    Document = apps.get_model("core", "Document")

    peklun = User.objects.get(username="******")

    with open("core/dicts/new_peps.csv", "r") as fp:
        r = DictReader(fp, errors="ignore")

        for i, l in enumerate(r):
            print(i)
            company_ipn = l.get("ІПН", "")
            company_name = l.get("Назва", "")

            company = None

            if not company_ipn and not company_name:
                continue

            # Search by IPN first (if it's present)
            if company_ipn:
                try:
                    company = Company.objects.get(edrpou=company_ipn)
                except Company.DoesNotExist:
                    pass

            # then search by name (if it's present)
            if company_name:
                if company is None:
                    try:
                        company = Company.objects.get(name=company_name)
                    except Company.DoesNotExist:
                        pass

            if company is None:
                company = Company(state_company=True)

            # Set missing params
            if not company.name:
                company.name = company_name

            if not company.edrpou:
                company.edrpou = company_ipn

            company.save()

            person_name = l.get("ПІБ", "").strip()
            position = l.get("Посада", "").strip()
            person_dob = l.get("Дата народження", "").strip()
            person_from = l.get("Дата призначення", "").strip()
            person_to = l.get("Дата звільнення", "").strip()

            doc_received = l.get("Дата відповіді", "").strip()
            doc = l.get("Лінк на відповідь", "").strip()
            website = l.get("лінк на сайт", "").strip()

            if person_name:
                chunks = person_name.split(" ")
                if len(chunks) == 2:
                    last_name = title(chunks[0])
                    first_name = title(chunks[1])
                    patronymic = ""  # avoid NameError in the lookup below
                else:
                    last_name = title(" ".join(chunks[:-2]))
                    first_name = title(chunks[-2])
                    patronymic = title(chunks[-1])

                # Kind of get_or_create
                try:
                    person = Person.objects.get(first_name__iexact=first_name,
                                                last_name__iexact=last_name,
                                                patronymic__iexact=patronymic)
                except Person.DoesNotExist:
                    person = Person(first_name=first_name,
                                    last_name=last_name,
                                    patronymic=patronymic)

                person.is_pep = True
                person.type_of_official = 1
                if person_dob:
                    person.dob = parse_date(person_dob)
                    if len(person_dob) == 4:
                        person.dob_details = 2  # Only year

                    if len(person_dob) > 4 and len(person_dob) < 7:
                        person.dob_details = 1  # month and year

                person.save()

                doc_instance = None
                if doc and "folderview" not in doc \
                        and "drive/#folders" not in doc:
                    print(doc)
                    doc = expand_gdrive_download_url(doc)
                    doc_hash = sha1(doc).hexdigest()

                    try:
                        doc_instance = Document.objects.get(hash=doc_hash)
                    except Document.DoesNotExist:
                        doc_name, doc_san_name, doc_content = download(doc)
                        doc_san_name = translitua(doc_san_name)

                        if doc_name:
                            doc_instance = Document(name=doc_name,
                                                    uploader=peklun,
                                                    hash=doc_hash)

                            doc_instance.doc.save(doc_san_name,
                                                  ContentFile(doc_content))
                            doc_instance.save()

                link, link_created = Person2Company.objects.update_or_create(
                    from_person=person,
                    to_company=company,
                    date_established=parse_date(person_from),
                    date_finished=parse_date(person_to))

                if not link.relationship_type:
                    link.relationship_type = position

                if doc_instance is not None:
                    link.proof_title = doc_instance.name
                    link.proof = doc_instance.doc.url

                link.date_confirmed = parse_date(doc_received)
                if not doc and website:
                    link.proof = website

                link.save()
Example #18
def lock_import_dir():
    msg = "locked: %s" % timezone.localtime(timezone.now())
    default_storage.save(settings.ARP_IMPORT_LOCK, ContentFile(msg))
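Helpers one might pair with the lock above to check and release it; these counterparts are assumptions, not part of the original snippet.

def import_dir_is_locked():
    return default_storage.exists(settings.ARP_IMPORT_LOCK)

def unlock_import_dir():
    if default_storage.exists(settings.ARP_IMPORT_LOCK):
        default_storage.delete(settings.ARP_IMPORT_LOCK)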
Example #19
    def save_systemjs_manifest(self, bundle_files):
        payload = {'paths': bundle_files, 'version': self.manifest_version}
        contents = json.dumps(payload).encode('utf-8')
        self._save(self.systemjs_manifest_name, ContentFile(contents))
Example #20
    def file_upload(self, request, *args, **kwargs):
        # Upload files / large files / resumable (chunked) uploads
        # TODO: concurrent requests from the frontend can corrupt the saved state;
        #       proper handling of concurrent requests still needs to be added
        if request.method == 'GET':
            query_params = self.request.query_params
            data_type = int(query_params.get('data_type', PROJECT_DATA))
            current_path = query_params.get('current_path', '')
            work_zone_id = int(query_params.get('work_zone', 0) or 0)
            try:
                work_zone = WorkZone.objects.get(pk=work_zone_id)
            except WorkZone.DoesNotExist:
                work_zone = None
            if not data_type:
                result = {'success': False, 'messages': '无法确认当前所属文件类型!'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            chunk_index = query_params.get('chunk_index', None)  # index of the current chunk
            chunk_size = query_params.get('chunk_size', None)  # size of each chunk
            current_chunk_size = query_params.get('current_chunk_size', None)  # size of the current chunk
            total_size = int(query_params.get('total_size', 0))  # total file size
            file_md5 = query_params.get('file_md5', None)  # MD5
            file_name = query_params.get('file_name', None)  # file name
            relative_path = query_params.get('relative_path', None)  # relative path
            total_chunks = int(query_params.get('total_chunks', 0))  # total number of chunks
            if not file_md5:
                result = {'success': False, 'messages': '请指定文件MD5值'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            if not file_name:
                result = {'success': False, 'messages': '请指定文件名称:file_name'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            data_path = get_data_directory_path(data_type=data_type,
                                                current_path=current_path,
                                                work_zone_id=work_zone_id)
            file_path = os.path.join(data_path, file_name)
            save_directory = os.path.join(data_path, f'{file_md5}-part')
            # If a file with the same MD5 already exists but at a different path, copy it to the
            # target path and report it as uploaded ("instant upload").
            # Identical file path + file MD5 means it is exactly the same file:
            # if the file exists it has already been uploaded, so report instant upload.
            # If a record exists but the file does not, the upload is incomplete; compare the file
            # size and total chunk count, and if they do not match the chunking has changed, so
            # delete the existing chunk files and re-upload.
            # Otherwise return the chunks uploaded so far.
            saved_dataset = DataSet.objects.filter(file_md5=file_md5, uploaded=True).first()
            dataset = DataSet.objects.filter(data_type=data_type, work_zone=work_zone,
                                             directory_path=current_path, file_name=file_name, file_md5=file_md5).first()
            if saved_dataset and not dataset:
                api_logger.debug(f'已存在相同MD5数据, 目标路径不一样, 进行复制')
                create_or_get_directory(os.path.dirname(file_path))
                shutil.copyfile(saved_dataset.file_full_path, file_path)
                DataSet.objects.create(directory_path=current_path, file_name=file_name, data_type=data_type,
                                       file_type=FILE, file_size=total_size, uploaded=True, file_md5=file_md5,
                                       uploaded_chunks=saved_dataset.uploaded_chunks, total_chunks=total_chunks,
                                       work_zone=work_zone, creator=request.user)
                uploaded = True
                uploaded_chunks = saved_dataset.uploaded_chunks.split(',')
            else:
                if dataset:
                    if os.path.exists(file_path):
                        api_logger.debug(f'已存在文件路径与MD5一致的文件, uploaded_chunks:{dataset.uploaded_chunks}')
                        uploaded = True
                        uploaded_chunks = dataset.uploaded_chunks.split(',')
                    else:
                        if dataset.total_chunks != total_chunks or dataset.file_size != total_size:
                            api_logger.debug(f'存在记录, 但文件不存在, 且分块数与大小对不上')
                            uploaded = False
                            uploaded_chunks = []
                            if os.path.exists(file_path):
                                os.remove(file_path)
                            if os.path.exists(save_directory):
                                shutil.rmtree(save_directory)
                        else:
                            api_logger.debug(f'存在记录, 但文件不存在, 返回已记录上传的分块:{dataset.uploaded_chunks}')
                            uploaded = False
                            uploaded_chunks = dataset.uploaded_chunks.split(',')
                else:
                    api_logger.debug(f'不存在记录')
                    uploaded = False
                    uploaded_chunks = []
                    if os.path.exists(file_path):
                        os.remove(file_path)
                    if os.path.exists(save_directory):
                        shutil.rmtree(save_directory)
            uploaded_chunks = [int(uploaded_chunk) for uploaded_chunk in uploaded_chunks if uploaded_chunk]
            # Make sure the last chunk is uploaded again so the merge step gets triggered
            if len(uploaded_chunks) >= 1:
                uploaded_chunks = uploaded_chunks[:-1]
            result = {'success': True, 'messages': f'当前文件:{file_name}的分块情况',
                      'results': {'file_path': file_path, 'file_name': file_name,
                                  'uploaded': uploaded,
                                  'uploaded_chunks': uploaded_chunks,
                                  }}
            return Response(result, status=status.HTTP_200_OK)
        else:
            data_type = int(request.data.get('data_type', 0))
            current_path = request.data.get('current_path', '')
            work_zone_id = int(request.data.get('work_zone', 0))
            try:
                work_zone = WorkZone.objects.get(pk=work_zone_id)
            except WorkZone.DoesNotExist:
                work_zone = None
            if not data_type:
                result = {'success': False, 'messages': '无法确认当前所属文件类型!'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            chunk_index = request.data.get('chunk_index', None)  # index of the current chunk
            chunk_size = request.data.get('chunk_size', None)  # size of each chunk
            current_chunk_size = request.data.get('current_chunk_size', None)  # size of the current chunk
            total_size = int(request.data.get('total_size', 0))  # total file size
            file_md5 = request.data.get('file_md5', None)  # MD5
            file_name = request.data.get('file_name', None)  # file name
            relative_path = request.data.get('relative_path', None)  # relative path
            total_chunks = int(request.data.get('total_chunks', 0))  # total number of chunks
            chunk_file = request.data.get('file', None)  # the uploaded chunk itself

            dataset = DataSet.objects.filter(data_type=data_type, work_zone=work_zone,
                                             directory_path=current_path, file_name=file_name, file_md5=file_md5).first()
            if not dataset:
                dataset = DataSet.objects.create(data_type=data_type, file_type=10, work_zone=work_zone, file_md5=file_md5,
                                                 directory_path=current_path, file_name=file_name,
                                                 file_size=total_size, total_chunks=total_chunks,
                                                 creator=request.user)
            save_directory = os.path.join(dataset.directory_full_path, f'{file_md5}-part')
            save_path = os.path.join(save_directory, f'{file_name}.part{chunk_index}')
            # Save the chunk.
            # default_storage does not overwrite files; if the chunk already exists, delete it and save again.
            if default_storage.exists(save_path):
                default_storage.delete(save_path)
            default_storage.save(save_path, ContentFile(chunk_file.read()))
            uploaded_chunks = dataset.uploaded_chunks
            api_logger.debug(f'当前分块:{chunk_index}')
            if uploaded_chunks:
                uploaded_chunks = set(uploaded_chunks.split(','))
                uploaded_chunks.add(chunk_index)
            else:
                uploaded_chunks = [chunk_index]
            api_logger.debug(f'保存后所有分块:{uploaded_chunks}')
            dataset.uploaded_chunks = ','.join(list(uploaded_chunks))
            dataset.save()
            api_logger.debug(f'当前分块长度:{len(dataset.uploaded_chunks.split(","))}, 获取分块长度:{total_chunks}')
            if len(dataset.uploaded_chunks.split(',')) == int(total_chunks):
                api_logger.debug(f'文件全部接收, 开始合并:{save_directory}/*.part*')
                uploaded = True
                with open(dataset.file_full_path, 'wb') as uploaded_file:
                    for index in range(int(total_chunks)):
                        chunk_file = os.path.join(save_directory, f'{file_name}.part{index+1}')
                        api_logger.debug(f'当前文件{chunk_file}')
                        try:
                            chunk_file = open(chunk_file, 'rb')  # 按序打开每个分片
                            uploaded_file.write(chunk_file.read())  # 读取分片内容写入新文件
                            chunk_file.close()
                        except Exception as error:
                            api_logger.error(f'合并文件:{file_name} from {save_directory}失败:{error}')
                            uploaded = False
                # Verify the MD5 of the merged file.
                uploaded_file_md5 = check_md5_sum(file_name=dataset.file_full_path)
                if uploaded_file_md5 != file_md5:
                    api_logger.debug(f'合并文件MD5不一致:{uploaded_file_md5}, {file_md5}')
                    uploaded = False
                if uploaded:
                    dataset.uploaded = uploaded
                    dataset.save()
                    shutil.rmtree(save_directory)
                    result = {'success': True, 'messages': '成功上传文件并合并!',
                              'results': {'uploaded': uploaded, 'total_chunks': total_chunks,
                                          'file_name': file_name, 'file_md5': file_md5}}
                    return Response(result, status=status.HTTP_200_OK)
                else:
                    result = {'success': False, 'messages': '合并文件失败, 请重新上传!'}
                    return Response(result, status=status.HTTP_400_BAD_REQUEST)
            else:
                result = {'success': True, 'messages': f'成功上传分块文件:{chunk_index}!',
                          'results': {'uploaded': False, 'total_chunks': total_chunks,
                                      'file_name': file_name, 'file_md5': file_md5}
                          }
                return Response(result, status=status.HTTP_200_OK)
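A client-side sketch of the resumable-upload protocol implied by the view above. The parameter names mirror the ones the view reads; the endpoint URL and the extra form fields passed via **extra (data_type, work_zone, current_path) are assumptions.

import hashlib
import math
import os
import requests

def upload_in_chunks(path, url, chunk_size=5 * 1024 * 1024, **extra):
    total_size = os.path.getsize(path)
    total_chunks = max(1, math.ceil(total_size / chunk_size))
    with open(path, 'rb') as fh:
        file_md5 = hashlib.md5(fh.read()).hexdigest()
    common = {'file_name': os.path.basename(path), 'file_md5': file_md5,
              'total_size': total_size, 'total_chunks': total_chunks,
              'chunk_size': chunk_size, **extra}
    # Ask the server which chunks it already has (the GET branch above).
    state = requests.get(url, params=common).json()['results']
    if state.get('uploaded'):
        return  # identical file already on the server ("instant upload")
    done = set(state['uploaded_chunks'])
    with open(path, 'rb') as fh:
        for index in range(1, total_chunks + 1):
            data = fh.read(chunk_size)
            if index in done:
                continue  # chunk already stored; skip re-sending it
            requests.post(url, data={**common, 'chunk_index': index,
                                     'current_chunk_size': len(data)},
                          files={'file': data})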
Example #21
        get_storage_path = PHOTOLOGUE_PATH
    else:
        parts = PHOTOLOGUE_PATH.split('.')
        module_name = '.'.join(parts[:-1])
        module = import_module(module_name)
        get_storage_path = getattr(module, parts[-1])
else:
    def get_storage_path(instance, filename):
        fn = unicodedata.normalize('NFKD', force_text(filename)).encode('ascii', 'ignore').decode('ascii')
        return os.path.join(PHOTOLOGUE_DIR, 'photos', fn)

# Support CACHEDIR.TAG spec for backups for ignoring cache dir.
# See http://www.brynosaurus.com/cachedir/spec.html
PHOTOLOGUE_CACHEDIRTAG = os.path.join(PHOTOLOGUE_DIR, "photos", "cache", "CACHEDIR.TAG")
if not default_storage.exists(PHOTOLOGUE_CACHEDIRTAG):
    default_storage.save(PHOTOLOGUE_CACHEDIRTAG, ContentFile(
        "Signature: 8a477f597d28d172789f06886806bc55"))

# Exif Orientation values
# Value 0thRow	0thColumn
#   1	top     left
#   2	top     right
#   3	bottom	right
#   4	bottom	left
#   5	left	top
#   6	right   top
#   7	right   bottom
#   8	left    bottom

# Image Orientations (according to EXIF informations) that needs to be
# transposed and appropriate action
IMAGE_EXIF_ORIENTATION_MAP = {
Example #22
def image(request, path, token, autogen=False):

    is_admin = False
    if ("is_admin=true" in token and request and request.user.has_perm('admin')) or autogen:
        parameters = token
        is_admin = True
        if autogen:
            token = image_create_token(parameters)
    else:
        parameters = request.session.get(token, token)

    cached_image_file = os.path.join(path, token)

    now = timezone.now()
    expire_offset = timezone.timedelta(seconds=IMAGE_CACHE_HTTP_EXPIRATION)

    response = HttpResponse()
    response['Content-type'] = 'image/jpeg'
    response['Expires'] = (now + expire_offset).strftime("%a, %d %b %Y %T GMT")
    response['Last-Modified'] = now.strftime("%a, %d %b %Y %T GMT")
    response['Cache-Control'] = 'max-age=3600, must-revalidate'
    response.status_code = 200

    # If we already have the cache we send it instead of recreating it
    if IMAGE_CACHE_STORAGE.exists(cached_image_file):
        
        if autogen:
            return 'Already generated'
        
        try:
            f = IMAGE_CACHE_STORAGE.open(cached_image_file, "r")
        except IOError:
            raise Http404()
        response.write(f.read())
        f.close()

        response['Last-Modified'] = IMAGE_CACHE_STORAGE.modified_time(cached_image_file).strftime("%a, %d %b %Y %T GMT")
        return response
    
    if parameters == token and not is_admin:
        return HttpResponse("Forbidden", status=403)

    qs = QueryDict(parameters)

    file_storage = MEDIA_STORAGE
    if qs.get('static', '') == "true":
        file_storage = STATIC_STORAGE

    format = qs.get('format', IMAGE_DEFAULT_FORMAT)
    quality = int(qs.get('quality', IMAGE_DEFAULT_QUALITY))
    mask = qs.get('mask', None)
    mask_source = qs.get('mask_source', None)

    if mask is not None:
        format = "PNG"

    fill = qs.get('fill', None)
    background = qs.get('background', None)
    tint = qs.get('tint', None)

    center = qs.get('center', ".5,.5")
    mode = qs.get('mode', "crop")
        
    overlays = qs.getlist('overlay')
    overlay_sources = qs.getlist('overlay_source')
    overlay_tints = qs.getlist('overlay_tint')
    overlay_sizes = qs.getlist('overlay_size')
    overlay_positions = qs.getlist('overlay_position')

    width = int(qs.get('width', None))
    height = int(qs.get('height', None))
    try:
        padding = float(qs.get('padding',None))
    except TypeError:
        padding = 0.0

    if "video" in qs:
        data, http_response = generate_thumb(file_storage, smart_unicode(path), width=width, height=height)
        response.status_code = http_response
    else:
        try:
            try:
                f = urllib.urlopen(qs['url'])
                data = f.read()
                f.close()
            except KeyError:
                f = file_storage.open(path)
                data = f.read()
                f.close()
        except IOError:
            response.status_code = 404
            data = ""

    if data:
        try:
            if mode == "scale":
                output_data = scale(data, width, height, path, padding=padding, overlays=overlays, overlay_sources=overlay_sources, overlay_tints=overlay_tints, overlay_positions=overlay_positions, overlay_sizes=overlay_sizes, mask=mask, mask_source=mask_source, format=format, quality=quality, fill=fill, background=background, tint=tint)
            else:
                output_data = scaleAndCrop(data, width, height, path, True, padding=padding, overlays=overlays, overlay_sources=overlay_sources, overlay_tints=overlay_tints, overlay_positions=overlay_positions, overlay_sizes=overlay_sizes, mask=mask, mask_source=mask_source, center=center, format=format, quality=quality, fill=fill, background=background, tint=tint)
        except IOError:
            traceback.print_exc()
            response.status_code = 500
            output_data = ""
    else:
        output_data = data

    if response.status_code == 200:
        IMAGE_CACHE_STORAGE.save(cached_image_file,  ContentFile(output_data))
        if autogen:
            return 'Generated ' + str(response.status_code)
    else:
        if autogen:
            return 'Failed ' + cached_image_file
    
    response.write(output_data)

    return response
Example #23
    def test_content_file_default_name(self):
        self.assertIsNone(ContentFile(b"content").name)
Example #24
def modify(title, content):
    filename = f"entries/{title}.md"
    default_storage.delete(filename)
    default_storage.save(filename, ContentFile(content))
    return True
Example #25
    def test_open_resets_file_to_start_and_returns_context_manager(self):
        file = ContentFile(b'content')
        with file.open() as f:
            self.assertEqual(f.read(), b'content')
        with file.open() as f:
            self.assertEqual(f.read(), b'content')
Example #26
    def test_files(self):
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        # Attempting to access a FileField from the class raises a descriptive
        # error
        self.assertRaises(AttributeError, lambda: Storage.normal)

        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        self.assertRaises(ValueError, lambda: obj1.normal.size)

        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), "content")
        obj1.normal.close()

        # File objects can be assigned to FileField attributes, but they
        # shouldn't get committed until the model they're attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", "content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])

        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(sorted(files),
                         ["assignment.txt", "default.txt", "django_test.txt"])

        # Files can be read in a little at a time, if necessary.
        obj1.normal.open()
        self.assertEqual(obj1.normal.read(3), "con")
        self.assertEqual(obj1.normal.read(), "tent")
        self.assertEqual(list(obj1.normal.chunks(chunk_size=2)),
                         ["co", "nt", "en", "t"])
        obj1.normal.close()

        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
        self.assertEqual(obj2.normal.size, 12)

        # Push the objects into the cache to make sure they pickle properly
        cache.set("obj1", obj1)
        cache.set("obj2", obj2)
        self.assertEqual(
            cache.get("obj2").normal.name, "tests/django_test_1.txt")

        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")

        # Multiple files with the same name get _N appended to them.
        objs = [Storage() for i in range(3)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        self.assertEqual([o.normal.name for o in objs], [
            "tests/multiple_files.txt", "tests/multiple_files_1.txt",
            "tests/multiple_files_2.txt"
        ])
        for o in objs:
            o.delete()

        # Default values allow an object to access a single file.
        obj3 = Storage.objects.create()
        self.assertEqual(obj3.default.name, "tests/default.txt")
        self.assertEqual(obj3.default.read(), "default content")
        obj3.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj3.delete()
        obj3 = Storage()
        self.assertEqual(obj3.default.read(), "default content")
        obj3.default.close()

        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj4 = Storage()
        obj4.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj4.random.name.endswith("/random_file"))

        # Clean up the temporary files and dir.
        obj1.normal.delete()
        obj2.normal.delete()
        obj3.default.delete()
        obj4.random.delete()
Example No. 27
 def get_file(self):
     workflow = self.get_object()
     return ContentFile(workflow.render(), name=workflow.label)
Example No. 28
def create_csv_seb(user=None):
    this_year_participates = set(
        obj.get('email') for obj in Participant.objects.filter(
            is_participating=True,
            competition_id__in=(38, 39, 40, 41, 42, 43, 44, 45)).exclude(
                email='').values('email').annotate(
                    c=Count('id')).order_by('-c'))
    this_year_applications = set(
        obj.get('email') for obj in Application.objects.filter(
            participant__is_participating=True,
            competition_id__in=(38, 39, 40, 41, 42, 43, 44, 45)).exclude(
                email='').values('email').annotate(
                    c=Count('id')).order_by('-c'))
    this_year = this_year_participates.union(this_year_applications)

    sec_stage_participates = set(
        obj.get('email') for obj in Participant.objects.
        filter(is_participating=True, competition_id__in=(40, )).exclude(
            email='').values('email').annotate(c=Count('id')).order_by('-c'))
    sec_stage_applications = set(
        obj.get('email') for obj in Application.objects.filter(
            participant__is_participating=True, competition_id__in=(
                40, )).exclude(email='').values('email').annotate(
                    c=Count('id')).order_by('-c'))
    sec_stage = sec_stage_participates.union(sec_stage_applications)

    first_stage_participates = set(
        obj.get('email') for obj in Participant.objects.
        filter(is_participating=True, competition_id__in=(39, )).exclude(
            email='').exclude(email__in=sec_stage).values('email').annotate(
                c=Count('id')).order_by('-c'))
    first_stage_applications = set(
        obj.get('email') for obj in Application.objects.filter(
            participant__is_participating=True, competition_id__in=(
                39, )).exclude(email='').exclude(
                    email__in=sec_stage).values('email').annotate(
                        c=Count('id')).order_by('-c'))
    not_in_second_stage = first_stage_participates.union(
        first_stage_applications)

    this_year_not_payed = set(
        obj.get('email') for obj in Participant.objects.filter(
            is_participating=False, competition_id__in=(40, )).exclude(
                email='').exclude(
                    email__in=not_in_second_stage).values('email').annotate(
                        c=Count('id')).order_by('-c'))

    last_year_participates = set(
        obj.get('email') for obj in Participant.objects.filter(
            competition_id__in=(25, 26, 27, 28, 29, 30, 31, 32)).exclude(
                email='').exclude(email__in=this_year).exclude(
                    email__in=this_year_not_payed).values('email').annotate(
                        c=Count('id')).order_by('-c'))
    last_year_applications = set(
        obj.get('email') for obj in Application.objects.filter(
            competition_id__in=(25, 26, 27, 28, 29, 30, 31, 32)).exclude(
                email='').exclude(email__in=this_year).exclude(
                    email__in=this_year_not_payed).values('email').annotate(
                        c=Count('id')).order_by('-c'))
    last_year = last_year_participates.union(last_year_applications)

    # previously = set(obj.get('participant_email') for obj in Ev68RVeloParticipations.objects.filter(
    #     competition_id__in=(12, 13, 14, 15, 16, 17, 18, 27, 28, 29, 30, 31, 32, 33, 34)).exclude(
    #     participant_email='').exclude(participant_email__in=this_year).exclude(
    #     participant_email__in=this_year_not_payed).exclude(participant_email__in=last_year).values(
    #     'participant_email').annotate(c=Count('id')).order_by('-c'))

    file_obj = BytesIO()
    wrt = csv.writer(file_obj)
    wrt.writerow(['Email'])
    for email in this_year:
        wrt.writerow([email.encode('utf-8')])
    file_obj.seek(0)
    obj = TempDocument(created_by=user)
    obj.doc.save("this_year.csv", ContentFile(file_obj.read()))
    obj.save()
    file_obj.close()

    file_obj = BytesIO()
    wrt = csv.writer(file_obj)
    wrt.writerow(['Email'])
    for email in this_year_not_payed:
        wrt.writerow([email.encode('utf-8')])
    file_obj.seek(0)
    obj1 = TempDocument(created_by=user)
    obj1.doc.save("this_year_not_payed.csv", ContentFile(file_obj.read()))
    obj1.save()
    file_obj.close()

    file_obj = BytesIO()
    wrt = csv.writer(file_obj)
    wrt.writerow(['Email'])
    for email in last_year:
        wrt.writerow([email.encode('utf-8')])
    file_obj.seek(0)
    obj2 = TempDocument(created_by=user)
    obj2.doc.save("last_year.csv", ContentFile(file_obj.read()))
    obj2.save()
    file_obj.close()

    file_obj = BytesIO()
    wrt = csv.writer(file_obj)
    wrt.writerow(['Email'])
    for email in not_in_second_stage:
        wrt.writerow([email.encode('utf-8')])
    file_obj.seek(0)
    obj4 = TempDocument(created_by=user)
    obj4.doc.save("not_in_sec_stage.csv", ContentFile(file_obj.read()))
    obj4.save()
    file_obj.close()

    html = """this_year_not_payed: <a href="{0}{1}">{0}{1}</a><br>
last_year: <a href="{0}{2}">{0}{2}</a><br>
this_year_participates: <a href="{0}{3}">{0}{3}</a><br>
not_in_sec_stage: <a href="{0}{4}">{0}{4}</a><br>
    """.format(settings.MY_DEFAULT_DOMAIN, obj1.doc.url, obj2.doc.url,
               obj.doc.url, obj4.doc.url)

    txt = """this_year_not_payed: {0}{1}
last_year: {0}{2}
this_year_participates: {0}{3}
not_in_sec_stage: {0}{4}
    """.format(settings.MY_DEFAULT_DOMAIN, obj1.doc.url, obj2.doc.url,
               obj.doc.url, obj4.doc.url)

    send_mail(subject='Emails for marketing',
              message=txt,
              from_email=settings.SERVER_EMAIL,
              recipient_list=[
                  user.email,
              ],
              html_message=html)
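One caveat when reusing create_csv_seb() on Python 3: csv.writer expects a text stream, so writing into BytesIO and encoding each email, as above, will raise a TypeError. A hedged sketch of the equivalent step using StringIO, where `emails` stands in for any of the sets built above and TempDocument/user come from the surrounding example:

import csv
import io

from django.core.files.base import ContentFile

buf = io.StringIO()
writer = csv.writer(buf)
writer.writerow(['Email'])
for address in sorted(emails):
    writer.writerow([address])

doc = TempDocument(created_by=user)
doc.doc.save("emails.csv", ContentFile(buf.getvalue().encode('utf-8')))
doc.save()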
Example No. 29
 (RegexValidator(re.compile('[0-9]+')), '1234', None),
 (RegexValidator('.*'), '', None),
 (RegexValidator(re.compile('.*')), '', None),
 (RegexValidator('.*'), 'xxxxx', None),
 (RegexValidator('x'), 'y', ValidationError),
 (RegexValidator(re.compile('x')), 'y', ValidationError),
 (RegexValidator('x', inverse_match=True), 'y', None),
 (RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
 (RegexValidator('x', inverse_match=True), 'x', ValidationError),
 (RegexValidator(re.compile('x'),
                 inverse_match=True), 'x', ValidationError),
 (RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
 (RegexValidator('a'), 'A', ValidationError),
 (RegexValidator('a', flags=re.IGNORECASE), 'A', None),
 (FileExtensionValidator(['txt']),
  ContentFile('contents',
              name='fileWithUnsupportedExt.jpg'), ValidationError),
 (FileExtensionValidator(['txt']),
  ContentFile('contents', name='fileWithNoExtension'), ValidationError),
 (FileExtensionValidator([]), ContentFile('contents', name='file.txt'),
  ValidationError),
 (FileExtensionValidator(['txt']), ContentFile('contents',
                                               name='file.txt'), None),
 (FileExtensionValidator(), ContentFile('contents', name='file.jpg'), None),
 (validate_image_file_extension, ContentFile('contents',
                                             name='file.jpg'), None),
 (validate_image_file_extension, ContentFile('contents',
                                             name='file.png'), None),
 (validate_image_file_extension, ContentFile('contents', name='file.txt'),
  ValidationError),
 (validate_image_file_extension, ContentFile('contents',
                                             name='file'), ValidationError),
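The rows above are (validator, value, expected-exception) triples from a parametrized validator test. Exercising one of them directly looks roughly like this, inside a configured Django environment (the "report.jpg" file name is hypothetical):

from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.validators import FileExtensionValidator

validator = FileExtensionValidator(['txt'])
try:
    # ContentFile carries a .name, which is all the extension check looks at.
    validator(ContentFile('contents', name='report.jpg'))
except ValidationError:
    print("rejected: .jpg is not an allowed extension")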
Example No. 30
def add_info(request, param):
    if request.method == "POST":
        # Default to an empty string so the comparisons below never hit a NameError.
        data = request.POST.get('data', '').strip()
        profile = Profile.objects.get(user=request.user)
        if param == "name" and data != "":
            if profile.name != data:
                profile.name = data
                profile.save()
            return JsonResponse({
                "status": 201,
            })
        elif param == "email" and data != "":
            try:
                if profile.email != data:
                    old_email = profile.email
                    profile.email = data
                    profile.save()
                    subject = "Success! Email Changed | TourDay"
                    message = f"Hi {request.user.username},\nSuccess! Your Email has been changed!\n\nYour new email address is {profile.email}.\n\nIf you didn't changed your email, then your account is at risk. Contact TourDay Team as soon as possible.\n\nThanks,\nTourDay Team"
                    async_send_mail(subject, message,
                                    EMAIL_HOST_USER, old_email)

                    subject = "Success! Email Added | TourDay"
                    message = f"Hi {request.user.username},\nSuccess! This email has been added as your default email for TourDay.\n\nIf you received this email but didn't register for an TourDay account, something's gone wrong, Reply to this email to de-activate and close this account.\n\nThanks,\nTourDay Team"
                    async_send_mail(subject, message,
                                    EMAIL_HOST_USER, profile.email)
                return JsonResponse({
                    "status": 201,
                })
            except Exception:
                return JsonResponse({
                    "status": 400,
                })

        elif param == "fb" and data != "":
            if profile.fb != data:
                profile.fb = data
                profile.save()
            return JsonResponse({
                "status": 201,
            })
        elif param == "insta" and data != "":
            if profile.insta != data:
                profile.insta = data
                profile.save()
            return JsonResponse({
                "status": 201,
            })

        elif param == "password" and data != "":
            user = request.user
            user.set_password(data)
            user.save()
            login(request, user)
            subject = "Success! Password Changed | TourDay"
            message = f"Hi {user.username},\nSuccess! Your Password has been changed!\n\nIf you didn't changed your password, then your account is at risk. Contact TourDay Team as soon as possible.\n\nThanks,\nTourDay Team"
            async_send_mail(subject, message,
                            EMAIL_HOST_USER, request.user.email)
            return JsonResponse({
                "status": 201,
            })

        elif param == "bio" and data != "":
            if profile.bio != data:
                profile.bio = data
                profile.save()
            return JsonResponse({
                "status": 201,
            })

        elif param == "city" and data != "":
            if profile.city != data:
                profile.city = data
                profile.save()
            return JsonResponse({
                "status": 201,
            })

        elif param == "picture":
            image_data = request.POST.get("picture")
            format, imgstr = image_data.split(';base64,')
            print("format", format)
            ext = format.split('/')[-1]
            data = ContentFile(base64.b64decode(imgstr))
            file_name = "'myphoto." + ext
            profile.picture.save(file_name, data, save=True)

            return JsonResponse({
                "status": 201,
                "new_img": profile.picture.url,

            })
        else:
            return JsonResponse({}, status=404)
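The "picture" branch in the view above assumes the client posts a data URL. Roughly, the decoding step works like this (the payload and file name below are made-up examples):

import base64

from django.core.files.base import ContentFile

image_data = "data:image/png;base64,aVZCT1J3MEtH"   # hypothetical payload
header, imgstr = image_data.split(';base64,')
ext = header.split('/')[-1]                          # -> "png"
picture = ContentFile(base64.b64decode(imgstr), name="myphoto." + ext)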