Example No. 1
def createSVGView(request, filename):
    """
    This view receives the svg information from the workspace and saves the file
    """
    if request.is_ajax():
        filenameRegex = re.search(r'(?P<filename>[a-zA-Z]+[\d\.]*)\.(?P<extension>[a-zA-Z]{1,4}$)', filename)
        cleanFileName = filenameRegex.group('filename')
        cleanFileExtension = filenameRegex.group('extension')
        # ContentFile takes the file content as its first argument; start
        # from an empty file named after the cleaned-up filename.
        newFile = ContentFile('', name=cleanFileName + '.svg')

        fileContent = base64.b64decode(request.POST['svg']).decode('utf-8')
        newFile.write(fileContent)


        newFileDB = UploadSVGFile(file=newFile)
        newFileDB.save()

        response_data = {
            'success': 1,
            'url': newFileDB.file.url,
            'filename': filename,
            'extension': cleanFileExtension
        }

        return HttpResponse(json.dumps(response_data), content_type="application/json")
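The view above hands the in-memory ContentFile straight to an UploadSVGFile model. The model itself is not part of the example; a minimal sketch of what it presumably looks like (the upload_to path is an assumption):

from django.db import models

class UploadSVGFile(models.Model):
    # A FileField accepts any django.core.files.File subclass on assignment,
    # including an in-memory ContentFile; upload_to is only an assumed value.
    file = models.FileField(upload_to='svg/')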
Example No. 2
 def test_size_changing_after_writing(self):
     """ContentFile.size changes after a write()."""
     f = ContentFile('')
     self.assertEqual(f.size, 0)
     f.write('Test ')
     f.write('string')
     self.assertEqual(f.size, 11)
     with f.open() as fh:
         self.assertEqual(fh.read(), 'Test string')
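For contrast, ContentFile can also be seeded with its full content up front, in which case no write() call is needed; a quick sketch (not part of the test above):

from django.core.files.base import ContentFile

# ContentFile accepts either str or bytes; size reflects the length of the
# wrapped content immediately.
f = ContentFile(b"Test string")
assert f.size == 11
assert f.read() == b"Test string"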
Example No. 3
 def get_compiled_content_file(self, asset):
     if isinstance(asset, GenericAsset):
         return registry_instance.finder_service.open_asset(asset)
     else:
         ret = ContentFile('')
         asset_manifest = registry_instance.asset_manifest_repository.get_asset_manifest_with_asset(asset)
         for content_line in asset_manifest.get_content_lines():
             ret.write(force_bytes(content_line))
             ret.write(force_bytes("\n"))
         return ret
Example No. 5
 def store_rows(self, course_id, filename, rows):
     """
     Given a course_id, filename, and rows (each row is an iterable of
     strings), write the rows to the storage backend in csv format.
     """
     output_buffer = ContentFile('')
     # Adding unicode signature (BOM) for MS Excel 2013 compatibility
     output_buffer.write(codecs.BOM_UTF8)
     csvwriter = csv.writer(output_buffer)
     csvwriter.writerows(self._get_utf8_encoded_rows(rows))
     output_buffer.seek(0)
     self.store(course_id, filename, output_buffer)
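Note that ContentFile('') wraps a text stream, so under Python 3 writing the raw codecs.BOM_UTF8 bytes above raises a TypeError (which is why later examples guard it with six.PY2). A sketch of a Python 3 variant of the same idea, with the storage call left to the caller:

import codecs
import csv

from django.core.files.base import ContentFile

def build_csv_buffer(rows):
    """Build an in-memory CSV file with a BOM for MS Excel compatibility."""
    output_buffer = ContentFile('')
    # The buffer is text-mode, so write the decoded BOM rather than raw bytes.
    output_buffer.write(codecs.BOM_UTF8.decode('utf-8'))
    csv.writer(output_buffer).writerows(rows)
    output_buffer.seek(0)
    return output_buffer

The returned buffer can then be handed to any Django storage, for example default_storage.save(filename, build_csv_buffer(rows)).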
Example No. 7
class ChunkyUploadSaver(object):

    def __init__(self, filename):
        self.filename = filename
        self.content_file = None

    def update_file(self, content):
        if self.content_file is None:
            self.content_file = ContentFile(
                content,
                name=self.filename,
            )
            # ContentFile leaves the stream position at 0, so move to the
            # end to make sure later chunks append rather than overwrite.
            self.content_file.seek(0, 2)  # 2 == io.SEEK_END
        else:
            self.content_file.write(content)
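A hedged usage sketch for the class above, feeding it chunks and then handing the accumulated ContentFile to a FileField (the Upload model and its attachment field are assumptions, not part of the example):

# Hypothetical model and field names, purely for illustration.
saver = ChunkyUploadSaver('report.bin')
for chunk in (b'first ', b'second ', b'third'):
    saver.update_file(chunk)

saver.content_file.seek(0)  # rewind before saving the accumulated content
upload = Upload()
upload.attachment.save(saver.filename, saver.content_file)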
Example No. 8
    def add_rows(self, rows, output_buffer=None):
        """
        Given an output buffer and rows (each row is an iterable of
        strings), add rows to output buffer in csv format and return it.
        """
        if not output_buffer:
            output_buffer = ContentFile('')
            # Adding unicode signature (BOM) for MS Excel 2013 compatibility
            if six.PY2:
                output_buffer.write(codecs.BOM_UTF8)

        csvwriter = csv.writer(output_buffer)
        csvwriter.writerows(self._get_utf8_encoded_rows(rows))
        return output_buffer
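A hedged usage sketch, assuming report is an instance of the class defining add_rows: the returned buffer can be passed back in to append further batches.

buffer = report.add_rows([['id', 'name']])
buffer = report.add_rows([['1', 'Ada'], ['2', 'Linus']], output_buffer=buffer)
buffer.seek(0)  # rewind before handing the buffer to a storage backend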
Example No. 9
def save_md(content, name, tag):
    name = name + '.md'
    file = ContentFile(content, name)
    dir = settings.MEDIA_ROOT + '/md/%s/' % tag
    if not os.path.exists(dir):
        os.mkdir(dir)

    absolute_path = '/md/%s/' % tag + name
    path = dir + name

    with open(path, 'w+') as f:
        file = File(f)
        file.write(content)
    return absolute_path
Example No. 10
    def test_no_content_length_from_fd(self):
        """Test disabling content_length_from_fd on save"""
        backend = self.default_storage('v3', content_length_from_fd=False)
        content = dict(orig="Hello world!")
        content_file = ContentFile("")
        content_file.write(content['orig'])

        def mocked_put_object(cls, url, token, container, name=None,
                              contents=None, content_length=None, *args, **kwargs):
            content['saved'] = contents.read()
            content['size'] = content_length

        with patch('tests.utils.FakeSwift.put_object', new=classmethod(mocked_put_object)):
            backend.save('test.txt', content_file)
        self.assertEqual(content['saved'], content['orig'])
        self.assertIsNone(content['size'])
Example No. 12
def changeComments(request):

    data = json.loads(request.body.decode('utf-8'))
    comments = data.get('comments', None)
    commentary_uri = data.get('commentary_uri', None)

    commentary = ContentFile(b'')
    for comment in comments:
        temp = comment['text'] + '\n'
        commentary.write(temp.encode('utf-8'))



    resource_texts = Resource.objects.get(original_object_uri=commentary_uri)
    resource_texts.source.save(
        'commentary_' + str(resource_texts.pk) + '.txt', commentary)
    return HttpResponse(status=200)
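Since ContentFile can be seeded with the full payload, the write() loop above could equally be collapsed into a single join; a behaviourally equivalent sketch using the same Resource lookup:

commentary = ContentFile(
    ''.join(comment['text'] + '\n' for comment in comments).encode('utf-8')
)
resource_texts = Resource.objects.get(original_object_uri=commentary_uri)
resource_texts.source.save(
    'commentary_' + str(resource_texts.pk) + '.txt', commentary)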
Example No. 13
def generate(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    all student answers to a given problem, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'XblockCompletion - Calculating students answers to problem'}
    task_progress.update_task_state(extra_meta=current_step)
    
    data = task_input.get('data')
    filter_types = ['problem']
    students = XblockCompletionView().get_all_enrolled_users(data['course'])
    course_structure = get_data_course(data['course'])

    report_store = ReportStore.from_config('GRADES_DOWNLOAD')
    csv_name = 'Reporte_de_Preguntas'
    if data['format']:
        csv_name = 'Reporte_de_Preguntas_Resumen'

    report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=start_date.strftime("%Y-%m-%d-%H%M")
    )
    output_buffer = ContentFile('')
    if six.PY2:
        output_buffer.write(codecs.BOM_UTF8)
    csvwriter = csv.writer(output_buffer)

    student_states = XblockCompletionView().get_all_states(data['course'], filter_types)
    csvwriter = XblockCompletionView()._build_student_data(data, students, course_structure, student_states, filter_types, csvwriter)

    current_step = {'step': 'XblockCompletion - Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    output_buffer.seek(0)
    report_store.store(course_id, report_name, output_buffer)
    current_step = {
        'step': 'XblockCompletion - CSV uploaded',
        'report_name': report_name,
    }

    return task_progress.update_task_state(extra_meta=current_step)
Example No. 14
def changeComments(request):
    if request.method == 'POST':
        data = json.loads(request.body.decode('utf-8'))
        comments = data.get('comments', None)
        resource_pk = data.get('resource_pk', None)
        name = str(datetime.datetime.now().time())[:8]

        commentary = ContentFile(b'')
        for comment in comments:
            temp = comment['text'] + '\n'
            commentary.write(temp.encode('utf-8'))

        resource_texts = ResourceTexts.objects.get(resource__pk=resource_pk)
        resource_texts.commentary.save(
            'commentary_' + name + '.txt', commentary)
        return HttpResponse(status=200)
    return HttpResponse(status=403)
Example No. 15
def uploadDocxFirstTable(request):
    file_d = request.FILES['file']
    name = str(datetime.datetime.now().time())[:8]
    if file_d is None:
        return HttpResponse(status=403)
    docx_file = Document(file_d)
    tables = docx_file.tables
    info_table = tables[0]

    response = {}

    # title
    response['title'] = info_table.rows[0].cells[1].text if len(info_table.rows[0].cells[1].text) != 0 else ''

    # title in the national language
    response['lang_origin'] = info_table.rows[1].cells[1].text if len(info_table.rows[1].cells[1].text) != 0 else ''

    # language
    response['lang'] = info_table.rows[2].cells[1].text if len(info_table.rows[2].cells[1].text) != 0 else ''

    # dialect
    response['dialect'] = info_table.rows[3].cells[1].text if len(info_table.rows[3].cells[1].text) != 0 else ''

    # speech variety
    response['speech'] = info_table.rows[4].cells[1].text if len(info_table.rows[4].cells[1].text) != 0 else ''

    # genre
    response['genre'] = info_table.rows[5].cells[1].text if len(info_table.rows[5].cells[1].text) != 0 else ''

    # rite
    response['obr'] = info_table.rows[6].cells[1].text if len(info_table.rows[6].cells[1].text) != 0 else ''

    # time of recording
    response['time'] = info_table.rows[7].cells[1].text if len(info_table.rows[7].cells[1].text) != 0 else ''

    # place of recording
    response['place'] = info_table.rows[8].cells[1].text if len(info_table.rows[8].cells[1].text) != 0 else ''

    # performer
    response['permormed_by'] = info_table.rows[9].cells[1].text if len(info_table.rows[9].cells[1].text) != 0 else ''

    # collector
    response['collected_by'] = info_table.rows[10].cells[1].text if len(info_table.rows[10].cells[1].text) != 0 else ''

    # transcription of the audio recording
    response['decrypted_by'] = info_table.rows[11].cells[1].text if len(info_table.rows[11].cells[1].text) != 0 else ''

    # musical notation
    response['notation_by'] = info_table.rows[12].cells[1].text if len(info_table.rows[12].cells[1].text) != 0 else ''

    # translation into Russian
    response['transalted_by'] = info_table.rows[13].cells[1].text if len(info_table.rows[13].cells[1].text) != 0 else ''

    # translation editor
    response['editor'] = info_table.rows[14].cells[1].text if len(info_table.rows[14].cells[1].text) != 0 else ''

    # editor of the national-language text
    response['redactor'] = info_table.rows[15].cells[1].text if len(info_table.rows[15].cells[1].text) != 0 else ''

    # preparation of the commentary
    response['commantator'] = info_table.rows[16].cells[1].text if len(info_table.rows[16].cells[1].text) != 0 else ''

    # published
    response['published'] = info_table.rows[17].cells[1].text if len(info_table.rows[17].cells[1].text) != 0 else ''

    # place of storage
    response['place_storage'] = info_table.rows[18].cells[1].text if len(info_table.rows[18].cells[1].text) != 0 else ''

    # variants
    response['variants'] = info_table.rows[19].cells[1].text if len(info_table.rows[19].cells[1].text) != 0 else ''

    # additional information
    response['note'] = info_table.rows[20].cells[1].text if len(info_table.rows[20].cells[1].text) != 0 else ''

    return Response(response)
    main_table = tables[1]

    temp = ''
    original = ContentFile(b'')
    translation = ContentFile(b'')
    commentary = ContentFile(b'')

    for cell in main_table.columns[0].cells:
        temp = cell.text + '\n'
        original.write(temp.encode('utf-8'))

    for cell in main_table.columns[1].cells:
        temp = cell.text + '\n'
        translation.write(temp.encode('utf-8'))

    for cell in main_table.columns[2].cells:
        temp = cell.text + '\n'
        commentary.write(temp.encode('utf-8'))


    original_r = Resource()
    original_r.source.save('original_' + name,  original)

    trans_r = Resource()
    trans_r.source.save('translation_' + name,  translation)

    comment_r = Resource()
    comment_r.source.save('comment_' + name,  commentary)




    original_r.save()
    trans_r.save()
    comment_r.save()


    return HttpResponse(status=200)
Example No. 16
def uploadDocx(request, pk):
    if request.method == 'POST':
        file_d = request.FILES.get('attached_file', None)
        name = str(datetime.datetime.now().time())[:8]
        if file_d is None:
            return HttpResponse(status=200)
        docx_file = Document(file_d)
        tables = docx_file.tables
        info_table = tables[0]

        corpus = Corpus.objects.get(pk=pk)
        r_type = ResourceType.objects.all().filter(name='Текст').first()
        resource = Resource(
            corpus=corpus,
            resource_type=r_type,
            title=info_table.rows[0].cells[1].text if len(
                info_table.rows[0].cells[1].text) != 0 else 'Не указано',
            title_origin=info_table.rows[1].cells[1].text if len(
                info_table.rows[1].cells[1].text) != 0 else 'Не указано',
            language=info_table.rows[2].cells[1].text if len(
                info_table.rows[2].cells[1].text) != 0 else 'Не указано',
            dialect=info_table.rows[3].cells[1].text if len(
                info_table.rows[3].cells[1].text) != 0 else 'Не указано',
            speech=info_table.rows[4].cells[1].text if len(
                info_table.rows[4].cells[1].text) != 0 else 'Не указано',
            theme=info_table.rows[5].cells[1].text if len(
                info_table.rows[5].cells[1].text) != 0 else 'Не указано',
            time_of_recording=info_table.rows[6].cells[1].text if len(
                info_table.rows[6].cells[1].text) != 0 else 'Не указано',

            published=info_table.rows[15].cells[1].text if len(
                info_table.rows[15].cells[1].text) != 0 else 'Не указано',
            place_of_storage=info_table.rows[16].cells[1].text if len(
                info_table.rows[16].cells[1].text) != 0 else 'Не указано',
            variants=info_table.rows[17].cells[1].text if len(
                info_table.rows[17].cells[1].text) != 0 else 'Не указано',
            areal=info_table.rows[18].cells[1].text if len(
                info_table.rows[18].cells[1].text) != 0 else 'Не указано',
            extras=info_table.rows[19].cells[1].text if len(
                info_table.rows[19].cells[1].text) != 0 else 'Не указано'
        )

        main_table = tables[1]

        temp = ''
        original = ContentFile(b'')
        translation = ContentFile(b'')
        commentary = ContentFile(b'')

        for cell in main_table.columns[0].cells:
            temp = cell.text + '\n'
            original.write(temp.encode('utf-8'))

        for cell in main_table.columns[2].cells:
            temp = cell.text + '\n'
            translation.write(temp.encode('utf-8'))

        for cell in main_table.columns[3].cells:
            temp = cell.text + '\n'
            commentary.write(temp.encode('utf-8'))

        resource.save()
        resource_texts = ResourceTexts(resource=resource)
        resource_texts.save()

        resource_texts.original.save('original_' + name + '.txt', original)
        resource_texts.translation.save(
            'translation_' + name + '.txt', translation)
        resource_texts.commentary.save(
            'commentary_' + name + '.txt', commentary)

        return HttpResponse(status=200)
Example No. 17
def alii():
    folder = 'aliexpress/'
    hdr = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive'}
    for i in range(1,5):
        time.sleep(0.5)
        html = Request(
            'https://www.aliexpress.com/category/200000764/shoulder-bags/{}.html'.format(
                i), headers=hdr)
        htmll = urlopen(html).read()
        bsObj = BeautifulSoup(htmll, 'html.parser')
        # namelist = bsObj.find('div',{'id':'list-items'})
        namelist = bsObj.findAll('div', {'class': 'item'})
        print()
        for news in namelist:
            product_link = news.find('a', {'class': 'product'})
            if product_link:
                product_link = 'http:' + product_link.attrs['href']
                product_named = news.find('a', {'class': 'product'})
                product_price = news.find('span', {'class': 'value'})
                image = news.find('img', {'class': 'picCore'})
                image = 'http:' + image.get('src', image.get('image-src'))
                product_price = bytes(str(product_price.text), 'UTF-8')
                product_price = product_price.decode('ascii', 'ignore')
                namelst = bytes(str(product_named.text), 'UTF-8')
                namelst = namelst.decode('ascii', 'ignore')
                htl = Request(image, headers=hdr)
                httl = urlopen(htl).read()

            if Products.objects.filter(name=namelst, shop='aliexpress').exists():
                produc = Products.objects.get(name=namelst, shop='aliexpress')
                # Checks the price
                if produc.price != product_price:
                    # produc.old_price = produc.price
                    # produc.old_price_digit = int(produc.price.replace(',','').replace('\n','').replace('.00',''))
                    # Updates the price
                    produc.price = product_price
                    # Saves the price
                    produc.save()
            else:
                request = requests.get(image, stream=True)
                if request.status_code != requests.codes.ok:
                    continue
                randd_ne = get_random_string(length=10)
                file_name = image.split('/')[-1]
                point_finder = file_name.find('.')
                file_name = file_name[:point_finder] + randd_ne
                lf = tempfile.NamedTemporaryFile()
                for block in request.iter_content(1024 * 8):
                    if not block:
                        break
                    lf.write(block)
                lf = ContentFile(httl)
                product = Products(name=namelst, price=product_price, source_url=product_link, shop='aliexpress',
                                   genre='women-bags')
                product.image.save(folder + file_name[:10], lf)

    for i in range(1,5):
        # time.sleep(0.5)
        html = Request(
            'https://www.aliexpress.com/category/660103/makeup/{}.html'.format(
                i), headers=hdr)
        htmll = urlopen(html).read()
        bsObj = BeautifulSoup(htmll, 'html.parser')
        # namelist = bsObj.find('div',{'id':'list-items'})
        namelist = bsObj.findAll('div', {'class': 'item'})
        for news in namelist:
            product_link = news.find('a', {'class': 'product'})
            if product_link:
                product_link = 'http:' + product_link.attrs['href']
                product_named = news.find('a', {'class': 'product'})
                product_price = news.find('span', {'class': 'value'})
                image = news.find('img', {'class': 'picCore'})
                image = 'http:' + image.get('src', image.get('image-src'))
                product_price = bytes(str(product_price.text), 'UTF-8')
                product_price = product_price.decode('ascii', 'ignore')
                namelst = bytes(str(product_named.text), 'UTF-8')
                namelst = namelst.decode('ascii', 'ignore')
                htl = Request(image, headers=hdr)
                httl = urlopen(htl).read()

            if Products.objects.filter(name=namelst, shop='aliexpress').exists():
                produc = Products.objects.get(name=namelst, shop='aliexpress')
                # Checks the price
                if produc.price != product_price:
                    # price_format = re.findall(r'(?P<id>\d+)',product_price)
                    # produc.old_price = product_price
                    # produc.old_price_digit = int(produc.price.replace(',','').replace('\n','').replace('.00',''))
                    # Updates the price
                    produc.price = product_price
                    # Saves the price

                    produc.save()
            else:
                request = requests.get(image, stream=True)
                if request.status_code != requests.codes.ok:
                    continue
                randd_ne = get_random_string(length=10)
                file_name = image.split('/')[-1]
                point_finder = file_name.find('.')
                file_name = file_name[:point_finder] + randd_ne
                lf = tempfile.NamedTemporaryFile()
                for block in request.iter_content(1024 * 8):
                    if not block:
                        break
                    lf.write(block)
                lf = ContentFile(httl)
                product = Products(name=namelst, price=product_price, source_url=product_link, shop='aliexpress',
                                   genre='makeup')
                product.image.save(folder + file_name[:10], lf)

    for i in range(5):
        # time.sleep(0.2)
        html = Request(
            'https://www.aliexpress.com/category/63705/earphones-headphones/{}.html'.format(
                i), headers=hdr)
        htmll = urlopen(html).read()
        bsObj = BeautifulSoup(htmll, 'html.parser')
        # namelist = bsObj.find('div',{'id':'list-items'})
        namelist = bsObj.findAll('li', {'class': 'list-item'})
        for news in namelist:
            product_link = news.find('a', {'class': 'picRind'})
            if product_link:
                product_link = news.find('a', {'class': 'picRind'})
                product_link = product_link.attrs['href']
                product_named = news.find('a', {'class': 'product'})
                product_price = news.find('span', {'class': 'value'})
                image = news.find('img', {'class': 'picCore'})
                product_price = bytes(str(product_price.text), 'UTF-8')
                product_price = product_price.decode('ascii', 'ignore')
                namelst = bytes(str(product_named.text), 'UTF-8')
                namelst = namelst.decode('ascii', 'ignore')
                product_link = 'http:' + product_link
                image = 'http:' + image.get('src', image.get('image-src'))
                htl = Request(image, headers=hdr)
                httl = urlopen(htl).read()

            if Products.objects.filter(name=namelst, shop='aliexpress').exists():
                produc = Products.objects.get(name=namelst, shop='aliexpress')
                # Checks the price
                if produc.price != product_price:
                    # produc.old_price = produc.price
                    # produc.old_price_digit = int(produc.price.replace(',','').replace('\n','').replace('.00',''))
                    # Updates the price
                    produc.price = product_price
                    # Saves the price

                    produc.save()
            else:
                request = requests.get(image, stream=True)
                if request.status_code != requests.codes.ok:
                    continue
                randd_ne = get_random_string(length=10)
                file_name = image.split('/')[-1]
                point_finder = file_name.find('.')
                file_name = file_name[:point_finder] + randd_ne
                lf = tempfile.NamedTemporaryFile()
                for block in request.iter_content(1024 * 8):
                    if not block:
                        break
                    lf.write(block)
                lf = ContentFile(httl)
                product = Products(name=namelst, price=product_price, source_url=product_link, shop='aliexpress',
                                   genre='headphones')
                product.image.save(folder + file_name[:10], lf)


    # for i in range():
    #     # time.sleep(0.2)
    #     html = Request('https://www.aliexpress.com/category/100003084/hoodies-sweatshirts/{}.html'.format(
    #             i), headers=hdr)
    #     htmll = urlopen(html).read()
    #     bsObj = BeautifulSoup(htmll, 'html.parser')
    #     # namelist = bsObj.find('div',{'id':'list-items'})
    #     namelist = bsObj.findAll('li', {'class': 'list-item'})
    #     for news in namelist:
    #         product_link = news.find('a', {'class': 'picRind'})
    #         if product_link:
    #             product_link = news.find('a', {'class': 'picRind'})
    #             product_link = product_link.attrs['href']
    #             product_named = news.find('a', {'class': 'product'})
    #             product_price = news.find('span', {'class', 'value'})
    #             image = news.find('img', {'class': 'picCore'})
    #             product_price = bytes(str(product_price.text), 'UTF-8')
    #             product_price = product_price.decode('ascii', 'ignore')
    #             namelst = bytes(str(product_named.text), 'UTF-8')
    #             namelst = namelst.decode('ascii', 'ignore')
    #             product_link = 'http:' + product_link
    #             image = 'http:' + image.get('src', image.get('image-src'))
    #             htl = Request(image, headers=hdr)
    #             httl = urlopen(htl).read()

    #         if Products.objects.filter(name=namelst, shop='aliexpress').exists():
    #             produc = Products.objects.get(name=namelst, shop='aliexpress')
    #             # Checks the price
    #             if produc.price != product_price:
    #                 # produc.old_price = produc.price
    #                 # produc.old_price_digit = int(produc.price.replace(',','').replace('\n','').replace('.00',''))
    #                 # Updates the price
    #                 produc.price = product_price
    #                 # Saves the price

    #                 produc.save()
    #         else:
    #             request = requests.get(image, stream=True)
    #             if request.status_code != requests.codes.ok:
    #                 continue
    #             randd_ne = get_random_string(length=10)
    #             file_name = image.split('/')[-1]
    #             point_finder = file_name.find('.')
    #             file_name = file_name[:point_finder] + randd_ne
    #             lf = tempfile.NamedTemporaryFile()
    #             for block in request.iter_content(1024 * 8):
    #                 if not block:
    #                     break
    #                 lf.write(block)
    #             lf = ContentFile(httl)
    #             product = Products(name=namelst, price=product_price, source_url=product_link, shop='aliexpress',
    #                                genre='hoodies')
    #             product.image.save(file_name[:10], lf)
Example No. 18
def uploadDocx(request):
    file_d = request.FILES['file']
    data = request.data['data']
    data = json.loads(data)
    
    
    node = data['node']
    corpus_id = data['corpus_id']




    name = str(datetime.datetime.now().time())[:8]
    if file_d is None:
        return HttpResponse(status=403)
    docx_file = Document(file_d)
    tables = docx_file.tables
    main_table = tables[1]

    temp = ''
    original = ContentFile(b'')
    translation = ContentFile(b'')
    commentary = ContentFile(b'')

    for cell in main_table.columns[0].cells:
        temp = cell.text + '\n'
        original.write(temp.encode('utf-8'))

    for cell in main_table.columns[1].cells:
        temp = cell.text + '\n'
        translation.write(temp.encode('utf-8'))

    for cell in main_table.columns[2].cells:
        temp = cell.text + '\n'
        commentary.write(temp.encode('utf-8'))

    original_r = Resource()
    original_r.source.save('original_' + name,  original)

    trans_r = Resource()
    trans_r.source.save('translation_' + name,  translation)

    comment_r = Resource()
    comment_r.source.save('comment_' + name,  commentary)

    o = Onthology(DB_URI,DB_USER, DB_PASSWORD)
    origin_node_uri, translation_node_uri, commentary_node_uri, origin_node = o.createText(node, corpus_id, original_r.pk, trans_r.pk, comment_r.pk)

    original_r.name = 'original_' + name
    original_r.original_object_uri = origin_node_uri
    original_r.resource_type = 'text'

    trans_r.name = 'translation_' + name
    trans_r.original_object_uri = translation_node_uri
    trans_r.resource_type = 'text'

    comment_r.name = 'comment_' + name
    comment_r.original_object_uri = commentary_node_uri
    comment_r.resource_type = 'text'


    original_r.save()
    trans_r.save()
    comment_r.save()


    # return response to update 

    response = {}
    response['resource'] = origin_node
    response['media'] = []
    response['genres'] = []
    response['lang'] = []
    response['events'] = []
    response['media_carrier'] = [
        {
            'file': {
                'name': 'original_' + name,
                'source': original_r.source.url,
                'id':  original_r.id,
                'type': 'text'
            }
        }
    ]

    o.close()
    
    # print('IDIDIDIDIDI:', created_node.id)
    return Response(response)