Exemplo n.º 1
0
def _complete_social_login(request, sociallogin, response):
    """Finish a social login and, for first-time signups, import the
    provider's profile picture as the user's profile image.

    Args:
        request: current HttpRequest.
        sociallogin: allauth-style SocialLogin holding provider state.
        response: provider profile payload (dict-like).

    Returns:
        The response produced by login or signup processing.
    """
    if request.user.is_authenticated():
        get_account_adapter().logout(request)
    if sociallogin.is_existing:
        # Login existing user
        ret = _login_social_account(request, sociallogin)
    else:
        # New social user
        ret = _process_signup(request, sociallogin)
        try:
            url = response['profile_image_url']
        except (KeyError, TypeError):
            url = response.get('picture')
            if isinstance(url, dict):
                # Facebook nests the URL under picture -> data -> url.
                url = url.get('data', {}).get('url')
        if url:
            # Download the avatar; close the connection once read.
            with urllib.request.urlopen(url) as remote:
                file = BytesIO(remote.read())
            img = ImageFile(file)
            img.name = '%s_original.jpg' % (uuid.uuid4())
            Image.objects.create(
                image=img,
                title=img.name,
                content_object=sociallogin.user,
                object_id=sociallogin.user.id,
                image_type='P')
    return ret
def upload_relabel_image(request):
    """Store a relabel image posted by the inference module.

    Decodes the base64 image from the request, looks up the non-demo part
    by name and saves an ``Image`` row flagged ``is_relabel=True``.

    Args:
        request: DRF request whose ``data`` carries part_name, labels,
            img (base64) and confidence.

    Returns:
        JsonResponse with status "ok", or "failed" for an unknown part.
    """
    part_name = request.data["part_name"]
    labels = request.data["labels"]
    img_data = base64.b64decode(request.data["img"])
    confidence = request.data["confidence"]
    # is_relabel = request.data["is_relabel"]

    # first() issues a LIMIT 1 query instead of loading the whole queryset.
    part = Part.objects.filter(name=part_name, is_demo=False).first()
    if part is None:
        logger.error("Unknown Part Name: %s", part_name)
        return JsonResponse({"status": "failed"})

    img = ImageFile(io.BytesIO(img_data))
    img.name = datetime.datetime.utcnow().isoformat() + ".jpg"
    img_obj = Image(
        image=img,
        part_id=part.id,
        labels=labels,
        confidence=confidence,
        is_relabel=True,
    )
    img_obj.save()

    return JsonResponse({"status": "ok"})
Exemplo n.º 3
0
def capture(request, stream_id):
    """Capture one frame from the stream with the given id and persist it.

    Args:
        request: DRF request; may carry an overriding ``part_id`` query param.
        stream_id: id of the stream to grab a frame from.

    Returns:
        JsonResponse with the serialized image on success, or a "failed"
        payload when the stream is unknown or no part id is available.
    """
    # Iterate directly instead of indexing by range(len(...)).
    for stream in streams:
        if stream.id != stream_id:
            continue
        img_data = stream.get_frame()
        img = ImageFile(io.BytesIO(img_data))
        img.name = datetime.datetime.utcnow().isoformat() + ".jpg"
        logger.info(stream)
        logger.info(stream.part_id)
        # Query-param part_id wins over the stream's own part_id.
        part_id = request.query_params.get("part_id") or stream.part_id
        if not part_id:
            return JsonResponse({
                "status": "failed",
                "reason": "neither Stream and capture request have part_id"
            })
        img_obj = Image(image=img, part_id=part_id)
        img_obj.save()
        img_serialized = ImageSerializer(img_obj,
                                         context={"request": request})
        logger.info(img_serialized.data)

        return JsonResponse({"status": "ok", "image": img_serialized.data})

    return JsonResponse({
        "status": "failed",
        "reason": "cannot find stream_id " + str(stream_id)
    })
Exemplo n.º 4
0
def generate_thumbnail(file):
    """Write the uploaded video to a temp file and return a Django ImageFile
    holding a frame grabbed at a random time within the clip.

    Args:
        file: file-like object with the video bytes.

    Returns:
        ImageFile wrapping an open handle to the rendered PNG thumbnail
        (left open so Django can read it when the field is saved).
    """
    _id = uuid4()
    path = os.path.join(temp_path, str(_id))
    # Persist the upload first and close the handle so the bytes are flushed
    # before VideoFileClip re-opens the file by path.
    with open(path, 'wb') as f:
        f.write(file.read())
    clip = VideoFileClip(path)
    thumbnail = '%s.png' % path
    clip.save_frame(thumbnail, t=uniform(0.1, clip.duration))
    image_file = ImageFile(open(thumbnail, 'rb'))
    image_file.name = "%s.png" % str(_id)
    clip.close()
    return image_file
Exemplo n.º 5
0
def upload_image_to_s3(image_url):
    """
    Download image from a given `image_url` and upload to Amazon S3.

    :param image_url: URL of image to download
    :return: django-filer Image object
    :raises requests.HTTPError: if the download does not return 2xx
    """
    # response.content is bytes on Python 3; StringIO would raise TypeError.
    from io import BytesIO

    response = requests.get(image_url)
    # Fail loudly instead of silently storing an error page as an image.
    response.raise_for_status()

    file_obj = ImageFile(BytesIO(response.content))
    file_name = generate_image_filename(file_obj)
    file_obj.name = file_name

    image = Image.objects.create(original_filename=file_name, file=file_obj)

    return image
Exemplo n.º 6
0
def stock_data(request):
    """Fetch monthly data for the POSTed ticker, render a closing-price
    chart and persist it on the matching Stock row, then redirect home.

    Args:
        request: HttpRequest; on POST expects the ``tag`` form field.

    Returns:
        Redirect to 'home' on POST; implicitly None otherwise (unchanged
        from the original behavior).
    """
    if request.method == 'POST':
        tag = request.POST['tag']
        data, meta_data = ts.get_monthly(symbol=tag)
        data['close'].plot()
        plt.title(tag + ' monthly stock data')
        buf = BytesIO()
        plt.savefig(buf, format='png')
        # Close the figure: matplotlib keeps figures alive between requests,
        # leaking memory in a long-running server process.
        plt.close()
        buf.seek(0)
        # file to be saved in database
        content_file = ImageFile(buf)
        # Give the stored file a proper extension so it is served correctly.
        content_file.name = tag + '.png'
        # update_or_create already saves; name_tag in the lookup need not be
        # repeated in defaults.
        stock, _created = Stock.objects.update_or_create(
            name_tag=tag, defaults={'chart_img': content_file})
        return redirect('home')
Exemplo n.º 7
0
def capture(request, stream_id):
    """Capture image."""
    stream = stream_manager.get_stream_by_id(stream_id)
    # Unknown stream: bail out immediately.
    if not stream:
        raise StreamNotFoundError
    frame_bytes = stream.get_frame()
    captured = ImageFile(io.BytesIO(frame_bytes))
    captured.name = datetime.datetime.utcnow().isoformat() + ".jpg"
    logger.info(stream)
    logger.info(stream.part_id)
    # A part_id in the query string overrides the stream's own part_id.
    part_id = request.query_params.get("part_id") or stream.part_id
    image_row = Image(image=captured,
                      part_id=part_id,
                      camera_id=stream.camera_id)
    image_row.save()
    serializer = ImageSerializer(image_row, context={"request": request})
    payload = {"status": "ok", "image": serializer.data}
    # serializer = StreamCaptureResponseSerializer(data=payload)
    # serializer.is_valid(raise_exception=True)
    return Response(payload)
Exemplo n.º 8
0
def capture(request, stream_id):
    """Capture a frame from the stream with the given id and persist it.

    Args:
        request: DRF request used for serializer context.
        stream_id: id of the stream to grab a frame from.

    Returns:
        JsonResponse with the serialized image, or a "failed" payload when
        no stream matches ``stream_id``.
    """
    # Iterate directly instead of indexing by range(len(...)).
    for stream in streams:
        if stream.id != stream_id:
            continue
        img_data = stream.get_frame()
        img = ImageFile(io.BytesIO(img_data))
        img.name = datetime.datetime.utcnow().isoformat() + '.jpg'
        print(stream)
        print(stream.part_id)
        img_obj = Image(image=img, part_id=stream.part_id)
        img_obj.save()
        img_serialized = ImageSerializer(img_obj,
                                         context={'request': request})
        print(img_serialized.data)

        return JsonResponse({'status': 'ok', 'image': img_serialized.data})

    return JsonResponse({
        'status': 'failed',
        'reason': 'cannot find stream_id ' + str(stream_id)
    })
Exemplo n.º 9
0
def upload_relabel_image(request):
    """Store a relabel image posted by the inference module.

    Args:
        request: DRF request whose ``data`` carries part_name, labels,
            img (base64), confidence and is_relabel.

    Returns:
        JsonResponse with status "ok", or "failed" for an unknown part.
    """
    part_name = request.data['part_name']
    labels = request.data['labels']
    img_data = base64.b64decode(request.data['img'])
    confidence = request.data['confidence']
    # Read (and thereby require) the flag, even though the saved image is
    # always marked is_relabel=True below.
    is_relabel = request.data['is_relabel']

    # first() issues a LIMIT 1 query instead of loading the whole queryset.
    part = Part.objects.filter(name=part_name).first()
    if part is None:
        print('[ERROR] Unknown Part Name', part_name)
        return JsonResponse({'status': 'failed'})

    img = ImageFile(io.BytesIO(img_data))
    img.name = datetime.datetime.utcnow().isoformat() + '.jpg'
    img_obj = Image(image=img,
                    part_id=part.id,
                    labels=labels,
                    confidence=confidence,
                    is_relabel=True)
    img_obj.save()

    return JsonResponse({'status': 'ok'})
Exemplo n.º 10
0
 def _create_django_img(self, file_path, folder_name, file_name):
     """Wrap the image at ``file_path`` in a Django ImageFile whose name is
     ``folder_name/file_name`` (the handle stays open for Django to read)."""
     from django.core.files.images import ImageFile
     wrapped = ImageFile(open(file_path, "rb"))
     wrapped.name = "%s/%s" % (folder_name, file_name)
     return wrapped
Exemplo n.º 11
0
    def train_algorithm_on_hass_instance(self, request):
        """Train the selected algorithm on a Home Assistant data instance
        and persist the resulting model plus its loss artifacts.

        Stages training output in a tmp media folder, wraps the files in
        Django File objects, saves a Model row and removes the tmp copies.

        Args:
            request: HttpRequest; expects POST field "person_select".
        """
        # get web model of dataset
        person_name = request.POST.get("person_select", "")

        algo = self.get_sel_algorithm()
        # todo change this by adding an option in front end to choose data instance
        # NOTE(review): dataset/datainstance are hard-coded (id=2,
        # "homeassistant"); an empty filter result raises IndexError here.
        datainstance = DataInstance.objects.filter(id=2)[0]
        dataset = Dataset.objects.filter(name="homeassistant")[0]
        person = Person.objects.filter(name=person_name)[0]

        algo.selected_person = person
        algo.selected_dataset = dataset
        algo.save()

        # Build the media paths where the training artifacts are staged.
        model_name = self._create_model_name(algo, person, datainstance)
        folder_name = self._generate_folder_name(algorithm=algo,
                                                 preset_dataset=False)
        tmp_folder_name = 'tmp/' + folder_name

        tmp_model_file_path = self.get_media_file_path(tmp_folder_name,
                                                       MODEL_FILE_NAME)
        self._create_media_model_folder_if_not_exists(tmp_folder_name)
        tmp_model_image_file_path = self.get_media_file_path(
            tmp_folder_name, MODEL_IMG_NAME)
        tmp_loss_file_path = self.get_media_file_path(tmp_folder_name,
                                                      TRAIN_LOSS_FILE_NAME)
        tmp_loss_image_file_path = self.get_media_file_path(
            tmp_folder_name, TRAIN_LOSS_IMG_NAME)
        # NOTE(review): tmp_acc_file_path is computed but never used below.
        tmp_acc_file_path = self.get_media_file_path(tmp_folder_name,
                                                     TRAIN_ACC_FILE_NAME)

        ctrl = self._create_ctrl_for_hass_instance(algo, person, datainstance,
                                                   model_name)
        ctrl.init_model_on_dataset(model_name)
        ctrl.register_benchmark(model_name)
        ctrl.register_loss_file_path(tmp_loss_file_path, model_name)

        ctrl.train_model(model_name)
        # workaround, saving the file beforehand and loading it again
        # because joblib doesn't support buffer stuff
        #self._clean_tmp_folder()

        ctrl.save_model(tmp_model_file_path, model_name)
        ctrl.save_plot_trainloss(model_name, tmp_loss_image_file_path)
        # todo make this work
        #ctrl.save_visualization_to_file(tmp_model_image_file_path, model_name)

        from django.core.files.base import File
        from django.core.files.images import ImageFile

        # normal
        # Wrap the staged files; the assigned .name presumably determines the
        # stored path below the media root — confirm against Model's fields.
        django_model_file = File(open(tmp_model_file_path, "rb"))
        django_model_file.name = folder_name + "/" + MODEL_FILE_NAME

        #django_model_img = ImageFile(open(tmp_model_image_file_path, "rb"))
        #django_model_img.name = folder_name + "/" + MODEL_IMG_NAME

        django_loss_file = File(open(tmp_loss_file_path, "rb"))
        django_loss_file.name = folder_name + "/" + TRAIN_LOSS_FILE_NAME

        django_loss_image = ImageFile(open(tmp_loss_image_file_path, "rb"))
        django_loss_image.name = folder_name + "/" + TRAIN_LOSS_IMG_NAME

        print(django_model_file.name)

        model = Model(
            algorithm=algo,
            person=algo.selected_person,  # can be null
            dataset=algo.selected_dataset,
            datainstance=datainstance,
            file=django_model_file,
            #visualization=django_model_img,
            visualization=None,
            train_loss=django_loss_file,
            train_loss_graph=django_loss_image
            #train_acc=django_acc_file,
        )
        model.save()
        # Remove the tmp copies now that the Model row has been saved.
        os.remove(tmp_model_file_path)
        os.remove(tmp_loss_file_path)
Exemplo n.º 12
0
    def train_algorithm_on_preset_dataset(self, request):
        """Train the selected algorithm on one of the preset datasets and
        persist the resulting model together with its plots and loss log.

        Stages training output in a tmp media folder, wraps the files in
        Django File objects, saves a Model row and removes the tmp copies.

        Args:
            request: HttpRequest; expects POST field "dataset_select".
        """
        dataset_name = request.POST.get("dataset_select", "")
        algo = self.get_sel_algorithm()
        # NOTE(review): an empty filter result raises IndexError here.
        dataset = Dataset.objects.filter(name=dataset_name)[0]
        algo.selected_dataset = dataset
        algo.save()

        # Build the media paths where the training artifacts are staged.
        self._train_algorithm_on_preset_dataset_forehand_cleanup(algo=algo)
        folder_name = self._generate_folder_name(algorithm=algo,
                                                 preset_dataset=True)
        tmp_folder_name = 'tmp/' + folder_name

        tmp_model_file_path = self.get_media_file_path(tmp_folder_name,
                                                       MODEL_FILE_NAME)
        self._create_media_model_folder_if_not_exists(tmp_folder_name)
        tmp_model_image_file_path = self.get_media_file_path(
            tmp_folder_name, MODEL_IMG_NAME)
        tmp_loss_file_path = self.get_media_file_path(tmp_folder_name,
                                                      TRAIN_LOSS_FILE_NAME)
        tmp_loss_image_file_path = self.get_media_file_path(
            tmp_folder_name, TRAIN_LOSS_IMG_NAME)
        # NOTE(review): tmp_acc_file_path is computed but only referenced in
        # the commented-out acc handling below.
        tmp_acc_file_path = self.get_media_file_path(tmp_folder_name,
                                                     TRAIN_ACC_FILE_NAME)

        ctrl = self._create_ctrl_for_normal_dataset(algo, dataset)
        ctrl.init_model_on_dataset()
        ctrl.register_benchmark()

        #ctrl.register_loss_file_path('/home/cmeier/code/tmp/kasteren/train_loss.log')
        ctrl.register_loss_file_path(tmp_loss_file_path)

        print('~' * 100)
        print('~' * 100)
        print('loss_fn: ', tmp_loss_file_path)
        # todo set file path to acc logs in ctrl
        # todo set file path to train logs in ctrl
        #ctrl.register_acc_file_path(tmp_loss_file_path)
        #ctrl.register_acc_file_path(tmp_acc_file_path)
        #print('acc_fn: ', tmp_acc_file_path)
        #print('~'*100)
        #print('~'*100)

        ctrl.train_model([False])
        # workaround, saving the file beforehand and loading it again
        # because joblib doesn't support buffer stuff
        #self._clean_tmp_folder()

        ctrl.save_model(tmp_model_file_path)
        ctrl.save_loss_plot_to_file(tmp_loss_image_file_path)
        ctrl.save_visualization_to_file(tmp_model_image_file_path)

        from django.core.files.base import File
        from django.core.files.images import ImageFile

        # normal
        # Wrap the staged files; the assigned .name presumably determines the
        # stored path below the media root — confirm against Model's fields.
        django_model_file = File(open(tmp_model_file_path, "rb"))
        django_model_file.name = folder_name + "/" + MODEL_FILE_NAME

        django_model_img = ImageFile(open(tmp_model_image_file_path, "rb"))
        django_model_img.name = folder_name + "/" + MODEL_IMG_NAME

        #django_acc_file = File(open(tmp_acc_file_path, "rb"))
        #django_acc_file.name = folder_name + TRAIN_ACC_FILE_NAME

        django_loss_file = File(open(tmp_loss_file_path, "rb"))
        django_loss_file.name = folder_name + "/" + TRAIN_LOSS_FILE_NAME

        django_loss_image = ImageFile(open(tmp_loss_image_file_path, "rb"))
        django_loss_image.name = folder_name + "/" + TRAIN_LOSS_IMG_NAME

        print(django_model_file.name)

        model = Model(
            algorithm=algo,
            person=algo.selected_person,  # can be null
            dataset=algo.selected_dataset,
            file=django_model_file,
            visualization=django_model_img,
            train_loss=django_loss_file,
            train_loss_graph=django_loss_image
            #train_acc=django_acc_file,
        )
        model.save()

        # Remove the tmp copies now that the Model row has been saved.
        os.remove(tmp_model_file_path)
        os.remove(tmp_loss_file_path)
Exemplo n.º 13
0
    def create(self, request):
        """Validate an uploaded or linked image, run the segmentation network
        on it and return the result as a zip (mask + json) or plain json.

        The image comes either from the ``pic`` upload field or, when the
        ``urls`` field is non-empty, is downloaded from that URL first.

        Raises:
            ValidationError: when the URL download fails.
        """
        # Validate the incoming input (provided through post parameters)
        serializer = ImageSerializers(data=request.data,
                                      context={'request': request})
        serializer.is_valid(raise_exception=True)
        name = serializer.validated_data.get('name')
        pic = serializer.validated_data.get('pic')
        pic_urls = serializer.validated_data.get('urls')
        res_zip = serializer.validated_data.get('zip_result')
        heavy = serializer.validated_data.get('heavy')

        # Strip any query string from the URL.
        pic_urls = pic_urls.split('?')[0]
        print(pic_urls)

        if not pic_urls:
            # Image was uploaded directly through the form.
            im_path = settings.MEDIA_ROOT + 'pics/' + pic.name
            # Remove a previous upload with the same name, if any.
            if os.path.exists(im_path):
                print("Già caricato!")
                os.remove(im_path)
            else:
                print("Non ancora caricato!")

            obj = ImageUploaded(name=name,
                                pic=pic,
                                zip_result=res_zip,
                                heavy=heavy)
            obj.save()
            return self._segment_and_respond(im_path, heavy, res_zip)

        # Image must be downloaded from the given URL first.
        try:
            print(pic_urls)
            r = requests.get(pic_urls, stream=True)
            if r.status_code != 200:
                # Previously a non-200 left F unbound (NameError swallowed by
                # a bare except); fail explicitly instead.
                raise IOError('HTTP %s' % r.status_code)
            local_path = os.path.join(os.path.dirname(__file__), 'img.jpg')
            with open(local_path, 'wb+') as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
            # Keep this handle open: Django reads it when saving the field.
            F = ImageFile(open(local_path, 'rb+'))
            F.name = os.path.basename(pic_urls)
        except Exception as exc:  # narrowed from a bare ``except:``
            raise ValidationError(
                "Impossibile scaricare correttamente l'immmagine dal web."
            ) from exc

        im_path = settings.MEDIA_ROOT + 'pics/' + F.name
        # Remove a previous upload with the same name, if any.
        if os.path.exists(im_path):
            print("Già caricato!")
            os.remove(im_path)
        else:
            print("Non ancora caricato!")

        obj = ImageUploaded(name=name,
                            urls=pic_urls,
                            pic=F,
                            zip_result=res_zip,
                            heavy=heavy)
        obj.save()
        im_path = im_path.replace('%', '')
        return self._segment_and_respond(im_path, heavy, res_zip)

    def _segment_and_respond(self, im_path, heavy, res_zip):
        """Run the network on the image at ``im_path`` and build the response.

        Shared tail of ``create``: thumbnails the image in place, runs the
        persistent TF session, and returns either a downloadable zip with
        mask + json metadata (``res_zip`` truthy) or a json response.
        """
        num_classes = 25
        save_dir = './output/'
        scale = 1
        # Downscale in place so the network gets a bounded input size.
        with open(im_path, 'r+b') as f:
            with Image.open(f) as image:
                original_size = image.size
                image.thumbnail((600, 600), Image.BICUBIC)
                image.save(im_path, image.format)
        im = Image.open(im_path)
        t1 = time.time()
        preds, scores = persistent_sess.run([y, y_score],
                                            feed_dict={x: np.array(im)})
        mask_file, my_json = predict(preds, scores, im_path, scale,
                                     num_classes, save_dir, heavy, t1)
        my_json['original_size'] = original_size
        if not res_zip:
            return HttpResponse(json.dumps(my_json),
                                content_type="application/json")

        # Create response zip file
        with open(save_dir + 'json_data.json', 'w') as outfile:
            json.dump(my_json, outfile)
        with zipfile.ZipFile('result_data.zip', 'w',
                             zipfile.ZIP_DEFLATED) as zipf:
            zipf.write(save_dir + 'mask.png')
            zipf.write(save_dir + 'json_data.json')
        # Binary mode: reading the archive with 'r' raises UnicodeDecodeError
        # on Python 3.
        with open('result_data.zip', 'rb') as zip_fh:
            response = HttpResponse(zip_fh,
                                    content_type="application/zip")
        response[
            'Content-Disposition'] = 'attachment; filename="result_data.zip"'
        return response
Exemplo n.º 14
0
def upload_relabel_image(request):
    """Store a relabel image posted by the inference module.

    Validates the part name, the owning project, the relabel-image quota and
    the confidence range before saving the decoded image.

    Args:
        request: DRF request whose ``data`` carries part_name, labels,
            img (base64) and confidence.

    Returns:
        Response with status "ok", or "failed" (HTTP 400 for quota/range
        violations and a missing project).
    """

    part_name = request.data["part_name"]
    labels = request.data["labels"]
    img_data = base64.b64decode(request.data["img"])
    confidence = request.data["confidence"]
    # is_relabel = request.data["is_relabel"]

    # FIXME: Inferenece should send request using part id instead of part_name
    # first() issues a LIMIT 1 query instead of loading the whole queryset.
    part = Part.objects.filter(name=part_name, is_demo=False).first()
    if part is None:
        logger.error("Unknown Part Name: %s", part_name)
        return Response({"status": "failed"})

    # FIXME: use part foreign key to get project
    project_obj = Project.objects.filter(is_demo=part.is_demo).first()
    if project_obj is None:
        logger.error("Cannot found project objects")
        return Response(
            {
                "status": "failed",
                "log": "Cannot found project objects"
            },
            status=status.HTTP_400_BAD_REQUEST)

    # Relabel images count exceed project.maxImages
    relabel_imgs = Image.objects.filter(project=project_obj,
                                        part=part,
                                        is_relabel=True)
    # count() runs COUNT(*) in the database instead of fetching every row.
    relabel_count = relabel_imgs.count()
    if project_obj.maxImages <= relabel_count:
        logger.info("Already reach project maxImages limit")

        # Delete some images if already exceed maxImages
        for _ in range(relabel_count - project_obj.maxImages):
            relabel_imgs.last().delete()
        return Response(
            {
                "status": "failed",
                'log': 'Already reach project maxImages limit'
            },
            status=status.HTTP_400_BAD_REQUEST)

    # Relabel images count does not exceed project.maxImages
    # Handled by signals

    confidence_float = float(confidence) * 100
    # Confidence check
    if (confidence_float < project_obj.accuracyRangeMin or
            confidence_float > project_obj.accuracyRangeMax):
        logger.error("Inferenece confidence %s out of range", confidence_float)
        logger.error("range %s ~ %s", project_obj.accuracyRangeMin,
                     project_obj.accuracyRangeMax)

        return Response(
            {
                "status": "failed",
                'log': 'Confidence out of range',  # yapf...
            },
            status=status.HTTP_400_BAD_REQUEST)

    # All pass
    img = ImageFile(io.BytesIO(img_data))
    img.name = datetime.datetime.utcnow().isoformat() + ".jpg"
    img_obj = Image(
        image=img,
        part_id=part.id,
        labels=labels,
        confidence=confidence,
        project=project_obj,
        is_relabel=True,
    )
    img_obj.save()

    return Response({"status": "ok"})
Exemplo n.º 15
0
    def upload_relabel_image(self, request, pk=None) -> Response:
        """upload_relabel_image.

        Store a relabel image on this instance, enforcing the image quota:
        under the limit the image is saved directly; over the limit it is
        queued (oldest image popped) once the relabel window has expired,
        otherwise excess images are trimmed and PdRelabelImageFull is raised.

        Args:
            request: DRF request validated by UploadRelabelSerializer.
            pk: primary key of the object looked up in the queryset.
        """
        queryset = self.get_queryset()
        instance = drf_get_object_or_404(queryset, pk=pk)
        serializer = UploadRelabelSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        # FIXME: Inferenece should send part id instead of part_name
        part = drf_get_object_or_404(
            instance.parts, name=serializer.validated_data["part_name"])
        # Only validates the camera belongs to this instance (result unused).
        drf_get_object_or_404(instance.cameras,
                              pk=serializer.validated_data["camera_id"])

        project_obj = instance.project
        if project_obj is None:
            raise PdRelabelWithoutProject

        if project_obj.is_demo:
            raise PdRelabelDemoProjectError

        # Relabel images count does not exceed project.maxImages
        # Handled by signals

        confidence_float = serializer.validated_data["confidence"] * 100
        # Confidence check
        if (confidence_float < instance.accuracyRangeMin
                or confidence_float > instance.accuracyRangeMax):
            logger.error("Inferenece confidence %s out of range",
                         confidence_float)
            raise PdRelabelConfidenceOutOfRange

        # Relabel images count does not exceed project.maxImages
        # Under the quota: save the relabel image directly.
        if (instance.maxImages > Image.objects.filter(
                project=project_obj, part=part, is_relabel=True).count()):
            img_io = serializer.validated_data["img"].file

            img = ImageFile(img_io)
            img.name = str(timezone.now()) + ".jpg"
            img_obj = Image(
                image=img,
                part_id=part.id,
                camera_id=serializer.validated_data["camera_id"],
                labels=serializer.validated_data["labels"],
                confidence=serializer.validated_data["confidence"],
                project=instance.project,
                is_relabel=True,
            )
            img_obj.save()
            return Response({"status": "ok"})

        # User is not relabling and exceed maxImages
        # queue...
        logger.info(project_obj.relabel_expired_time)
        logger.info(timezone.now())
        if project_obj.relabel_expired_time < timezone.now():
            logger.info("Queuing relabel images...")
            img_io = serializer.validated_data["img"].file
            img = ImageFile(img_io)
            img.name = str(timezone.now()) + ".jpg"
            img_obj = Image(
                image=img,
                camera_id=serializer.validated_data["camera_id"],
                part_id=part.id,
                labels=serializer.validated_data["labels"],
                confidence=serializer.validated_data["confidence"],
                project=project_obj,
                is_relabel=True,
            )
            img_obj.save()
            # pop
            # Keep the total constant by removing the oldest relabel image.
            earliest_img = (Image.objects.filter(
                project=project_obj, part=part,
                is_relabel=True).order_by("timestamp").first())
            if earliest_img is not None:
                earliest_img.delete()
            return Response({"status": "ok"})
            # pop image

        # User is relabeling and exceed maxImages
        # Trim the newest images down to the quota, then signal the caller.
        for _ in range(
                Image.objects.filter(
                    project=project_obj, part=part, is_relabel=True).count() -
                instance.maxImages):
            Image.objects.filter(
                project=project_obj, part=part,
                is_relabel=True).order_by("timestamp").last().delete()
        raise PdRelabelImageFull