def multi_get_files(request, fieldname, noajax=False):
    """View to retrieve MultiuploaderFiles based on a list of ids.

    Expects a GET request carrying a signed ``form_type`` token plus a
    list of file ids under ``fieldname``.  Returns a JSON array with one
    descriptor per file (id, name, size, url, thumbnail_url, delete
    info).  Non-GET requests get a plain refusal response.
    """
    if request.method == 'GET':
        log.info('received GET to get files view')
        if u'form_type' not in request.GET:
            response_data = [{"error": _("Error when detecting form type, form_type is missing")}]
            return HttpResponse(simplejson.dumps(response_data))
        signer = Signer()
        try:
            # value is only unsigned to verify integrity; tampering aborts
            form_type = signer.unsign(request.GET.get(u"form_type"))
        except BadSignature:
            response_data = [{"error": _("Tampering detected!")}]
            return HttpResponse(simplejson.dumps(response_data))
        result = []
        for p in request.GET.getlist(fieldname):
            fl = MultiuploaderFile.objects.get(id=p)
            thumb_url = ""
            try:
                thumb_url = get_thumbnail(fl.file, "80x80", quality=50)
            except Exception as e:
                # thumbnailing is best-effort; missing thumb is not fatal
                log.error(e)
            # generating json response array
            result.append({"id": fl.id,
                           "name": fl.filename,
                           "size": fl.file.size,
                           "url": reverse('multiuploader_file_link', args=[fl.pk]),
                           "thumbnail_url": thumb_url,
                           "delete_url": reverse('multiuploader_delete', args=[fl.pk]),
                           "delete_type": "POST",
                           })
        response_data = simplejson.dumps(result)
        # checking for json data type
        # big thanks to Guy Shapiro
        if noajax:
            # BUG FIX: the redirect response was built but never
            # returned, so the browser never followed it.
            if request.META.get('HTTP_REFERER'):
                return redirect(request.META['HTTP_REFERER'])
        # NOTE(review): Accept-Encoding advertises compression schemes
        # (gzip/deflate), not media types — HTTP_ACCEPT is the header
        # that can contain "application/json".  Original header kept,
        # but guarded against it being absent (previously a KeyError).
        if "application/json" in request.META.get('HTTP_ACCEPT_ENCODING', ''):
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data, mimetype=mimetype)
    else:  # POST
        return HttpResponse('Only GET accepted')
def multiuploader(request, noajax=False):
    """Main Multiuploader module.

    Parses data from the jQuery upload plugin: validates the signed
    ``form_type`` token and the upload form, stores the uploaded file as
    a ``MultiuploaderFile``, and returns a JSON descriptor for it.
    Non-POST requests get a plain refusal response.
    """
    if request.method == 'POST':
        log.info('received POST to main multiuploader view')
        if request.FILES is None:
            response_data = [{"error": _('Must have files attached!')}]
            return HttpResponse(simplejson.dumps(response_data))
        if u'form_type' not in request.POST:
            response_data = [{"error": _("Error when detecting form type, form_type is missing")}]
            return HttpResponse(simplejson.dumps(response_data))
        signer = Signer()
        try:
            form_type = signer.unsign(request.POST.get(u"form_type"))
        except BadSignature:
            # signed token failed verification -> form was tampered with
            response_data = [{"error": _("Tampering detected!")}]
            return HttpResponse(simplejson.dumps(response_data))
        form = MultiUploadForm(request.POST, request.FILES, form_type=form_type)
        if not form.is_valid():
            error = _("Unknown error")
            # surface the first file-field error if one exists
            if "file" in form._errors and len(form._errors["file"]) > 0:
                error = form._errors["file"][0]
            response_data = [{"error": error}]
            return HttpResponse(simplejson.dumps(response_data))
        file = request.FILES[u'file']
        wrapped_file = UploadedFile(file)
        filename = wrapped_file.name
        file_size = wrapped_file.file.size
        log.info('Got file: "%s"' % filename)
        # writing file manually into model
        # because we don't need form of any type.
        fl = MultiuploaderFile()
        fl.filename = filename
        fl.file = file
        fl.save()
        log.info('File saving done')
        thumb_url = ""
        try:
            thumb_url = get_thumbnail(fl.file, "80x80", quality=50)
        except Exception as e:
            # thumbnailing is best-effort; missing thumb is not fatal
            log.error(e)
        # generating json response array
        result = [{"id": fl.id,
                   "name": filename,
                   "size": file_size,
                   "url": reverse('multiuploader_file_link', args=[fl.pk]),
                   "thumbnail_url": thumb_url,
                   "delete_url": reverse('multiuploader_delete', args=[fl.pk]),
                   "delete_type": "POST",
                   }]
        response_data = simplejson.dumps(result)
        # checking for json data type
        # big thanks to Guy Shapiro
        if noajax:
            # BUG FIX: the redirect response was built but never
            # returned, so the browser never followed it.
            if request.META.get('HTTP_REFERER'):
                return redirect(request.META['HTTP_REFERER'])
        # NOTE(review): Accept-Encoding advertises compression schemes
        # (gzip/deflate), not media types — HTTP_ACCEPT is the header
        # that can contain "application/json".  Original header kept,
        # but guarded against it being absent (previously a KeyError).
        if "application/json" in request.META.get('HTTP_ACCEPT_ENCODING', ''):
            mimetype = 'application/json'
        else:
            mimetype = 'text/plain'
        return HttpResponse(response_data, mimetype=mimetype)
    else:  # GET
        return HttpResponse('Only POST accepted')
def set_thumbnail(self, item, column):
    """Fetch the thumbnail for *item*'s video and display it in the preview.

    Stores the loaded pixmap on ``self.current_thumbnail`` and pushes it
    into ``self.thumbnail_preview``.  *column* is accepted to match the
    signal signature but is not used.
    """
    url = get_thumbnail_url(video=item.video)
    raw_bytes = get_thumbnail(url).read()
    pixmap = QPixmap()
    pixmap.loadFromData(raw_bytes)
    self.current_thumbnail = pixmap
    self.thumbnail_preview.setPixmap(pixmap)
def compute_feats(bbdf):
    """Extract per-region image features for every bounding box in *bbdf*.

    Iterates the bounding-box dataframe row by row, crops/resizes each
    region via ``get_thumbnail``, accumulates resized images (``X_i``),
    positional features (``X_pos``) and id triples (``ids``), and every
    ``img_batch_size`` rows runs ``gltr.transform`` on the batch and
    pickles the stacked ``[ids | feats | pos]`` matrix to a gzipped
    file named by ``BASETEMPLATE_TMP``.  A final flush after the loop
    writes whatever remains.
    """
    X_pos = []          # positional feature vectors, one per kept region
    X_i = []            # resized region images awaiting feature extraction
    ids = []            # (corpus, image_id, region_id) triples, parallel to X_i
    file_counter = 1    # suffix for per-batch output filenames
    # cache of the most recently decoded image, so consecutive rows for
    # the same image do not reload it (get_thumbnail manages the cache)
    prev_iid, prev_img = (None, None)
    # FIXME, for debugging only! Reduced size or starting with offset
    # bbdf = bbdf[28524:]  # bbdf[54000:]
    for n, row in tqdm(bbdf.iterrows(), total=len(bbdf)):
        this_icorpus = row['i_corpus']
        this_image_id = row['image_id']
        this_region_id = row['region_id']
        this_bb = row['bb']
        # 2016-04-08: as note for future: When extracting
        # feats for imagenet regions, must
        # - create combined filename out of image_id and region_id
        # - neutralise positional features, by setting bb given
        #   to it to 0,0,w,h. So that all ImageNet regions
        #   end up with same positions.
        if code_icorpus[this_icorpus] == 'image_net':
            this_image_id_mod = join_imagenet_id(this_image_id, this_region_id)
            this_bb_mod = [0,0,this_bb[2],this_bb[3]]
        else:
            this_image_id_mod = this_image_id
            this_bb_mod = this_bb
        # degenerate boxes (zero/negative width or height) cannot be cropped
        if np.min(this_bb_mod[2:]) <= 0:
            print 'skipping over this image (%s,%d). 0 bb! %s' % \
                (code_icorpus[this_icorpus], this_image_id, str(this_bb_mod))
            continue
        # NOTE(review): passes this_bb, not this_bb_mod, so ImageNet
        # regions are cropped with the original box — confirm intended.
        (prev_iid, prev_img), img_resized = \
            get_thumbnail((prev_iid, prev_img), this_icorpus,
                          this_image_id_mod, this_bb)
        # skip images that are not 3-channel (e.g. greyscale)
        if len(prev_img.shape) != 3 or \
                (len(prev_img.shape) == 3 and prev_img.shape[2] != 3):
            print 'skipping over this image (%s,%d). b/w?' % \
                (code_icorpus[this_icorpus], this_image_id)
            continue
        # If we continue below this line, getting region worked
        X_i.append(img_resized)
        this_pos_feats = compute_posfeats(prev_img, this_bb_mod)
        X_pos.append(this_pos_feats)
        ids.append(np.array([this_icorpus, this_image_id, this_region_id]))
        # is it time to do the actual extraction on this batch
        # and write out to disk?
        if (n+1) % img_batch_size == 0:
            filename = BASETEMPLATE_TMP % (code_icorpus[this_icorpus], file_counter)
            print strftime("%Y-%m-%d %H:%M:%S")
            print "new batch!", n, file_counter, filename
            try:
                X = gltr.transform(X_i)
            except ValueError as e:
                # NOTE(review): batch is dropped silently and
                # file_counter is NOT advanced, so the next batch reuses
                # this filename — confirm that is intended.
                print 'Exception! But why? Skipping this whole batch..'
                X_i = []
                ids = []
                X_pos = []
                continue
                #raise e
            X_ids = np.array(ids)
            X_pos = np.array(X_pos)
            print X_ids.shape, X.shape, X_pos.shape
            # one row per region: [id triple | cnn feats | pos feats]
            X_f = np.hstack([X_ids, X, X_pos])
            with gzip.open(filename, 'w') as f:
                pickle.dump(X_f, f)
            print X_f.shape
            # reset accumulators for the next batch
            ids = []
            X_pos = []
            X_i = []
            file_counter += 1
    # and back to the for loop
    # we're done, so what we have needs to be processed in any case
    filename = BASETEMPLATE_TMP % (code_icorpus[this_icorpus], file_counter)
    print strftime("%Y-%m-%d %H:%M:%S")
    print "final batch!", n, file_counter, filename
    X = gltr.transform(X_i)
    X_ids = np.array(ids)
    X_pos = np.array(X_pos)
    X_f = np.hstack([X_ids, X, X_pos])
    with gzip.open(filename, 'w') as f:
        pickle.dump(X_f, f)
    print X_f.shape
def compute_feats(bbdf):
    """Extract per-region image features for every bounding box in *bbdf*.

    NOTE(review): this file contains two near-identical definitions of
    ``compute_feats``; this later one shadows the earlier definition at
    import time — confirm which copy is canonical.

    Iterates the bounding-box dataframe row by row, crops/resizes each
    region via ``get_thumbnail``, accumulates resized images (``X_i``),
    positional features (``X_pos``) and id triples (``ids``), and every
    ``img_batch_size`` rows runs ``gltr.transform`` on the batch and
    pickles the stacked ``[ids | feats | pos]`` matrix to a gzipped
    file named by ``BASETEMPLATE_TMP``.  A final flush after the loop
    writes whatever remains.
    """
    X_pos = []          # positional feature vectors, one per kept region
    X_i = []            # resized region images awaiting feature extraction
    ids = []            # (corpus, image_id, region_id) triples, parallel to X_i
    file_counter = 1    # suffix for per-batch output filenames
    # cache of the most recently decoded image, so consecutive rows for
    # the same image do not reload it (get_thumbnail manages the cache)
    prev_iid, prev_img = (None, None)
    # FIXME, for debugging only! Reduced size or starting with offset
    # bbdf = bbdf[28524:]  # bbdf[54000:]
    for n, row in tqdm(bbdf.iterrows(), total=len(bbdf)):
        this_icorpus = row['i_corpus']
        this_image_id = row['image_id']
        this_region_id = row['region_id']
        this_bb = row['bb']
        # 2016-04-08: as note for future: When extracting
        # feats for imagenet regions, must
        # - create combined filename out of image_id and region_id
        # - neutralise positional features, by setting bb given
        #   to it to 0,0,w,h. So that all ImageNet regions
        #   end up with same positions.
        if code_icorpus[this_icorpus] == 'image_net':
            this_image_id_mod = join_imagenet_id(this_image_id, this_region_id)
            this_bb_mod = [0, 0, this_bb[2], this_bb[3]]
        else:
            this_image_id_mod = this_image_id
            this_bb_mod = this_bb
        # degenerate boxes (zero/negative width or height) cannot be cropped
        if np.min(this_bb_mod[2:]) <= 0:
            print 'skipping over this image (%s,%d). 0 bb! %s' % \
                (code_icorpus[this_icorpus], this_image_id, str(this_bb_mod))
            continue
        # NOTE(review): passes this_bb, not this_bb_mod, so ImageNet
        # regions are cropped with the original box — confirm intended.
        (prev_iid, prev_img), img_resized = \
            get_thumbnail((prev_iid, prev_img), this_icorpus,
                          this_image_id_mod, this_bb)
        # skip images that are not 3-channel (e.g. greyscale)
        if len(prev_img.shape) != 3 or \
                (len(prev_img.shape) == 3 and prev_img.shape[2] != 3):
            print 'skipping over this image (%s,%d). b/w?' % \
                (code_icorpus[this_icorpus], this_image_id)
            continue
        # If we continue below this line, getting region worked
        X_i.append(img_resized)
        this_pos_feats = compute_posfeats(prev_img, this_bb_mod)
        X_pos.append(this_pos_feats)
        ids.append(np.array([this_icorpus, this_image_id, this_region_id]))
        # is it time to do the actual extraction on this batch
        # and write out to disk?
        if (n + 1) % img_batch_size == 0:
            filename = BASETEMPLATE_TMP % (code_icorpus[this_icorpus], file_counter)
            print strftime("%Y-%m-%d %H:%M:%S")
            print "new batch!", n, file_counter, filename
            try:
                X = gltr.transform(X_i)
            except ValueError as e:
                # NOTE(review): batch is dropped silently and
                # file_counter is NOT advanced, so the next batch reuses
                # this filename — confirm that is intended.
                print 'Exception! But why? Skipping this whole batch..'
                X_i = []
                ids = []
                X_pos = []
                continue
                #raise e
            X_ids = np.array(ids)
            X_pos = np.array(X_pos)
            print X_ids.shape, X.shape, X_pos.shape
            # one row per region: [id triple | cnn feats | pos feats]
            X_f = np.hstack([X_ids, X, X_pos])
            with gzip.open(filename, 'w') as f:
                pickle.dump(X_f, f)
            print X_f.shape
            # reset accumulators for the next batch
            ids = []
            X_pos = []
            X_i = []
            file_counter += 1
    # and back to the for loop
    # we're done, so what we have needs to be processed in any case
    filename = BASETEMPLATE_TMP % (code_icorpus[this_icorpus], file_counter)
    print strftime("%Y-%m-%d %H:%M:%S")
    print "final batch!", n, file_counter, filename
    X = gltr.transform(X_i)
    X_ids = np.array(ids)
    X_pos = np.array(X_pos)
    X_f = np.hstack([X_ids, X, X_pos])
    with gzip.open(filename, 'w') as f:
        pickle.dump(X_f, f)
    print X_f.shape
def serve_thumbnail(image):
    """Resolve *image*'s thumbnail location on disk and serve the file.

    ``utils.get_thumbnail`` returns a (directory, filename) pair which is
    handed straight to ``send_from_directory``.
    """
    print(f"Requested thumb {image}")
    directory, thumb_name = utils.get_thumbnail(image)
    # return "Document not found!", 404
    return send_from_directory(directory, thumb_name)