def to_masm(self): return util.postprocess("\n\n".join([ self.directives, self.enums.to_masm(), self.structs.to_masm(), self.segments.to_masm() ]))
def to_masm(self): return util.postprocess("\n\n".join([ self.directives, names.get_public_decls(), self.enums.to_masm(), self.structs.to_masm(), self.segments.to_masm() ]))
import cv2
import numpy as np

import util


def test():
    CLASSES = []
    label_map_path = '../models/labelmap.prototxt'
    with open(label_map_path) as f:
        lines = f.readlines()
    for x in range(3, len(lines), 5):
        CLASSES.append(
            ((lines[x].split(": "))[1]).replace("\"", "").replace("\n", ""))
    print(CLASSES)
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    # frame dimensions should be square
    PREPROCESS_DIMS = (300, 300)
    DISPLAY_DIMS = (900, 900)

    # calculate the multiplier needed to scale the bounding boxes
    DISP_MULTIPLIER = DISPLAY_DIMS[0] // PREPROCESS_DIMS[0]

    # Load the model
    net = cv2.dnn.readNet('../models/no_bn.xml', '../models/no_bn.bin')

    # Specify target device
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    for filename in ["test1.png", "test2.jpg", "test3.jpg"]:
        image = cv2.imread(filename)
        origimg = np.copy(image)
        image = util.preprocess_image(image)
        #image = image.transpose((2, 0, 1))
        #blob = cv2.dnn.blobFromImage(image, size=PREPROCESS_DIMS)
        net.setInput(image)
        outputs = net.forward()
        box, conf, cls = util.postprocess(origimg, outputs)
        for i in range(len(box)):
            p1 = (box[i][0], box[i][1])
            p2 = (box[i][2], box[i][3])
            cv2.rectangle(origimg, p1, p2, (0, 255, 0))
            p3 = (max(p1[0], 15), max(p1[1], 15))
            title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
            cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
        cv2.imwrite(filename.split(".")[0] + "SSD.jpg", origimg)
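# Hedged sketch of what util.postprocess is assumed to do in the snippet above:
# OpenVINO-style SSD heads emit a [1, 1, N, 7] detection tensor of
# [image_id, class_id, confidence, xmin, ymin, xmax, ymax] with coordinates
# normalized to [0, 1], so postprocessing scales the boxes back to the original
# image and drops low-confidence hits. The function name mirrors the call site,
# but the body and the 0.5 threshold are assumptions, not the project's code.
def postprocess(origimg, outputs, conf_threshold=0.5):
    h, w = origimg.shape[:2]
    boxes, confs, classes = [], [], []
    for det in outputs.reshape(-1, 7):
        _, class_id, conf, x1, y1, x2, y2 = det
        if conf < conf_threshold:
            continue
        boxes.append([int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)])
        confs.append(float(conf))
        classes.append(int(class_id))
    return boxes, confs, classes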
def is_mangled(name, ea):
    return util.postprocess(name) != get_demangled_name(ea)
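# Hedged sketch, assuming the IDA Pro context implied by `ea`: get_demangled_name
# could wrap IDAPython's idc.get_name / idc.demangle_name and fall back to the
# raw symbol when demangling fails. This is an assumption, not the project's helper.
import idc


def get_demangled_name(ea):
    raw_name = idc.get_name(ea)
    demangled = idc.demangle_name(raw_name, 0)  # 0: default disable mask (assumed)
    return demangled if demangled else raw_name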
# Load pretrained model
model = models.resnet34(pretrained=True)

# Load and classify image
im = Image.open("cat.jpg")  # taken from ImageNet test set
im = im.resize((224, 224), Image.ANTIALIAS)
true_label, true_prob = classify(model, util.preprocess(im))
print("True label: {}, prob: {}".format(true_label, true_prob))

# Generate adversarial example that will correspond to target_class
target_class = 800  # 5
print("Target class:", imagenet_labels.label(target_class))
adverserial_image = fgs.fgs(model, util.preprocess(im), target_class,
                            targeted=True, alpha=0.01, iterations=10,
                            use_cuda=CUDA)
adverserial_image = util.postprocess(adverserial_image)
adv_label, adv_prob = classify(model, util.preprocess(adverserial_image))
print("Predicted label: {}, prob: {}".format(adv_label, adv_prob))

# Plot results
plt.subplot(131)
plt.title("Before: {} {}%".format(true_label, true_prob))
before_im = np.array(im)
plt.imshow(before_im)

plt.subplot(132)
plt.title("After: {} {}%".format(adv_label, adv_prob))
after_im = np.array(adverserial_image)
plt.imshow(after_im)

plt.subplot(133)
plt.title("Added noise")
noise = after_im - before_im
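# Hedged sketch of the iterative, targeted fast gradient sign step assumed to
# live in fgs.fgs above: for a targeted attack the input is nudged *against* the
# gradient of the loss computed toward the target class. The signature mirrors
# the call site, but the body is an assumption, not the project's implementation;
# it presumes x is a batched [1, C, H, W] tensor with values scaled to [0, 1].
import torch
import torch.nn.functional as F


def fgs(model, x, target_class, targeted=True, alpha=0.01, iterations=10,
        use_cuda=False):
    device = torch.device("cuda" if use_cuda else "cpu")
    model = model.to(device).eval()
    x_adv = x.clone().detach().to(device)
    target = torch.tensor([target_class], device=device)
    for _ in range(iterations):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), target)
        grad = torch.autograd.grad(loss, x_adv)[0]
        step = alpha * grad.sign()
        # Descend toward the target class when targeted, ascend otherwise.
        x_adv = (x_adv - step) if targeted else (x_adv + step)
        x_adv = x_adv.clamp(0, 1).detach()
    return x_adv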
def to_masm(self): return util.postprocess("\n\n".join([self.directives, names.get_public_decls(), self.enums.to_masm(), self.structs.to_masm(), self.segments.to_masm()]))
def to_masm(self): return util.postprocess("\n\n".join([self.directives, self.enums.to_masm(), self.structs.to_masm(), self.segments.to_masm()]))
# Body of the per-batch test loop (the surrounding loop header is not part of this excerpt):
    best_scale = np.argmax(scales)
    new_images = []
    for image in images:
        new_images.append(
            cv.resize(image, scale_list[best_scale]))

    feed_dict = {
        images_plh: np.array(new_images),
        labels_plh: labels,
        is_training_plh: True,
        best_scale_plh: no_scale
    }
    pred = sess.run(model.predict, feed_dict=feed_dict)
    bboxes, scores, classes = util.postprocess(pred)

    map_list.append(
        util.compute_map(classes, bboxes, gt_bboxes))
    precision_list.append(
        util.compute_precision(classes, bboxes, gt_bboxes))
    recall_list.append(
        util.compute_recall(classes, bboxes, gt_bboxes))

    if end_of_epoch:
        break

# After the loop: aggregate metrics over the whole test set.
test_map = np.mean(map_list)
test_precision = np.mean(precision_list)
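# Hedged sketch of one way util.compute_precision could score the detections
# gathered above: greedy IoU matching of predicted boxes against ground truth at
# a fixed threshold. Helper names, box layout [x1, y1, x2, y2], and the 0.5 IoU
# threshold are assumptions; the class-agreement check is omitted for brevity.
def iou(box_a, box_b):
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter + 1e-9)


def compute_precision(classes, bboxes, gt_bboxes, iou_threshold=0.5):
    matched = set()
    true_positives = 0
    for box in bboxes:
        for j, gt in enumerate(gt_bboxes):
            if j not in matched and iou(box, gt) >= iou_threshold:
                matched.add(j)
                true_positives += 1
                break
    return true_positives / float(len(bboxes)) if len(bboxes) else 0.0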