Esempio n. 1
0
File: eval.py Progetto: zyg11/VKD
def main():
    """Rebuild a checkpointed TriNet from its saved logs and evaluate it."""
    conf = Conf()
    conf.suppress_random()
    device = conf.get_device()
    args = parse(conf)

    # Restore the architecture hyper-parameters logged at training time so
    # the model we instantiate below matches the checkpoint exactly.
    trinet_folder = Path(args.trinet_folder)
    saver_trinet = Saver(trinet_folder.parent, trinet_folder.name)
    old_params, old_hparams = saver_trinet.load_logs()
    args.backbone = old_params['backbone']
    args.metric = old_params['metric']

    loaders = get_dataloaders(args.dataset_name, conf.nas_path, device, args)
    train_loader, query_loader, gallery_loader, queryimg_loader, galleryimg_loader = loaders
    num_pids = train_loader.dataset.get_num_pids()

    # The checkpoint must have been trained on the same identity set.
    assert num_pids == old_hparams['num_classes']

    net = get_model(args, num_pids).to(device)
    checkpoint = torch.load(trinet_folder / 'chk' / args.trinet_chk_name)
    net.load_state_dict(checkpoint)

    evaluator = Evaluator(net,
                          query_loader,
                          gallery_loader,
                          queryimg_loader,
                          galleryimg_loader,
                          device=device,
                          data_conf=DATA_CONFS[args.dataset_name])

    # Evaluate once, verbosely, with no saver and no tensorboard output.
    evaluator.eval(None, 0, verbose=True, do_tb=False)
def export_db():
    """Export all patients to an Excel workbook at a user-chosen location.

    Opens a save-file dialog, remembers the chosen directory as the new
    home_dir, and writes every patient returned by JsonLoader into an
    Excel file at that path. Does nothing if the user cancels the dialog.
    """
    location = gui.filesavebox(msg=ms["choose_save_location"], title=ms["save"],
                               default=cf.get("home_dir") + os.sep + 'db' + EXTENSION,
                               filetypes=["*" + EXTENSION])
    # filesavebox returns None when the user cancels; the original code
    # called os.path.dirname(None) first and crashed — bail out early.
    if location is None:
        return
    cf.set("home_dir", os.path.abspath(os.path.dirname(location)))
    if not location.endswith(EXTENSION):
        location += EXTENSION
    patients = JsonLoader.get_all_patients()
    excel_builder = excel.ExcelBuilder(patients, file_name=location)
    excel_builder.write_patients()
Esempio n. 3
0
def main(conffile, imfile):
    """Preview every configured augmentation technique on one image.

    Applies each technique from the configuration to the image and shows
    all labelled results together in a single mosaic window.
    """
    conf = Conf(conffile)
    image = cv2.imread(imfile)

    # Read the configuration values.
    problem = conf["problem"]
    annotationMode = conf["annotation_mode"]
    outputMode = conf["output_mode"]
    generationMode = conf["generation_mode"]
    inputPath = conf["input_path"]
    parameters = conf["parameters"]
    augmentationTechniques = conf["augmentation_techniques"]
    print(augmentationTechniques)

    # Build the augmentor from the configuration.
    augmentor = createAugmentor(problem, annotationMode, outputMode,
                                generationMode, inputPath, parameters)

    # Apply each technique to the sample image, stamping its name on the result.
    images = []
    for (name, params) in augmentationTechniques:
        tech = createTechnique(name, params)
        im = Generator(tech).applyForClassification(image)
        cv2.putText(im, name, (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        images.append(im)

    cv2.imshow("Mosaic", generateMosaic(images))
    cv2.waitKey(0)
Esempio n. 4
0
def main(conf):
    """Run the full augmentation pipeline described by a configuration file."""
    conf = Conf(conf)

    # Read the configuration values.
    problem = conf["problem"]
    annotationMode = conf["annotation_mode"]
    outputMode = conf["output_mode"]
    generationMode = conf["generation_mode"]
    inputPath = conf["input_path"]
    parameters = conf["parameters"]
    augmentationTechniques = conf["augmentation_techniques"]

    # Build the augmentor from the configuration.
    augmentor = createAugmentor(problem, annotationMode, outputMode,
                                generationMode, inputPath, parameters)

    # Register one generator per configured technique, in order.
    for (name, params) in augmentationTechniques:
        augmentor.addGenerator(Generator(createTechnique(name, params)))

    # Run the augmentation over the whole input.
    augmentor.applyAugmentation()
    def load_ui_wokspace(self, embed, root, wh, ww):
        """Build the workspace side panel (angle toggles and action buttons)
        plus the drawing canvas; returns the canvas widget."""

        def angle_toggle(key):
            # One BooleanVar (restored from config, default True) with its checkbox.
            var = tk.BooleanVar(value=cf.get(key, True))
            box = tk.Checkbutton(embed,
                                 text=ms[key],
                                 variable=var,
                                 command=self.toggle_visible_angles)
            return var, box

        def action_button(label_key, command):
            return tk.Button(embed, text=ms[label_key], command=command, width=15)

        self.show_malar_angles_ui, malar_angles_checkbox = angle_toggle(
            "show_malar_angles")
        self.show_interocular_angles_ui, interocular_angles_checkbox = angle_toggle(
            "show_interocular_angles")
        self.show_chin_angles_ui, chin_angles_checkbox = angle_toggle(
            "show_chin_angles")

        deleteBtn = action_button("delete_last", self.delete_mark)
        propsBtn = action_button("show_props", self.show_proportions)
        clearBtn = action_button("delete_all", self.delete_all)

        # Canvas on the right; all controls stack in the side column.
        screen = tk.Canvas(root, width=ww, height=wh)
        screen.grid(row=0, column=1, sticky=tk.W + tk.N)
        malar_angles_checkbox.grid(sticky=tk.W, padx=6)
        interocular_angles_checkbox.grid(sticky=tk.W, padx=6)
        chin_angles_checkbox.grid(sticky=tk.W, padx=6)
        propsBtn.grid(sticky=tk.W, padx=6)
        deleteBtn.grid(sticky=tk.W, padx=6)
        clearBtn.grid(sticky=tk.W + tk.S, pady=35, padx=6)
        screen.grid(sticky=tk.W)
        # Apply the restored visibility state immediately.
        self.toggle_visible_angles()
        return screen
Esempio n. 6
0
def _load_select_image():
    """Open a file dialog for picking an image; returns the chosen path
    (or None if the dialog is dismissed)."""
    image_types = [[
        "*.jpg", "*.jpeg", "*.png", "*.bmp", "*.gif", "IMAGE files"
    ]]
    return gui.fileopenbox(ms["select_image"],
                           ms["image_Selection"],
                           default=cf.get("home_file"),
                           filetypes=image_types)
def _record_image(patient):
    """Ask the user for a photo and attach it to *patient*.

    Raises LeftIncompleteException when the dialog is cancelled.
    """
    image_types = [[
        "*.jpg", "*.jpeg", "*.png", "*.bmp", "*.gif", "IMAGE files"
    ]]
    img = fileopenbox(ms["select_image"],
                      ms["image_Selection"],
                      default=cf.get("home_file"),
                      filetypes=image_types)
    if img is None:
        # Dialog dismissed without a selection — the record stays incomplete.
        raise LeftIncompleteException
    patient.photo = img
 def toggle_visible_angles(self):
     """Copy the UI toggle states onto the workspace, refresh each angle
     overlay, and persist the choices in the config."""
     ws = self.workspace
     ws.show_chin_angles = self.show_chin_angles_ui.get()
     ws.show_interocular_angles = self.show_interocular_angles_ui.get()
     ws.show_malar_angles = self.show_malar_angles_ui.get()
     ws.toggle_chin_angles()
     ws.toggle_interocular_angles()
     ws.toggle_malar_angles()
     # Persist so the next session restores the same visibility.
     cf.set("show_chin_angles", ws.show_chin_angles)
     cf.set("show_interocular_angles", ws.show_interocular_angles)
     cf.set("show_malar_angles", ws.show_malar_angles)
Esempio n. 9
0
def parse(conf: Conf):
    """Command-line arguments for teacher/student distillation training."""
    p = argparse.ArgumentParser(description='Train img to video model')
    p = conf.add_default_args(p)

    # Teacher / student checkpoints.
    p.add_argument('teacher', type=str)
    p.add_argument('--teacher_chk_name', type=str, default='chk_end')
    p.add_argument('--student', type=str)
    p.add_argument('--student_chk_name', type=str, default='chk_end')

    # Experiment bookkeeping.
    p.add_argument('--exp_name', type=str, default=str(uuid4()))
    p.add_argument('--num_generations', type=int, default=1)
    p.add_argument('--eval_epoch_interval', type=int, default=50)
    p.add_argument('--print_epoch_interval', type=int, default=5)

    # Optimisation / distillation hyper-parameters.
    p.add_argument('--lr', type=float, default=1e-4)
    p.add_argument('--lr_decay', type=float, default=0.1)
    p.add_argument('--temp', type=float, default=10.)
    p.add_argument('--lambda_coeff', type=float, default=0.0001)
    p.add_argument('--kl_coeff', type=float, default=0.1)

    # Bag sizes for teacher and student views.
    p.add_argument('--num_train_images', type=int, default=8)
    p.add_argument('--num_student_images', type=int, default=2)
    p.add_argument('--train_strategy', type=str, default='multiview',
                   choices=['multiview', 'temporal'])

    # LR schedule.
    p.add_argument('--num_epochs', type=int, default=400)
    p.add_argument('--gamma', type=float, default=0.1)
    p.add_argument('--first_milestone', type=int, default=300)
    p.add_argument('--step_milestone', type=int, default=50)

    # Optional re-initialisation of the last backbone layers.
    p.add_argument('--reinit_l4', type=str2bool, default=True)
    p.add_argument('--reinit_l3', type=str2bool, default=False)

    p.add_argument('--logits_dist', type=str, default='kl',
                   choices=['kl', 'mse'])

    args = p.parse_args()
    # Random erasing is always enabled for this script.
    args.use_random_erasing = True
    return args
Esempio n. 10
0
File: eval.py Progetto: zyg11/VKD
def parse(conf: Conf):
    """Command-line arguments for evaluating a trained TriNet checkpoint."""
    p = argparse.ArgumentParser(description='Train img to video model')
    p = conf.add_default_args(p)

    p.add_argument('trinet_folder', type=str,
                   help='Path to TriNet base folder.')
    p.add_argument('--trinet_chk_name', type=str, default='chk_end',
                   help='checkpoint name')

    args = p.parse_args()
    # Evaluation-only settings that are not exposed on the command line.
    args.train_strategy = 'chunk'
    args.use_random_erasing = False
    args.num_train_images = 0
    return args
Esempio n. 11
0
def parse(conf: Conf):
    """Command-line arguments for baseline re-id training."""
    p = argparse.ArgumentParser(description='Train img to video model')
    p = conf.add_default_args(p)

    p.add_argument('--exp_name', type=str, default=str(uuid4()),
                   help='Experiment name.')
    p.add_argument('--metric', type=str, default='euclidean',
                   choices=['euclidean', 'cosine'],
                   help='Metric for distances')
    p.add_argument('--num_train_images', type=int, default=8,
                   help='Num. of bag images.')

    # Schedule / logging intervals.
    p.add_argument('--num_epochs', type=int, default=300)
    p.add_argument('--eval_epoch_interval', type=int, default=50)
    p.add_argument('--save_epoch_interval', type=int, default=50)
    p.add_argument('--print_epoch_interval', type=int, default=5)

    p.add_argument('--wd', type=float, default=1e-5)

    # LR decay schedule.
    p.add_argument('--gamma', type=float, default=0.1)
    p.add_argument('--first_milestone', type=int, default=200)
    p.add_argument('--step_milestone', type=int, default=50)

    p.add_argument('--use_random_erasing', type=str2bool, default=True)
    p.add_argument('--train_strategy', type=str, default='chunk',
                   choices=['multiview', 'chunk'])

    return p.parse_args()
Esempio n. 12
0
def parse(conf: Conf):
    """Command-line arguments for the two-network heatmap comparison tool."""
    p = argparse.ArgumentParser(description='Train img to video model')
    p = conf.add_default_args(p)

    # Two checkpointed networks to compare (positional order matters).
    p.add_argument('net1', type=str, help='Path to TriNet base folder.')
    p.add_argument('--chk_net1', type=str, default='chk_end',
                   help='checkpoint name')
    p.add_argument('net2', type=str, help='Path to TriNet base folder.')
    p.add_argument('--chk_net2', type=str, default='chk_end',
                   help='checkpoint name')
    p.add_argument('--dest_path', type=Path, default='/tmp/heatmaps_out')

    args = p.parse_args()
    # Fixed settings for this offline tool, not exposed on the CLI.
    args.train_strategy = 'multiview'
    args.use_random_erasing = False
    args.num_train_images = 0
    args.img_test_batch = 32
    return args
Esempio n. 13
0
def main():
    """Train a re-id network with a joint triplet + cross-entropy objective,
    evaluating and checkpointing at fixed epoch intervals."""
    conf = Conf()
    args = parse(conf)
    device = conf.get_device()

    conf.suppress_random(set_determinism=args.set_determinism)
    saver = Saver(conf.log_path, args.exp_name)

    train_loader, query_loader, gallery_loader, queryimg_loader, galleryimg_loader = \
        get_dataloaders(args.dataset_name, conf.nas_path, device, args)

    num_pids = train_loader.dataset.get_num_pids()

    # Wrapped in DataParallel; the bare model stays reachable via net.module
    # (used below for logging and checkpointing).
    net = nn.DataParallel(get_model(args, num_pids))
    net = net.to(device)

    saver.write_logs(net.module, vars(args))

    opt = Adam(net.parameters(), lr=1e-4, weight_decay=args.wd)
    # Decay LR by `gamma` at first_milestone, then every step_milestone epochs.
    milestones = list(
        range(args.first_milestone, args.num_epochs, args.step_milestone))
    scheduler = lr_scheduler.MultiStepLR(opt,
                                         milestones=milestones,
                                         gamma=args.gamma)

    triplet_loss = OnlineTripletLoss('soft', True, reduction='mean').to(device)
    class_loss = nn.CrossEntropyLoss(reduction='mean').to(device)

    print("EXP_NAME: ", args.exp_name)

    for e in range(args.num_epochs):

        # Periodic evaluation (skipped at epoch 0).
        if e % args.eval_epoch_interval == 0 and e > 0:
            ev = Evaluator(net, query_loader, gallery_loader, queryimg_loader,
                           galleryimg_loader, DATA_CONFS[args.dataset_name],
                           device)
            ev.eval(saver, e, args.verbose)

        # Periodic checkpointing (skipped at epoch 0).
        if e % args.save_epoch_interval == 0 and e > 0:
            saver.save_net(net.module, f'chk_{e // args.save_epoch_interval}')

        # Running average of the two loss components over this epoch.
        avm = AvgMeter(['triplet', 'class'])

        for it, (x, y, cams) in enumerate(train_loader):
            net.train()

            x, y = x.to(device), y.to(device)

            opt.zero_grad()
            embeddings, f_class = net(x, return_logits=True)

            # Joint metric-learning + classification objective.
            triplet_loss_batch = triplet_loss(embeddings, y)
            class_loss_batch = class_loss(f_class, y)
            loss = triplet_loss_batch + class_loss_batch

            avm.add([triplet_loss_batch.item(), class_loss_batch.item()])

            loss.backward()
            opt.step()

        # Periodic console + tensorboard logging of epoch-average losses.
        if e % args.print_epoch_interval == 0:
            stats = avm()
            str_ = f"Epoch: {e}"
            for (l, m) in stats:
                str_ += f" - {l} {m:.2f}"
                saver.dump_metric_tb(m, e, 'losses', f"avg_{l}")
            saver.dump_metric_tb(opt.param_groups[0]['lr'], e, 'lr', 'lr')
            print(str_)

        scheduler.step()

    # Final evaluation and checkpoint after the last epoch.
    ev = Evaluator(net, query_loader, gallery_loader, queryimg_loader,
                   galleryimg_loader, DATA_CONFS[args.dataset_name], device)
    ev.eval(saver, e, args.verbose)

    saver.save_net(net.module, 'chk_end')
    saver.writer.close()
Esempio n. 14
0
from keras.optimizers import SGD
import argparse
from utils.conf import Conf
from augmentors.augmentorFactory import createAugmentor
from augmentors.generator import Generator
from techniques.techniqueFactory import createTechnique
from utils.minivgg import MiniVGGNet

# Parse the single required argument: the configuration file path.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to configuration file")
args = vars(ap.parse_args())

conf = Conf(args["conf"])

# Read the configuration values.
problem = conf["problem"]
annotationMode = conf["annotation_mode"]
outputMode = conf["output_mode"]

# This script only handles the Keras output mode; stop otherwise.
if outputMode != "keras":
    exit()

generationMode = conf["generation_mode"]
inputPath = conf["input_path"]
parameters = conf["parameters"]
augmentationTechniques = conf["augmentation_techniques"]

# Build the augmentor from the configuration.
augmentor = createAugmentor(problem, annotationMode, outputMode,
                            generationMode, inputPath, parameters)

# Third, we load the techniques and add them to the augmentor
def saveImagePathAsHome(patient):
    """Remember the patient's photo as the default file for future dialogs,
    initialising home_dir from it when unset or stale."""
    file = os.path.abspath(patient.photo)
    cf.set("home_file", file)
    # Only (re)initialise home_dir when it is missing or no longer a directory.
    home_dir_invalid = not cf.isIn("home_dir") or not os.path.isdir(cf.get("home_dir"))
    if home_dir_invalid:
        cf.set("home_dir", os.path.abspath(os.path.dirname(file)) + os.sep)
Esempio n. 16
0
                str_ = f"Epoch: {self._epoch}"
                for (l, m) in stats:
                    str_ += f" - {l} {m:.2f}"
                    self.saver.dump_metric_tb(m, self._epoch, 'losses', f"avg_{l}")
                self.saver.dump_metric_tb(opt.defaults['lr'], self._epoch, 'lr', 'lr')
                print(str_)

            self._epoch += 1

        self._gen += 1

        return student_net


if __name__ == '__main__':
    conf = Conf()
    device = conf.get_device()
    args = parse(conf)

    conf.suppress_random(set_determinism=args.set_determinism)

    train_loader, query_loader, gallery_loader, queryimg_loader, galleryimg_loader = \
        get_dataloaders(args.dataset_name, conf.nas_path, device, args)

    # The teacher is always restored from a checkpoint.
    teacher_net: TriNet = Saver.load_net(args.teacher,
                                         args.teacher_chk_name, args.dataset_name).to(device)

    # The student starts as a deep copy of the teacher unless an explicit
    # student checkpoint was supplied on the command line.
    student_net: TriNet = deepcopy(teacher_net) if args.student is None \
        else Saver.load_net(args.student, args.student_chk_name, args.dataset_name)
    student_net = student_net.to(device)
Esempio n. 17
0
    )
    ap.add_argument(
        "--video-file",
        required=False,
        help=
        "Full path to video file to read from. If this is not set, then the Webcam will be used."
    )
    ap.add_argument(
        "--pascal-voc",
        required=False,
        help=
        "Path to rectangle annotated file in PascalVOC format with ROIs to look for motion"
    )
    args = vars(ap.parse_args())

    conf = Conf(args['bg_config'])

    if conf['display_video']:
        original_window_name = 'Original'
        cv2.namedWindow(original_window_name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(original_window_name, 600, 600)
        cv2.moveWindow(original_window_name, 600, 100)

    if conf['display_mask']:
        subtractor_name = conf['named_subtractor']
        mask_window_name = f'{subtractor_name} Mask'
        cv2.namedWindow(mask_window_name, cv2.WINDOW_NORMAL)
        cv2.moveWindow(mask_window_name, 200, 250)

    # Determine if we are reading a video or using the computer camera
    if args.get("video_file", None) != None:
Esempio n. 18
0
def main():
    """Render side-by-side Grad-CAM attention maps for two checkpointed nets
    over gallery images, saving per-net, original and combined images."""
    conf = Conf()
    conf.suppress_random()
    device = conf.get_device()

    args = parse(conf)

    # Output layout: <dest_path>/<net1>__vs__<net2>/{both, <net1>, <net2>, orig}
    dest_path = args.dest_path / (Path(args.net1).name + '__vs__' +
                                  Path(args.net2).name)
    dest_path.mkdir(exist_ok=True, parents=True)

    both_path = dest_path / 'both'
    both_path.mkdir(exist_ok=True, parents=True)

    net1_path = dest_path / Path(args.net1).name
    net1_path.mkdir(exist_ok=True, parents=True)

    net2_path = dest_path / Path(args.net2).name
    net2_path.mkdir(exist_ok=True, parents=True)

    orig_path = dest_path / 'orig'
    orig_path.mkdir(exist_ok=True, parents=True)

    # ---- Restore net
    net1 = Saver.load_net(args.net1, args.chk_net1,
                          args.dataset_name).to(device)
    net2 = Saver.load_net(args.net2, args.chk_net2,
                          args.dataset_name).to(device)

    net1.eval()
    net2.eval()

    train_loader, query_loader, gallery_loader, queryimg_loader, galleryimg_loader = \
        get_dataloaders(args.dataset_name, conf.nas_path, device, args)

    # register hooks
    # Capture activations of the same backbone layer in both nets so
    # extract_grad_cam can read them after each forward pass.
    hook_net_1, hook_net_2 = Hook(), Hook()

    net1.backbone.features_layers[4].register_forward_hook(hook_net_1)
    net2.backbone.features_layers[4].register_forward_hook(hook_net_2)

    dst_idx = 0

    for idx_batch, (vids,
                    *_) in enumerate(tqdm(galleryimg_loader, 'iterating..')):
        # Only the last 50 batches of the gallery loader are processed.
        if idx_batch < len(galleryimg_loader) - 50:
            continue
        # Clear stale gradients and hook state before each Grad-CAM pass.
        net1.zero_grad()
        net2.zero_grad()

        hook_net_1.reset()
        hook_net_2.reset()

        vids = vids.to(device)
        attn_1 = extract_grad_cam(net1, vids, device, hook_net_1)
        attn_2 = extract_grad_cam(net2, vids, device, hook_net_2)

        # Indexing below implies attn maps are (batch, views, H, W) —
        # shapes are set by extract_grad_cam; confirm there if changing.
        B, N_VIEWS = attn_1.shape[0], attn_1.shape[1]

        for idx_b in range(B):
            for idx_v in range(N_VIEWS):

                el_img = vids[idx_b, idx_v]
                el_attn_1 = attn_1[idx_b, idx_v]
                el_attn_2 = attn_2[idx_b, idx_v]

                # CHW tensor -> HWC numpy image for OpenCV / saving.
                el_img = el_img.cpu().numpy().transpose(1, 2, 0)
                el_attn_1 = el_attn_1.cpu().numpy()
                el_attn_2 = el_attn_2.cpu().numpy()

                # Undo the dataset normalisation (these are the standard
                # ImageNet mean/std values) to recover a displayable image.
                mean, var = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                el_img = (el_img * var) + mean
                el_img = np.clip(el_img, 0, 1)

                # Smooth and upsample each attention map to the image size.
                el_attn_1 = cv2.blur(el_attn_1, (3, 3))
                el_attn_1 = cv2.resize(el_attn_1,
                                       (el_img.shape[1], el_img.shape[0]),
                                       interpolation=cv2.INTER_CUBIC)

                el_attn_2 = cv2.blur(el_attn_2, (3, 3))
                el_attn_2 = cv2.resize(el_attn_2,
                                       (el_img.shape[1], el_img.shape[0]),
                                       interpolation=cv2.INTER_CUBIC)

                # One overlay per net, the bare image, and a side-by-side pair.
                save_img(el_img, el_attn_1, net1_path / f'{dst_idx}.png')
                save_img(el_img, el_attn_2, net2_path / f'{dst_idx}.png')

                save_img(el_img, None, orig_path / f'{dst_idx}.png')

                save_img(np.concatenate([el_img, el_img], 1),
                         np.concatenate([el_attn_1, el_attn_2], 1),
                         both_path / f'{dst_idx}.png')

                dst_idx += 1
Esempio n. 19
0
from threading import Thread
from imutils.io import TempFile
from imutils.video import VideoStream
from Logix_dir.MotionWriter import KeyClipWriter, Uploader
from rtcbot import Websocket, RTCConnection, CVCamera, CVDisplay

# Camera / display and global state for the motion-clip recorder.
cam = CVCamera()
display = CVDisplay()

trans = 0
PS = False  # pin state resp from Arduino
cams = False  # flag var for webrtc video started

# Rolling buffer writer for key-event video clips.
kcw = KeyClipWriter(bufSize=32)

conf = Conf("config/config.json")
up = Uploader(conf)

path = ''

try:
    arduino = serial.Serial('/dev/ttyUSB0', 9600, timeout=.1)
    time.sleep(1)  # give the serial link a moment to settle
except Exception:
    # Best-effort: run without the Arduino if it is not plugged in.
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    print("Arduino not connected")


def ard_snd(msg):
    try:
        arduino.write(msg.encode())
Esempio n. 20
0
def load(language=DEFAULT_LANG):
    """Load the UI message catalogue into the global ``messages`` dict.

    The language stored in the config (key "language") takes precedence;
    otherwise the explicitly passed *language* is used. The original code
    ignored the argument entirely and always fell back to DEFAULT_LANG,
    making the parameter dead — fixed here while keeping no-argument
    calls behaviourally identical.
    """
    global messages
    if cf.isIn("language"):
        language = cf.get("language")
    # Explicit encoding: the catalogues are JSON, which is UTF-8 by spec.
    with open("files/lang/%s.json" % language, encoding="utf-8") as f:
        messages = json.load(f)