Code example #1
def main():
    """

    Run train and predict for the various Lorenz map prediction models with user
    provided arguments. Assets are saved in the 'assets' folder in the project directory.

    Models can be Conditional Wavenet-inspired (cw), Unconditional Wavenet-inspired (w),

    Targets to predict are x (ts=0), y(ts=1), or z(ts=2) Lorenz trajectories.
    """

    argparser = ArgParser()
    options = argparser.parse_args()
    data_generator = LorenzMapData(options)
    train_data, test_data = data_generator.generate_train_test_sets()

    # Train
    trainer = Train(options)
    train_iter = DIterators(options).build_iterator(train_data, for_train=True)
    trainer.train(train_iter)

    # Predict on the test set
    predictor = Predict(options)
    predict_iter = DIterators(options).build_iterator(test_data, for_train=False)
    predictor.predict(predict_iter)

    # Evaluate performance on test set
    evaluator = Evaluate(options)
    evaluator()
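
The project's ArgParser is not shown. As a rough sketch of the options the docstring implies, assuming it wraps argparse.ArgumentParser; the flag names (--model, --ts) and defaults are guesses, not the project's actual interface:

import argparse

class ArgParser(argparse.ArgumentParser):
    """Hypothetical parser for the entry point above; flag names are assumptions."""

    def __init__(self):
        super().__init__(description="Train and predict Lorenz map models.")
        # Model family: Conditional (cw) or Unconditional (w) Wavenet-inspired.
        self.add_argument("--model", choices=["cw", "w"], default="cw")
        # Target trajectory: x (ts=0), y (ts=1), or z (ts=2).
        self.add_argument("--ts", type=int, choices=[0, 1, 2], default=0)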
Code example #2
def _parse_arguments():
    """
    Parse the command arguments.

    Returns
    -------
    dict
        The command arguments as a dictionary.
    """
    parser = ArgParser(description="A charting tool.")

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--version", action="version", version=VERSION)

    args = parser.parse_args()
    return args
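
If ArgParser wraps argparse.ArgumentParser, as the other examples suggest, parse_args returns a Namespace rather than the dict the docstring promises; vars() performs that conversion. A minimal demonstration with plain argparse:

import argparse

parser = argparse.ArgumentParser(description="A charting tool.")
args = parser.parse_args([])   # Namespace(...)
args_dict = vars(args)         # {} -- the dict form the docstring describes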
Code example #3
File: Parser.py  Project: Racconkek/Mp3Parser
def handle_argument_commands(parser):
    file_name = None
    ar = ArgParser()
    try:
        file_name, commands = ar.parse_arguments()
        # Dispatch each requested command to the matching parser method.
        for command in commands:
            if command == 'parse tag1':
                parser.parse_tag1(file_name)
            elif command == 'parse tag2':
                parser.parse_tag2(file_name)
            elif command == 'parse':
                parser.parse_file(file_name)
    except Exception as e:
        print(e)

    return file_name
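
The if/elif chain grows with every new command; a dispatch table keeps the command-to-method mapping in one place. The same logic restructured as a sketch (method names taken from the snippet above):

def handle_argument_commands(parser):
    file_name = None
    ar = ArgParser()
    try:
        file_name, commands = ar.parse_arguments()
        # Map each command string to the bound parser method that handles it.
        handlers = {
            'parse tag1': parser.parse_tag1,
            'parse tag2': parser.parse_tag2,
            'parse': parser.parse_file,
        }
        for command in commands:
            handler = handlers.get(command)
            if handler is not None:
                handler(file_name)
    except Exception as e:
        print(e)

    return file_name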
Code example #4
File: DragonTrainer.py  Project: e96031413/DRAGON
def create_training_dir(self):
    # Check whether the directory already exists
    if os.path.exists(self.training_dir):
        print(f"Training dir {self.training_dir} already exists.")
        if os.path.exists(
                os.path.join(self.training_dir, "best-checkpoint")):
            print("Found pretrained model")
            return False
        else:
            raise Exception(
                f"Training dir {self.training_dir} already exists, "
                f"but no pretrained model was found.")
    print(f"Current training directory for this run: {self.training_dir}")
    os.makedirs(self.training_dir)
    # Save the current hyperparameters to the training dir
    ArgParser.save_to_file(UserArgs, self.training_dir, self.model_name)
    return True
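
The same create-or-reuse pattern, reduced to a standalone runnable sketch; the names here are illustrative, not from the DRAGON project:

import os
import tempfile

def prepare_run_dir(run_dir):
    """Return True if a fresh run directory was created, False if a checkpoint exists."""
    checkpoint = os.path.join(run_dir, "best-checkpoint")
    if os.path.exists(run_dir):
        if os.path.exists(checkpoint):
            return False  # reuse the pretrained model
        raise RuntimeError(f"{run_dir} exists but contains no checkpoint")
    os.makedirs(run_dir)
    return True

root = tempfile.mkdtemp()
print(prepare_run_dir(os.path.join(root, "run-001")))  # True: created fresh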
Code example #5
File: main.py  Project: slaclab/switchtest
def _parse_arguments():
    """
    Parse the command arguments.

    Returns
    -------
    dict
        The command arguments as a dictionary.
    """
    parser = ArgParser(description="Test hardware responsiveness using IPMI commands.")
    parser.add_argument("config-file", help="The name of test configuration file.")

    parser.add_argument("--verbose-logging", action="store_true")
    parser.add_argument("--version", action="version", version=VERSION)

    args = parser.parse_args()
    return args
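
One subtlety: for positional arguments, argparse keeps the hyphen in the destination name, so the value cannot be read as args.config_file. A quick demonstration with plain argparse:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("config-file", help="The name of the test configuration file.")
args = parser.parse_args(["test.yaml"])

print(vars(args)["config-file"])     # 'test.yaml'
print(getattr(args, "config-file"))  # same value; args.config_file would raise AttributeError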
Code example #6
def main():
    parser = ArgParser()
    args = parser.parse_args()

    gen = Generator(args.latent_dim).to(args.device)
    disc = Discriminator().to(args.device)
    if args.device != 'cpu':
        gen = nn.DataParallel(gen, args.gpu_ids)
        disc = nn.DataParallel(disc, args.gpu_ids)
    # gen = gen.apply(weights_init)
    # disc = disc.apply(weights_init)

    gen_opt = torch.optim.RMSprop(gen.parameters(), lr=args.lr)
    disc_opt = torch.optim.RMSprop(disc.parameters(), lr=args.lr)
    gen_scheduler = torch.optim.lr_scheduler.LambdaLR(gen_opt, lr_lambda=lr_lambda(args.num_epochs))
    disc_scheduler = torch.optim.lr_scheduler.LambdaLR(disc_opt, lr_lambda=lr_lambda(args.num_epochs))
    disc_loss_fn = DiscriminatorLoss().to(args.device)
    gen_loss_fn = GeneratorLoss().to(args.device)

    # dataset = Dataset()
    dataset = MNISTDataset()
    loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers)

    logger = TrainLogger(args, len(loader), phase=None)
    logger.log_hparams(args)

    if args.privacy_noise_multiplier != 0:
        privacy_engine = PrivacyEngine(
            disc,
            batch_size=args.batch_size,
            sample_size=len(dataset),
            alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
            noise_multiplier=args.privacy_noise_multiplier,  # was hard-coded to 0.8
            max_grad_norm=0.02,
            batch_first=True,
        )
        privacy_engine.attach(disc_opt)
        privacy_engine.to(args.device)

    for epoch in range(args.num_epochs):
        logger.start_epoch()
        for cur_step, img in enumerate(tqdm(loader, dynamic_ncols=True)):
            logger.start_iter()
            img = img.to(args.device)
            fake, disc_loss = None, None
            for _ in range(args.step_train_discriminator):
                disc_opt.zero_grad()
                fake_noise = get_noise(args.batch_size, args.latent_dim, device=args.device)
                fake = gen(fake_noise)
                disc_loss = disc_loss_fn(img, fake, disc)
                disc_loss.backward()
                disc_opt.step()

            gen_opt.zero_grad()
            fake_noise_2 = get_noise(args.batch_size, args.latent_dim, device=args.device)
            fake_2 = gen(fake_noise_2)
            gen_loss = gen_loss_fn(img, fake_2, disc)
            gen_loss.backward()
            gen_opt.step()
            if args.privacy_noise_multiplier != 0:
                epsilon, best_alpha = privacy_engine.get_privacy_spent(args.privacy_delta)

            logger.log_iter_gan_from_latent_vector(img, fake, gen_loss, disc_loss, epsilon if args.privacy_noise_multiplier != 0 else 0)
            logger.end_iter()

        logger.end_epoch()
        gen_scheduler.step()
        disc_scheduler.step()
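
The snippet builds its schedulers from lr_lambda(args.num_epochs), whose definition is not shown; a common choice is a multiplier that decays the learning rate linearly to zero over training. A sketch under that assumption:

def lr_lambda(num_epochs):
    """Return a LambdaLR multiplier that decays the LR linearly to zero (assumed form)."""
    def step(epoch):
        return 1.0 - epoch / float(num_epochs)
    return step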
Code example #7
from mirrors_manager import MirrorsManager
from arg_parser import ArgParser
from logger import Logger

logger = Logger(__name__).logger

args = ArgParser().parse_args()
logger.debug(f'Args which were received are:\n{args}')

mirrors_manager = MirrorsManager(args)
mirrors_manager.run()
Code example #8

@app.route('/setall', methods=['POST'])
def set_all():
    color = request.args.get('color')
    if color == 'red':
        pixelservice.set_all(200, 0, 0)
    else:
        r = request.args.get('r') or 0
        g = request.args.get('g') or 0
        b = request.args.get('b') or 0
        pixelservice.set_all(int(r), int(g), int(b))
    return "OK"


@app.route('/setpattern', methods=['POST'])
def set_pattern():
    name = request.args.get('name')
    if not name:
        return 'invalid pattern name', 400
    pixelservice.set_pattern(name)
    return "OK"


if __name__ == '__main__':
    args = ArgParser(DESCRIPTION).parse_args()
    host = args.host or None
    pixelservice = PixelService(NUM_PIXELS)
    pixelservice.start_displayer(args.device)
    app.run(host)
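
For a quick smoke test of the two endpoints, something like the following works, assuming the service listens on Flask's default localhost:5000 and that a 'rainbow' pattern exists (both are assumptions):

import requests

base = "http://localhost:5000"
requests.post(f"{base}/setall", params={"color": "red"})              # all pixels red
requests.post(f"{base}/setall", params={"r": 0, "g": 128, "b": 255})  # explicit RGB
requests.post(f"{base}/setpattern", params={"name": "rainbow"})       # pattern name is illustrative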
Code example #9
#!/usr/bin/python

from arg_parser import ArgParser
from loader import Loader

if __name__ == '__main__':
    arg_parser = ArgParser()
    options = arg_parser.get_options()

    loader = Loader(options)
    if loader.error:
        print(loader.error_message)
        exit(1)
Code example #10
def main():
    arg_parser = ArgParser()
    args = arg_parser.get_args()

    input_file = args.input

    # If an input file is given, use it; otherwise fall back to the webcam
    if input_file:
        if not os.path.isfile(input_file):
            log.error("Input file cannot be found")
            exit()
        input_feeder = InputFeeder("video", input_file)
    else:
        input_feeder = InputFeeder("cam")

    face_detection_model = FaceDetection(args.face_detection_model,
                                         args.device, args.extensions)
    face_detection_model.load_model()

    facial_landmarks_model = FacialLandmarksDetection(
        args.facial_landmark_detection_model, args.device, args.extensions)
    facial_landmarks_model.load_model()

    gaze_model = GazeEstimation(args.gaze_estimation_model, args.device,
                                args.extensions)
    gaze_model.load_model()

    head_pose_model = HeadPoseEstimation(args.head_pose_estimation_model,
                                         args.device, args.extensions)
    head_pose_model.load_model()

    mouse_controller = MouseController('medium', 'fast')

    input_feeder.load_data()

    frame_count = 0
    total_face_detection_inference_time = 0
    total_facial_landmark_inference_time = 0
    total_head_pose_inference_time = 0
    total_gaze_estimation_inference_time = 0
    total_inference_time = 0
    for ret, frame in input_feeder.next_batch():

        if not ret:
            log.error("Failed to read a frame from the input feeder")
            break

        frame_count += 1

        if frame_count % args.mouse_update_interval == 0:
            cv2.imshow('Input', frame)

        key_pressed = cv2.waitKey(60)

        # Run inference on the face detection model
        start_time = time.time()
        cropped_face, face_coordinates = face_detection_model.predict(
            frame.copy(), args.probability_threshold)
        finish_time = time.time()
        total_face_detection_inference_time += finish_time - start_time
        total_inference_time += finish_time - start_time

        # If no face detected get the next frame
        if len(face_coordinates) == 0:
            continue

        # Run inference on the facial landmark detection model
        start_time = time.time()
        results = facial_landmarks_model.predict(cropped_face.copy())
        finish_time = time.time()
        left_eye_coordinates = results[0]
        right_eye_coordinates = results[1]
        left_eye_image = results[2]
        right_eye_image = results[3]
        left_eye_crop_coordinates = results[4]
        right_eye_crop_coordinates = results[5]
        total_facial_landmark_inference_time += finish_time - start_time
        total_inference_time += finish_time - start_time

        # Run inference on the head pose estimation model
        start_time = time.time()
        head_pose = head_pose_model.predict(cropped_face.copy())
        finish_time = time.time()
        total_head_pose_inference_time += finish_time - start_time
        total_inference_time += finish_time - start_time

        # Run inference on the gaze estimation model
        start_time = time.time()
        new_mouse_x_coordinate, new_mouse_y_coordinate, gaze_vector = gaze_model.predict(
            left_eye_image, right_eye_image, head_pose)
        finish_time = time.time()
        total_gaze_estimation_inference_time += finish_time - start_time
        total_inference_time += finish_time - start_time

        if frame_count % args.mouse_update_interval == 0:
            log.info("Mouse controller new coordinates: x = {}, y = {}".format(
                new_mouse_x_coordinate, new_mouse_y_coordinate))
            mouse_controller.move(new_mouse_x_coordinate,
                                  new_mouse_y_coordinate)

            # Optional visualization configuration:
            if args.show_detected_face:
                showDetectedFace(frame, face_coordinates)

            if args.show_head_pose:
                showHeadPose(frame, head_pose)

            if args.show_facial_landmarks:
                showFacialLandmarks(cropped_face, left_eye_crop_coordinates,
                                    right_eye_crop_coordinates)

            if args.show_gaze_estimation:
                showGazeEstimation(frame, right_eye_coordinates,
                                   left_eye_coordinates, gaze_vector,
                                   cropped_face, face_coordinates)

        # Break if escape key pressed
        if key_pressed == 27:
            log.warning("Keyboard interrupt triggered")
            break

    # Release the capture and destroy any OpenCV windows
    cv2.destroyAllWindows()
    input_feeder.close()
    log.info("Average face detection inference time: {} seconds".format(
        total_face_detection_inference_time / frame_count))
    log.info(
        "Average facial landmark detection inference time: {} seconds".format(
            total_facial_landmark_inference_time / frame_count))
    log.info("Average head pose estimation inference time: {} seconds".format(
        total_head_pose_inference_time / frame_count))
    log.info("Average gaze estimation inference time: {} seconds".format(
        total_gaze_estimation_inference_time / frame_count))
    log.info("Average total inference time: {} seconds".format(
        total_inference_time / frame_count))
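
The four inference blocks repeat the same start/stop timing boilerplate; a small context manager could centralize it. A possible refactor, not part of the original project:

import time
from contextlib import contextmanager

@contextmanager
def timed(totals, key):
    """Accumulate the block's wall-clock time into totals[key] and totals['total']."""
    start = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - start
        totals[key] = totals.get(key, 0.0) + elapsed
        totals["total"] = totals.get("total", 0.0) + elapsed

# Inside the frame loop it would read, for example:
# with timed(totals, "face_detection"):
#     cropped_face, face_coordinates = face_detection_model.predict(
#         frame.copy(), args.probability_threshold)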
Code example #11
def create_arg_parser(self):
    # Lazily build the parser on first use, then return the cached instance.
    if self._arg_parser is None:
        self._arg_parser = ArgParser(self)
    return self._arg_parser
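
This is the classic lazy-initialization (memoization) pattern. On Python 3.8+, functools.cached_property expresses the same caching declaratively, assuming the parser can be exposed as a property; argparse.ArgumentParser stands in for the project's ArgParser here:

from functools import cached_property
import argparse

class App:
    @cached_property
    def arg_parser(self):
        # Computed once on first access, then cached on the instance.
        return argparse.ArgumentParser(description="demo")

app = App()
assert app.arg_parser is app.arg_parser  # the same cached instance both times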
Code example #12
File: commands.py  Project: lapakota/kv_storage
def execute_command(self, storage, input_data):
    str_command, arg1, arg2 = ArgParser.parse_args(input_data)
    try:
        return self.handle_command(str_command, storage, arg1, arg2)
    except KeyError:
        # Presumably raised when the command name is not recognized.
        return 'Error with input data'
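
handle_command is not shown; the KeyError handler suggests it resolves the command name through a mapping. A hedged guess at its shape, with command names assumed from a typical key-value storage and a plain dict standing in for the storage object:

def handle_command(str_command, storage, arg1, arg2):
    # Unknown command names fall through the dict lookup and raise KeyError,
    # which execute_command above turns into an error message.
    commands = {
        'set': lambda: storage.__setitem__(arg1, arg2),
        'get': lambda: storage.get(arg1),
        'delete': lambda: storage.pop(arg1, None),
    }
    return commands[str_command]()

store = {}
handle_command('set', store, 'key', 'value')
print(handle_command('get', store, 'key', None))  # 'value'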