Example #1
    def create_analytic(*, model: Any, instance: Any, request: Any) -> None:
        analytic_signal.send(sender=model, instance=instance, request=request)

        logger.success(f"analytic data was created for {instance}")
Example #2
    optimizer = torch.optim.Adam(
        [
            {
                'params': dt_model.gp_layer.parameters()
            },
            {
                'params': likelihood.parameters()
            },
        ],
        lr=0.01,
    )

    dt_model.train()
    likelihood.train()
    # We would have to set a new loss object each time.
    mll = gpytorch.mlls.VariationalELBO(likelihood,
                                        dt_model.gp_layer,
                                        num_data=num_tasks)

    optimizer.zero_grad()
    output = dt_model(test_shape_x)
    loss = -mll(output, test_shape_y)
    logger.error(output.rsample().size())
    logger.success(loss)
    loss.backward()
    optimizer.step()

    new_shape_x = torch.randn(2, 4, 5, 5)

    output = dt_model(new_shape_x)
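The snippet above runs a single optimisation step; in practice the same optimizer, model, likelihood and `mll` objects would be reused over many iterations. A minimal sketch of such a loop, assuming the tensors defined above stay fixed:

    # Illustrative only: repeat the single step shown above for several iterations.
    for i in range(50):
        optimizer.zero_grad()
        output = dt_model(test_shape_x)
        loss = -mll(output, test_shape_y)
        loss.backward()
        optimizer.step()
        logger.info(f"iteration {i}: loss {loss.item():.4f}")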
Example #3
def create_animation(
    frame_folder: Path,
    animation_folder: Path,
    observable: str,
    animation_type: str,
    time_slice: Optional[int] = None,
    frame_rate: Optional[int] = 10,
) -> None:
    """
    Method for creating animations from generated volumetric figures.

    Args:
        frame_folder: folder path to figures that will be stitched together.
        animation_folder: folder path to place animations in.
        observable: observable we are creating an animation for.
        animation_type: format of animation. Available: 'gif', 'avi' or 'mp4'.
        time_slice: optional, Euclidean time slice.
        frame_rate: frames per second of animation.

    Raises:
        NameError: if animation_type is not recognized.
    """

    # Removes spaces
    observable = observable.replace(" ", "_")

    input_paths = frame_folder / "frame_t%02d.png"

    if time_slice:
        animation_path = animation_folder / (
            f"{observable.lower()}_{time_slice}.{animation_type}"
        )
    else:
        animation_path = animation_folder / (
            f"{observable.lower()}.{animation_type}"
        )

    if animation_type == "gif":
        cmd = [
            "convert",
            "-delay",
            "1",
            "-loop",
            "0",
            str(frame_folder / "*.png"),
            str(animation_path),
        ]

    elif animation_type == "mp4":
        cmd = [
            "ffmpeg",
            "-r",
            str(frame_rate),
            "-start_number",
            "0",
            "-i",
            str(input_paths),
            "-c:v",
            "libx264",
            "-crf",
            "0",
            "-preset",
            "veryslow",
            "-c:a",
            "libmp3lame",
            "-b:a",
            "320k",
            "-y",
            str(animation_path),
        ]

    elif animation_type == "avi":
        cmd = [
            "ffmpeg",
            "-r",
            str(frame_rate),
            "-i",
            str(input_paths),
            "-y",
            "-qscale:v",
            "0",
            str(animation_path),
        ]
    else:
        raise NameError(
            f"{animation_type} is not a recognized animation type."
        )

    logger.info(f"Running command: {' '.join(cmd)}")

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    _ = proc.stdout.read()

    logger.success(f"Animation {animation_path} created.")
Example #4
    logger.info("Starting the segmenter control process (step 4/4)")
    segmenter_thread = planktoscope.segmenter.SegmenterProcess(
        shutdown_event, "/home/pi/data"
    )
    segmenter_thread.start()

    # Starts the module process
    # Uncomment here as needed
    # logger.info("Starting the module process")
    # module_thread = planktoscope.module.ModuleProcess(shutdown_event)
    # module_thread.start()

    logger.info("Starting the display module")
    display = planktoscope.display.Display()

    logger.success("Looks like everything is set up and running, have fun!")
    planktoscope.light.ready()

    while run:
        # TODO look into ways of restarting the dead threads
        logger.trace("Running around in circles while waiting for someone to die!")
        if not stepper_thread.is_alive():
            logger.error("The stepper process died unexpectedly! Oh no!")
            break
        if imager_thread and not imager_thread.is_alive():
            logger.error("The imager process died unexpectedly! Oh no!")
            break
        if not segmenter_thread.is_alive():
            logger.error("The segmenter process died unexpectedly! Oh no!")
            break
        time.sleep(1)
Example #5
def main():
	"""
	Main gameloop driver.
	"""
	logger.info("Starting game")
	pygame.init()

	screen = pygame.display.set_mode((WIDTH, HEIGHT))
	clock = pygame.time.Clock()
	screen.fill(pygame.Color("white"))
	gs = GameState()
	load_images()

	running = True
	# Flag for when a move is made, triggers get_valid_moves-call
	move_made = False
	valid_moves = gs.get_valid_moves()
	# Keeps track of last clicked square: (x, y)
	selected_square = ()
	# Keeps track of player clicks: [(x, y), (x, y)]
	player_clicks = []
	while running:
		for event in pygame.event.get():
			if event.type == pygame.QUIT:
				logger.info("Exiting")
				running = False
			elif event.type == pygame.MOUSEBUTTONDOWN:
				pos = pygame.mouse.get_pos()
				col, row = pos[0] // SQUARE_SIZE, pos[1] // SQUARE_SIZE
				# Same square clicked twice
				if selected_square == (row, col):
					# Undo last clicks
					selected_square = ()
					player_clicks = []
				else:
					selected_square = (row, col)
					player_clicks.append(selected_square)
				if len(player_clicks) == 2:
					move = Move(gs.board, player_clicks[0], player_clicks[1])
					if move in valid_moves:
						move = valid_moves[valid_moves.index(move)]
						gs.make_move(move)
						move_made = True
						selected_square = ()
						player_clicks = []
						logger.success(f"Move: {move.PGN}")
					else:
						logger.error(f"{move.move_notation()} - Not a valid move")
						player_clicks = [selected_square]
			elif event.type == pygame.KEYDOWN:
				# Undo move on left arrow-key
				if event.key == pygame.K_LEFT:
					gs.undo_move()
					# As valid_moves has been refreshed with the last move, undoing it needs to generate a new move-list again
					move_made = True

		if move_made:
			valid_moves = gs.get_valid_moves()
			move_made = False
		if gs.stalemate or gs.checkmate:
			running = False
			if gs.stalemate:
				logger.info("Stalemate")
			if gs.checkmate:
				logger.info("Mate!")

		draw_game_state(screen, gs)
		clock.tick(MAX_FPS)
		pygame.display.flip()
Example #6
    def run(self, show_progress_bar=None):
        """Main routine of the simulation.

        Note:
            Make sure to call :meth:`setup` prior to this function.

        Arguments:
            show_progress_bar (:obj:`bool`, optional): Whether to show fancy progress bar via tqdm.
                By default, only show if stdout is a terminal and Veros is running on a single process.

        """
        vs = self.state

        logger.info('\nStarting integration for {0[0]:.1f} {0[1]}'.format(time.format_time(vs.runlen)))

        start_time, start_iteration = vs.time, vs.itt
        profiler = None

        pbar = progress.get_progress_bar(vs, use_tqdm=show_progress_bar)

        with handlers.signals_to_exception():
            try:
                with pbar:
                    while vs.time - start_time < vs.runlen:
                        with vs.timers['diagnostics']:
                            diagnostics.write_restart(vs)

                        if vs.itt - start_iteration == 3 and rs.profile_mode and rst.proc_rank == 0:
                            # when using bohrium, most kernels should be pre-compiled by now
                            profiler = diagnostics.start_profiler()

                        with vs.timers['main']:
                            self.set_forcing(vs)

                            if vs.enable_idemix:
                                idemix.set_idemix_parameter(vs)

                            with vs.timers['eke']:
                                eke.set_eke_diffusivities(vs)

                            with vs.timers['tke']:
                                tke.set_tke_diffusivities(vs)

                            with vs.timers['momentum']:
                                momentum.momentum(vs)

                            with vs.timers['temperature']:
                                thermodynamics.thermodynamics(vs)

                            if vs.enable_eke or vs.enable_tke or vs.enable_idemix:
                                advection.calculate_velocity_on_wgrid(vs)

                            with vs.timers['eke']:
                                if vs.enable_eke:
                                    eke.integrate_eke(vs)

                            with vs.timers['idemix']:
                                if vs.enable_idemix:
                                    idemix.integrate_idemix(vs)

                            with vs.timers['tke']:
                                if vs.enable_tke:
                                    tke.integrate_tke(vs)

                            utilities.enforce_boundaries(vs, vs.u[:, :, :, vs.taup1])
                            utilities.enforce_boundaries(vs, vs.v[:, :, :, vs.taup1])
                            if vs.enable_tke:
                                utilities.enforce_boundaries(vs, vs.tke[:, :, :, vs.taup1])
                            if vs.enable_eke:
                                utilities.enforce_boundaries(vs, vs.eke[:, :, :, vs.taup1])
                            if vs.enable_idemix:
                                utilities.enforce_boundaries(vs, vs.E_iw[:, :, :, vs.taup1])

                            momentum.vertical_velocity(vs)

                        with vs.timers['plugins']:
                            for plugin in self._plugin_interfaces:
                                with vs.timers[plugin.name]:
                                    plugin.run_entrypoint(vs)

                        vs.itt += 1
                        vs.time += vs.dt_tracer
                        pbar.advance_time(vs.dt_tracer)

                        self.after_timestep(vs)

                        with vs.timers['diagnostics']:
                            if not diagnostics.sanity_check(vs):
                                raise RuntimeError('solution diverged at iteration {}'.format(vs.itt))

                            if vs.enable_neutral_diffusion and vs.enable_skew_diffusion:
                                isoneutral.isoneutral_diag_streamfunction(vs)

                            diagnostics.diagnose(vs)
                            diagnostics.output(vs)

                        # NOTE: benchmarks parse this, do not change / remove
                        logger.debug(' Time step took {:.2f}s', vs.timers['main'].get_last_time())

                        # permutate time indices
                        vs.taum1, vs.tau, vs.taup1 = vs.tau, vs.taup1, vs.taum1

            except:
                logger.critical('Stopping integration at iteration {}', vs.itt)
                raise

            else:
                logger.success('Integration done\n')

            finally:
                diagnostics.write_restart(vs, force=True)

                timing_summary = [
                    '',
                    'Timing summary:',
                    ' setup time               = {:.2f}s'.format(vs.timers['setup'].get_time()),
                    ' main loop time           = {:.2f}s'.format(vs.timers['main'].get_time()),
                    '   momentum               = {:.2f}s'.format(vs.timers['momentum'].get_time()),
                    '     pressure             = {:.2f}s'.format(vs.timers['pressure'].get_time()),
                    '     friction             = {:.2f}s'.format(vs.timers['friction'].get_time()),
                    '   thermodynamics         = {:.2f}s'.format(vs.timers['temperature'].get_time()),
                    '     lateral mixing       = {:.2f}s'.format(vs.timers['isoneutral'].get_time()),
                    '     vertical mixing      = {:.2f}s'.format(vs.timers['vmix'].get_time()),
                    '     equation of state    = {:.2f}s'.format(vs.timers['eq_of_state'].get_time()),
                    '   EKE                    = {:.2f}s'.format(vs.timers['eke'].get_time()),
                    '   IDEMIX                 = {:.2f}s'.format(vs.timers['idemix'].get_time()),
                    '   TKE                    = {:.2f}s'.format(vs.timers['tke'].get_time()),
                    ' diagnostics and I/O      = {:.2f}s'.format(vs.timers['diagnostics'].get_time()),
                    ' plugins                  = {:.2f}s'.format(vs.timers['plugins'].get_time()),
                ]

                timing_summary.extend([
                    '   {:<22} = {:.2f}s'.format(plugin.name, vs.timers[plugin.name].get_time())
                    for plugin in self._plugin_interfaces
                ])

                logger.debug('\n'.join(timing_summary))

                if profiler is not None:
                    diagnostics.stop_profiler(profiler)
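For orientation, a typical driver for this routine constructs a setup object, calls `setup()` (as the docstring requires) and then `run()`. A minimal sketch, where `MyOceanSetup` is an assumed placeholder for a concrete Veros setup class:

    sim = MyOceanSetup()        # hypothetical setup subclass
    sim.setup()                 # must be called before run(), per the docstring
    sim.run(show_progress_bar=True)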
Example #7
 def print_and_upload_result(self, name, metrics, line):
     with open(self.out, "rb") as out:
         lines = out.read().splitlines()
         result = lines[-line:]
         LOG.success(f"Result for {self.name}, uploaded under {name}:")
         self._print_upload_perf(name, metrics, result)
Example #8
 def Conectar(self):
     T = str(M.b64encode(self.email.encode()))
     self.arqInit = str(R.logs_path) + '/' + T + '_init.conf'
     if os.path.exists(self.arqInit):
         os.remove(self.arqInit)
     if self.contareal:
         self.tipoconta = 'REAL'
     else:
         self.tipoconta = 'PRACTICE'
     if self.ent_tipo == 'P':
         self.ent_valor1 = round(self.valorinicial * self.ent_valor1 / 100,
                                 2)
         self.ent_gale1 = round(self.valorinicial * self.ent_gale1 / 100, 2)
         self.ent_gale2 = round(self.valorinicial * self.ent_gale2 / 100, 2)
     if self.usarsoros:
         self.ent_valor1 = round(self.valorinicial * self.percent / 100, 2)
         self.ent_gale1 = 0
         self.ent_gale2 = 0
         if self.qtdgales > 0:
             self.ent_gale1 = 2
         if self.qtdgales > 1:
             self.ent_gale2 = 2
     else:
         if self.qtdgales < 1:
             self.ent_gale1 = 0
         if self.qtdgales < 2:
             self.ent_gale2 = 0
     if self.loadLista() == 0:
         A.info(
             Idioma.traducao('Lista vazia ou com dia/horário expirados.'))
     A.info(Idioma.traducao('Aguarde, conectando a IQ...'))
     print(Idioma.traducao('Aguarde, conectando a IQ...'))
     self.View.janela.Refresh()
     N.createapiconnection(self.email, self.senha, self.tipoconta)
     conect = API.instance().connection
     if conect:
         if self.VerificarLicenca():
             C.instance().actual_balance = 0
             if self.tipostop == 'P':
                 X = self.stopgain / 100
                 Z = self.stoploss / 100
                 C.instance().win_limit = self.valorinicial * X
                 C.instance().stop_limit = self.valorinicial * Z * -1
             else:
                 C.instance().win_limit = self.stopgain
                 C.instance().stop_limit = self.stoploss * -1
             A.info('Versão: ' + self.versaoapp)
             A.success(
                 Idioma.traducao('Tipo de conta:') + ' {}', self.tipoconta)
             A.info(Idioma.traducao('Parâmetros iniciais:'))
             A.success(
                 Idioma.traducao('Valor inicial: $') + '{}',
                 round(self.valorinicial, 2))
             A.success(
                 Idioma.traducao('Quantidade de gales:') + ' {}',
                 self.qtdgales)
             A.success(
                 Idioma.traducao('Payout mínimo:') + ' {}', self.payoutmin)
             if self.prestop:
                 A.success(Idioma.traducao('Pré-Stop Loss: Ligado'))
             if self.esperarIQ:
                 A.success(Idioma.traducao('Resultado Resp. IQ'))
             else:
                 A.success(Idioma.traducao('Resultado por Taxas'))
             A.success('Delay: {}', self.delay)
             if self.priorid == 0:
                 A.success(
                     Idioma.traducao('Prioridade') + ': ' +
                     Idioma.traducao('Maior Payout'))
             elif self.priorid == 1:
                 A.success(
                     Idioma.traducao('Prioridade') + ': ' +
                     Idioma.traducao('Digital'))
             elif self.priorid == 2:
                 A.success(
                     Idioma.traducao('Prioridade') + ': ' +
                     Idioma.traducao('Binárias'))
             if self.naonoticia:
                 A.success(Idioma.traducao('Não operar em notícia'))
             if self.tendusar:
                 if self.tendemasma:
                     A.success(
                         Idioma.traducao('Não Operar Contra') + ': ' +
                         Idioma.traducao('Usar EMA5 + SMA20'))
                 else:
                     A.success(
                         Idioma.traducao('Não Operar Contra') + ': ' +
                         Idioma.traducao('Quant. Velas') + ': ' +
                         str(self.tendvelas))
             if not self.usarsoros:
                 A.info(Idioma.traducao('Entradas fixas:'))
                 A.success(
                     Idioma.traducao('Entrada: $') + '{}',
                     round(self.ent_valor1, 2))
                 if self.ent_gale1 > 0:
                     A.success(
                         Idioma.traducao('Gale 1: $') + '{}',
                         round(self.ent_gale1, 2))
                 if self.ent_gale2 > 0:
                     A.success(
                         Idioma.traducao('Gale 2: $') + '{}',
                         round(self.ent_gale2, 2))
             else:
                 A.info('Soros:')
                 if self.modelo == 'A':
                     A.success(Idioma.traducao('Modelo: Agressivo'))
                 elif self.modelo == 'M':
                     A.success(Idioma.traducao('Modelo: Moderado'))
                 else:
                     A.success(Idioma.traducao('Modelo: Conservador'))
                 A.success(
                     Idioma.traducao('1ª entrada: %') + '{} | ' +
                     Idioma.traducao('Valor: $') + '{}', self.percent,
                     round(self.ent_valor1, 2))
             A.warning(
                 'WIN %{} - ' +
                 Idioma.traducao('Parar de operar quando atingir: $') +
                 '{}', self.stopgain, round(C.instance().win_limit, 2))
             A.warning(
                 'LOSS %{} - ' +
                 Idioma.traducao('Parar de operar quando atingir: $') +
                 '{}', self.stoploss, round(C.instance().stop_limit, 2))
             print('Versão: ' + self.versaoapp)
             print(Idioma.traducao('Tipo de conta:'), self.tipoconta)
             print(Idioma.traducao('Parâmetros iniciais:'))
             print(Idioma.traducao('Quantidade de gales:'), self.qtdgales)
             print(Idioma.traducao('Payout mínimo:'), self.payoutmin)
             if self.prestop:
                 print(Idioma.traducao('Pré-Stop Loss: Ligado'))
             if self.esperarIQ:
                 print(Idioma.traducao('Resultado Resp. IQ'))
             else:
                 print(Idioma.traducao('Resultado por Taxas'))
             print('Delay: ', self.delay)
             if self.priorid == 0:
                 print(
                     Idioma.traducao('Prioridade') + ': ' +
                     Idioma.traducao('Maior Payout'))
             elif self.priorid == 1:
                 print(
                     Idioma.traducao('Prioridade') + ': ' +
                     Idioma.traducao('Digital'))
             elif self.priorid == 2:
                 print(
                     Idioma.traducao('Prioridade') + ': ' +
                     Idioma.traducao('Binárias'))
             if self.naonoticia:
                 print(Idioma.traducao('Não operar em notícia'))
             if self.tendusar:
                 if self.tendemasma:
                     print(
                         Idioma.traducao('Não Operar Contra') + ': ' +
                         Idioma.traducao('Usar EMA5 + SMA20'))
                 else:
                     print(
                         Idioma.traducao('Não Operar Contra') + ': ' +
                         Idioma.traducao('Quant. Velas') + ': ' +
                         str(self.tendvelas))
             if not self.usarsoros:
                 print(Idioma.traducao('Entradas fixas:'))
                 print(Idioma.traducao('Entrada: $'),
                       round(self.ent_valor1, 2))
                 if self.ent_gale1 > 0:
                     print(Idioma.traducao('Gale 1: $'),
                           round(self.ent_gale1, 2))
                 if self.ent_gale2 > 0:
                     print(Idioma.traducao('Gale 2: $'),
                           round(self.ent_gale2, 2))
             else:
                 print('Soros:')
                 if self.modelo == 'A':
                     print(Idioma.traducao('Modelo: Agressivo'))
                 elif self.modelo == 'M':
                     print(Idioma.traducao('Modelo: Moderado'))
                 else:
                     print(Idioma.traducao('Modelo: Conservador'))
                 print((Idioma.traducao('1ª entrada: %') + '{0} | ' +
                        Idioma.traducao('Valor: $') + '{1}').format(
                            self.percent, round(self.ent_valor1, 2)))
             self.View.janela['valorinic'].update(
                 value=(round(self.valorinicial, 2)))
             self.View.janela['saldoatual'].update(
                 value=(C.instance().balance))
             self.View.janela['stopgainp'].update(value=(self.stopgain))
             self.View.janela['stopgainv'].update(
                 value=(round(C.instance().win_limit, 2)))
             self.View.janela['stoplossp'].update(value=(self.stoploss))
             self.View.janela['stoplossv'].update(
                 value=(round(C.instance().stop_limit, 2)))
             self.View.janela.Refresh()
             if not self.usarsoros:
                 self.valorinicial = 0
             C.instance().sorosgale.config_ini(self.valorinicial,
                                               self.percent, self.modelo)
             return True
     return False
Example #9
    def sync_state(self, pixelArray):
        """ Code to run when in "sync" state
            Synchronising with prospective optical gating for phase-locked triggering.
        """
        logger.debug("Processing frame in prospective optical gating mode.")

        # Gets the phase (in frames) and arrays of SADs between the current frame and the reference sequence
        currentPhaseInFrames, sad, self.pog_settings = pog.phase_matching(
            pixelArray, self.ref_frames, settings=self.pog_settings)
        logger.trace(sad)

        # Convert phase to 2pi base
        current_phase = (
            2 * np.pi *
            (currentPhaseInFrames - self.pog_settings["numExtraRefFrames"]) /
            self.pog_settings["reference_period"])  # rad

        # Calculate cumulative phase (phase) from delta phase (current_phase - last_phase)
        if len(self.frame_history) == 0:  # i.e. first frame
            logger.debug(
                "First frame, using current phase as cumulative phase.")
            delta_phase = 0
            phase = current_phase
            self.last_phase = current_phase
        else:
            delta_phase = current_phase - self.last_phase
            while delta_phase < -np.pi:
                delta_phase += 2 * np.pi
            phase = self.frame_history[-1].metadata[
                "unwrapped_phase"] + delta_phase
            self.last_phase = current_phase

        # Evicts the oldest entry in frame_history if it exceeds the history length that we are meant to be retaining
        # Note: deletion of first list element is potentially a performance issue,
        # although we are hopefully capping the length low enough that it doesn't become a real bottleneck
        if len(self.frame_history) >= self.settings["frame_buffer_length"]:
            del self.frame_history[0]

        # Append PixelArray object to frame_history list with its metadata
        pixelArray.metadata["unwrapped_phase"] = phase
        pixelArray.metadata["sad_min"] = np.argmin(sad)
        self.frame_history.append(pixelArray)

        logger.debug(
            "Current time: {0} s; cumulative phase: {1} (delta:{2:+f}) rad; sad: {3}",
            self.frame_history[-1].metadata["timestamp"],
            self.frame_history[-1].metadata["unwrapped_phase"],
            delta_phase,
            self.frame_history[-1].metadata["sad_min"],
        )

        # If we have at least one period of phase history, have a go at predicting a future trigger time
        # (Note that this prediction can be disabled by enabling "phase_stamp_only" in pog_settings)
        this_predicted_trigger_time_s = None
        sendTriggerNow = 0
        if (len(self.frame_history) > self.pog_settings["reference_period"]
                and self.pog_settings["phase_stamp_only"] != True):
            logger.debug("Predicting trigger...")

            # TODO: JT writes: this seems as good a place as any to highlight the general issue that the code is not doing a great job of precise timing.
            # It determines a delay time before sending the trigger, but then executes a bunch more code.
            # Oh and, more importantly, that delay time is then treated relative to “current_time_s”, which is set *after* doing the phase-matching.
            # That is going to reduce accuracy and precision, and also makes me even more uncomfortable in terms of future-proofing.
            # I think it would be much better to pass around absolute times, not deltas.

            # Gets the trigger response
            logger.trace("Predicting next trigger.")
            time_to_wait_seconds = pog.predict_trigger_wait(
                pa.get_metadata_from_list(
                    self.frame_history,
                    ["timestamp", "unwrapped_phase", "sad_min"]),
                self.pog_settings,
                fitBackToBarrier=True,
            )
            logger.trace("Time to wait: {0} s.".format(time_to_wait_seconds))
            # frame_history is an nx3 array of [timestamp, phase, argmin(SAD)]
            # phase (i.e. frame_history[:,1]) should be cumulative 2Pi phase
            # targetSyncPhase should be in [0,2pi]

            this_predicted_trigger_time_s = (
                self.frame_history[-1].metadata["timestamp"] +
                time_to_wait_seconds)

            # Captures the image
            if time_to_wait_seconds > 0:
                logger.info("Possible trigger after: {0}s",
                            time_to_wait_seconds)

                (
                    time_to_wait_seconds,
                    sendTriggerNow,
                    self.pog_settings,
                ) = pog.decide_trigger(
                    self.frame_history[-1].metadata["timestamp"],
                    time_to_wait_seconds,
                    self.pog_settings,
                )
                if sendTriggerNow != 0:
                    logger.success(
                        "Sending trigger (reason: {0}) at time ({1} plus {2}) s",
                        sendTriggerNow,
                        self.frame_history[-1].metadata["timestamp"],
                        time_to_wait_seconds,
                    )
                    # Trigger only
                    self.trigger_fluorescence_image_capture(
                        this_predicted_trigger_time_s)

                    # Update trigger iterator (for adaptive algorithm)
                    self.trigger_num += 1

        # Update PixelArray with predicted trigger time and trigger type
        self.frame_history[-1].metadata[
            "predicted_trigger_time_s"] = this_predicted_trigger_time_s
        self.frame_history[-1].metadata["trigger_type_sent"] = sendTriggerNow
        logger.debug(
            "Current time: {0} s; predicted trigger time: {1} s; trigger type: {2}",
            self.frame_history[-1].metadata["timestamp"],
            self.frame_history[-1].metadata["predicted_trigger_time_s"],
            self.frame_history[-1].metadata["trigger_type_sent"],
        )

        # store this phase now to calculate the delta phase for the next frame
        self.last_phase = float(current_phase)
Example #10
    def __init__(self, running_file='running'):
        self.running_file = running_file

    def start_scan(self):
        with open(self.running_file, 'w') as f:
            f.write(str(1))

    def check_for_flag(self, flag):
        pass

    def is_scan_done(self):
        with open(self.running_file, 'r') as f:
            running = bool(int(f.read().strip('\n')))
        return not running

    def wait_for_scan_done(self):
        while not self.is_scan_done():
            time.sleep(1)


if __name__ == "__main__":
    md = MockDriver()
    logger.info('Starting scan')
    md.start_scan()
    md.wait_for_scan_done()
    logger.success('Scan done')


    # print(md.check_for_flag("4f6a142e-56ff-f229-4d1d-aa055c000de7"))

Example #11
def run(args):
    hosts = ["localhost", "localhost", "localhost"]

    with infra.ccf.network(hosts,
                           args.build_dir,
                           args.debug_nodes,
                           args.perf_nodes,
                           pdb=args.pdb) as network:
        first_node, backups = network.start_and_join(args)

        term_info = {}
        long_msg = "X" * (2**14)

        # first timer determines after how many seconds each node will be suspended
        timeouts = []
        t = random.uniform(1, 10)
        LOG.info(
            f"Initial timer for node {first_node.node_id} is {t} seconds...")
        timeouts.append((t, first_node))
        for backup in backups:
            t = random.uniform(1, 10)
            LOG.info(
                f"Initial timer for node {backup.node_id} is {t} seconds...")
            timeouts.append((t, backup))

        for t, node in timeouts:
            tm = Timer(
                t,
                timeout,
                args=[node, True, args.election_timeout / 1000],
            )
            tm.start()

        with first_node.node_client() as mc:
            check_commit = infra.checker.Checker(mc)
            check = infra.checker.Checker()

            clients = []
            with contextlib.ExitStack() as es:
                LOG.info("Write messages to nodes using round robin")
                clients.append(
                    es.enter_context(first_node.user_client(format="json")))
                for backup in backups:
                    clients.append(
                        es.enter_context(backup.user_client(format="json")))
                node_id = 0
                for id in range(1, TOTAL_REQUESTS):
                    node_id += 1
                    c = clients[node_id % len(clients)]
                    try:
                        resp = c.rpc("LOG_record", {"id": id, "msg": long_msg})
                    except Exception:
                        LOG.info("Trying to access a suspended node")
                    try:
                        cur_primary, cur_term = network.find_primary()
                        term_info[cur_term] = cur_primary.node_id
                    except Exception:
                        LOG.info("Trying to access a suspended node")
                    id += 1

                # wait for the last request to commit
                final_msg = "Hello world!"
                check_commit(
                    c.rpc("LOG_record", {
                        "id": 1000,
                        "msg": final_msg
                    }),
                    result=True,
                )
                check(
                    c.rpc("LOG_get", {"id": 1000}),
                    result={"msg": final_msg},
                )

                # check that a new node can catch up after all the requests
                new_node = network.create_and_trust_node(
                    lib_name=args.package,
                    host="localhost",
                    args=args,
                )
                assert new_node

                # give new_node a second to catch up
                time.sleep(1)

                with new_node.user_client(format="json") as c:
                    check(
                        c.rpc("LOG_get", {"id": 1000}),
                        result={"msg": final_msg},
                    )

                # assert that view changes actually did occur
                assert len(term_info) > 1

                LOG.success(
                    "----------- terms and primaries recorded -----------")
                for term, primary in term_info.items():
                    LOG.success(f"term {term} - primary {primary}")
Example #12
0
def index():
    get_templates()
    form = GenerateForm()
    form.platform.choices = get_templates()

    if request.method == 'GET':
        return render_template('index.html', form=form)

    if form.validate_on_submit():
        print("Valid form submission")

        # Do BundleGen work
        outputdir = os.path.abspath(
            os.path.join(TMP_DIR, Utils.get_random_string(5)))
        selected_platform = STBPlatform(form.platform.data)

        if not selected_platform.found_config():
            print(f"Could not find config for platform {form.platform.data}")
            raise AppError("Could not find platform")

        img_url = ""
        creds = ""
        if form.image_url.data:
            # If downloading from URL, just use that as-is
            img_url = form.image_url.data
            # Add creds if given
            if form.registry_uname.data and form.registry_password.data:
                creds = f"{form.registry_uname.data}:{form.registry_password.data}"

        elif form.uploaded_img.data:
            # Got an uploaded image (hopefully a tar.gz!)
            f = form.uploaded_img.data
            filename = secure_filename(f.filename)
            upload_filepath = os.path.join(UPLOAD_FOLDER, filename)
            f.save(upload_filepath)

            # Extract the .tar to a temp directory
            img_temp_path = tempfile.mkdtemp()
            with tarfile.open(upload_filepath) as tar:
                tar.extractall(img_temp_path)

            # Delete tar
            os.remove(upload_filepath)

            img_url = f"oci:{img_temp_path}:latest"
            creds = None

        if not img_url:
            print("IMG URL is empty")
            raise AppError("Image URL cannot be empty")

        # Download Image
        img_downloader = ImageDownloader()
        img_path = img_downloader.download_image(
            img_url, creds, selected_platform.get_config())

        if not img_path:
            logger.error("Failed to donwload image")
            raise AppError("Image download failed")

        # Unpack the image with umoci
        tag = ImageDownloader().get_image_tag(img_url)
        img_unpacker = ImageUnpackager()
        img_unpacker.unpack_image(img_path, tag, outputdir)

        # Delete the downloaded image now we've unpacked it
        logger.info(f"Deleting {img_path}")
        shutil.rmtree(img_path)

        # Load app metadata
        app_metadata_image_path = os.path.join(outputdir, "rootfs",
                                               "appmetadata.json")
        image_metadata_exists = os.path.exists(app_metadata_image_path)

        app_metadata_dict = {}

        custom_app_metadata = form.app_metadata.data

        if not image_metadata_exists and not custom_app_metadata:
            # No metadata at all
            logger.error(
                f"Cannot find app metadata file in OCI image and none provided to BundleGen"
            )
            raise AppError("No Metadata provided")
        elif custom_app_metadata:
            # Use custom metadata (takes precedence over any metadata in the image)
            app_metadata_dict = json.loads(custom_app_metadata)
        else:
            # Load metadata from image
            with open(app_metadata_image_path) as metadata:
                app_metadata_dict = json.load(metadata)

        # remove app metadata from image rootfs
        if image_metadata_exists:
            os.remove(app_metadata_image_path)

        # Begin processing. Work in the output dir where the img was unpacked to
        processor = BundleProcessor(selected_platform.get_config(), outputdir,
                                    app_metadata_dict, False,
                                    form.lib_match.data)
        if not processor.check_compatibility():
            # Not compatible - delete any work done so far
            shutil.rmtree(outputdir)
            raise AppError("App incompatible")

        success = processor.begin_processing()

        if not success:
            logger.warning("Failed to produce bundle")
            raise AppError("Something went wrong")

        tarball_name = app_metadata_dict["id"] + Utils.get_random_string(6)

        Utils.create_tgz(outputdir, tarball_name)
        logger.success(
            f"Successfully generated bundle at {tarball_name}.tar.gz")

        # Move to persistent storage
        print(f"Moving '{tarball_name}.tar.gz' to {BUNDLE_STORE_DIR}")
        shutil.move(f'{tarball_name}.tar.gz', BUNDLE_STORE_DIR)

        return jsonify(success=True)
Example #13
 def __exit__(self, *exc):
     self.end = maya.now()._epoch
     delta = self.end - self.start
     logger.success(f"It took {delta}s")
     logger.success(f"It took {(delta*1000)}ms")
     return False
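This `__exit__` only makes sense together with a matching `__enter__`. A minimal sketch of the full context manager, assuming `maya` provides both timestamps (only the `__exit__` body above comes from the original):

    import maya
    from loguru import logger

    class Timed:
        def __enter__(self):
            # Record the start time as a Unix epoch, mirroring __exit__ below.
            self.start = maya.now()._epoch
            return self

        def __exit__(self, *exc):
            self.end = maya.now()._epoch
            delta = self.end - self.start
            logger.success(f"It took {delta}s")
            logger.success(f"It took {(delta*1000)}ms")
            return False

    # Usage sketch:
    with Timed():
        _ = sum(range(10**6))   # placeholder workload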
Example #14
    def run(self, actions, battery, lat_long_actual, box):

        logger.success('Start communication')

        init_hour = self.time_hour()
        init_minute = self.time_minute(init_hour)

        while True:

            if self.connection is None:
                self.connection = self.board.open_connection(port=self.port)
                sleep(0.9)

            try:

                message_board = self.connection.readline().decode('utf-8', 'replace')

                if str(message_board[:8]) == str(self.PATTERN):

                    if self.board.digit_verify(str(message_board)):

                        self.board.send_message(connection=self.connection, message=self.board.SEND_OK)

                        message_board = message_board.split(',')

                        # tipping event (basculamento)

                        if message_board[2] == '1':

                            logger.debug('{}'.format(message_board))

                            box[0] = int(message_board[3])

                            code = self.code_cart(box=box[0], battery=battery[:])

                            if code is None:
                                self.board.send_message(connection=self.connection, message=self.NOT_FOUND)
                                logger.info('Not identified :(')
                            else:
                                logger.success('Caixote {}, basculou na carreta: {} '.format(box[0], code))

                                attempts = 1

                                while attempts <= 5:
                                    
                                    self.board.send_message(connection=self.connection,
                                                            message='$PNEUD,G,1,{}'.format(code))

                                    logger.debug('Send: {}'.format(attempts))
                                    sleep(0.5)
                                    attempts += 1

                        if message_board[2] == '2':

                            # camera off and check whether to reboot

                            if message_board[3] == '0':

                                if self.preview_state == 1:

                                    # actions[0] = 0
                                    self.preview_state = int(message_board[3])

                                    actual_hour = self.time_hour()

                                    if actual_hour[0] < init_hour[0]:
                                        actual_hour[0] = int(actual_hour[0]) + 24

                                    actual_minute = self.time_minute(actual_hour)
                                    reboot = actual_minute - init_minute

                                    if reboot >= self.reboot:
                                        logger.info('Reboot system')
                                        sleep(0.8)
                                        os.system('sudo reboot')

                            # camera start

                            elif message_board[3] == '1':

                                if self.preview_state == 0:
                                    # actions[0] = 1
                                    self.preview_state = int(message_board[3])

                                    try:
                                        lat_long_actual[0], lat_long_actual[1] = self.convert_coord(
                                            array=[float(message_board[4]), float(message_board[5])])

                                    except ValueError:
                                        logger.error('Invalid GPS')

            except UnicodeError:
                logger.error('Unicode error')

            except IOError:
                self.close_connection()
                logger.error('Serial error')

            except AttributeError:
                self.close_connection()
                logger.error('Invalid argument')
Example #15
    def _x_call_gce(self, version_only=False):
        """
        Call GCE on kmerfreq table to estimate genome characters
        """
        if version_only:
            # print the version
            proc = sps.Popen(
                [self.binaries["gce"], "-V"],
                stderr=sps.STDOUT,
                stdout=sps.PIPE,
            )
            out = proc.communicate()[0].decode().split("\n")
            out = [i for i in out if "Version" in i][0]
            vers = out.split()[-1]
            return vers

        # prerun commands
        resfile = os.path.join(self.workdir, self.srr,
                               self.srr + ".kmer.freq.stat")
        cmd1 = ['cat', resfile]
        cmd2 = ['grep', '#Kmer indivdual number']  # (sic)
        null = "cat {workdir}/{srr}/{srr}.kmer.freq.stat | grep '#Kmer indiv'"
        logger.info("Executing {}:".format(null))
        logger.debug("Executing: {}".format(" ".join(cmd1)))
        proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
        proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE)
        out = proc2.communicate()
        if proc2.returncode:
            logger.error(out[0].decode())

        # store ikmer result
        ikmer = out[0].decode().strip().split()[-1]
        logger.success("Kmer individual number: {}".format(ikmer))

        # write kmer 2 column sub result file
        res2col = resfile + ".2colum"
        logger.info("Parsing 2-columns file to: {}".format(res2col))
        arr = pd.read_csv(resfile, skiprows=7, sep="\t", header=None)
        arr = arr.iloc[:, :2]
        arr.to_csv(res2col, index=False, sep="\t", header=None)

        # Run in homozygous mode
        logger.info("Running 'gce' in homozygous mode to estimate coverage")
        null = "{gce} -g " + ikmer + " -f {res.2col}"
        logger.info("Executing: {}".format(null))
        cmd = [
            self.binaries["gce"],
            "-g",
            ikmer,
            "-f",
            res2col,
        ]
        logger.debug(" ".join(cmd))
        proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
        self.gce1out = proc.communicate()
        if proc.returncode:
            logger.error(self.gce1out[0].decode())

        # write to a tmp file
        parse = self.gce1out[0].decode().split("Final estimation table:")
        parse = parse[-1].strip().split("\n")
        headers, data = parse[:2]
        headers = headers.strip().split("\t")
        data = data.strip().split("\t")
        self.h0dict = {}
        for i, j in zip(headers, data):
            self.h0dict[i] = j
            logger.success("GCE H0 {}: {}".format(j))

        # Run in heterozygous mode
        logger.info("Running 'gce' in heterozygous mode.")
        null = "{gce} -g " + ikmer + " -f {res.2col} -H 1 -c {coverage}"
        logger.info("Executing: {}".format(null))
        cmd = [
            self.binaries["gce"], "-g", ikmer, "-f", res2col, "-H", "1", "-c",
            str(int(round(float(self.h0dict["coverage_depth"]))))
        ]
        logger.debug(" ".join(cmd))
        proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
        self.gce2out = proc.communicate()
        if proc.returncode:
            logger.error(self.gce2out[0].decode())

        # parse the heterozygous-mode estimation table and report it
        parse = self.gce2out[0].decode().split("Final estimation table:")
        parse = parse[-1].strip().split("\n")
        headers, data = parse[:2]
        headers = headers.strip().split("\t")
        data = data.strip().split("\t")
        self.h1dict = {}
        for i, j in zip(headers, data):
            self.h1dict[i] = j
            logger.success("GCE H1 {}: {}".format(i, j))
Example #16
    def determine_state(self, pixelArray, modeString="determine period"):
        """ Code to run when in "determine" state
            Determine period mode (default behaviour requires user input).
            In this mode we obtain a minimum number of frames, determine a
            period and then return.
            It is assumed that the user (or cli/flask app) then runs the
            user_select_ref_frame function (and updates the state) before running
            analyse again with the new state.
        """
        logger.debug("Processing frame in {0} mode.".format(modeString))

        # Adds new frame to buffer
        self.ref_buffer.append(pixelArray)
        # Impose an upper limit on the buffer length, to protect against performance degradation
        # in cases where we are not succeeding in identifying a period
        # Note: deletion of first list element is potentially a performance issue,
        # although we are hopefully capping the length low enough that it doesn't become a real bottleneck
        ref_buffer_duration = (self.ref_buffer[-1].metadata["timestamp"] -
                               self.ref_buffer[0].metadata["timestamp"])
        if (("min_heart_rate_hz" in self.settings) and
            (ref_buffer_duration > 1.0 / self.settings["min_heart_rate_hz"])):
            logger.debug("Trimming buffer to duration {0}".format(
                1.0 / self.settings["min_heart_rate_hz"]))
            del self.ref_buffer[0]

        # Calculate period from determine_reference_period.py
        logger.info("Attempting to determine new reference period.")
        self.ref_frames, self.pog_settings = ref.establish(
            self.ref_buffer, self.period_guesses, self.pog_settings)

        if self.ref_frames is not None:
            # We were provided with ref_frames as a list, and this is helpful because it means we could still access
            # the PixelArray metadata at this point if we wished.

            # However, long-term we want to store a 3D array because that is what oga expects to work with.
            # We therefore make that conversion here
            self.ref_frames = np.array(self.ref_frames)

            # Automatically select a target frame and barrier
            # This can be overridden by the user/controller later
            self.pog_settings = pog.pick_target_and_barrier_frames(
                self.ref_frames, self.pog_settings)

            # Determine barrier frames
            self.pog_settings = pog.determine_barrier_frames(self.pog_settings)

            # Save the period
            ref.save_period(self.ref_frames, self.settings["period_dir"])
            logger.success("Period determined.")
            self.justRefreshedRefFrames = True  # Flag that a slow action took place

            if self.automatic_target_frame:
                logger.info(
                    "Period determined and target frame automatically selected; switching to prospective optical gating mode."
                )
                # Automatically switch to the "sync" state, using the default reference frame.
                # The user is expected to change the reference frame later, via a GUI, if they wish to
                self.start_sync_with_ref_frame(
                    self.pog_settings["referenceFrame"])
                self.state = "sync"
            else:
                # If we aren't using the automatically determined period
                # We raise the stop flag, which returns the current state to the user/app
                # The user/app can then select a target frame
                # The user/app will also need to call the adaptive system
                # see user_select_ref_frame()
                self.stop = True
Example #17
def write_results_to_file(result_dict: Dict):
    logger.info(f"| ----- Results will be written into file.")
    with open("census2010.py", "w") as res_file:
        res_file.write(f"'Results' = {pprint.pformat(result_dict)}")
    logger.success(f"| ----- Results are in the census2010.py file.")
Example #18
 def trigger_fluorescence_image_capture(self, delay):
     """As this is the base server, this function just outputs a log that a trigger would have been sent."""
     logger.success("A fluorescence image would be triggered now.")
Example #19
def run(get_command, args):
    if args.fixed_seed:
        seed(getpass.getuser())

    hosts = args.nodes
    if not hosts:
        hosts = ["local://localhost"] * minimum_number_of_local_nodes(args)

    args.initial_user_count = 3

    LOG.info("Starting nodes on {}".format(hosts))

    with infra.network.network(hosts,
                               args.binary_dir,
                               args.debug_nodes,
                               args.perf_nodes,
                               pdb=args.pdb) as network:
        network.start_and_join(args)
        primary, backups = network.find_nodes()

        command_args = get_command_args(args, get_command)

        if args.use_jwt:
            jwt_key_priv_pem, _ = infra.crypto.generate_rsa_keypair(2048)
            jwt_cert_pem = infra.crypto.generate_cert(jwt_key_priv_pem)
            jwt_kid = "my_key_id"
            jwt_issuer = "https://example.issuer"
            # Add JWT issuer
            with tempfile.NamedTemporaryFile(prefix="ccf",
                                             mode="w+") as metadata_fp:
                jwt_cert_der = infra.crypto.cert_pem_to_der(jwt_cert_pem)
                der_b64 = base64.b64encode(jwt_cert_der).decode("ascii")
                data = {
                    "issuer": jwt_issuer,
                    "jwks": {
                        "keys": [{
                            "kty": "RSA",
                            "kid": jwt_kid,
                            "x5c": [der_b64]
                        }]
                    },
                }
                json.dump(data, metadata_fp)
                metadata_fp.flush()
                network.consortium.set_jwt_issuer(primary, metadata_fp.name)
            jwt = infra.crypto.create_jwt({}, jwt_key_priv_pem, jwt_kid)

            command_args += ["--bearer-token", jwt]

        nodes_to_send_to = filter_nodes(primary, backups, args.send_tx_to)
        clients = []
        client_hosts = []
        if args.one_client_per_backup:
            if not backups:
                raise Exception(
                    "--one-client-per-backup was set but no backup was found")
            client_hosts = ["localhost"] * len(backups)
        else:
            if args.client_nodes:
                client_hosts.extend(args.client_nodes)

        if args.num_localhost_clients:
            client_hosts.extend(["localhost"] *
                                int(args.num_localhost_clients))

        if not client_hosts:
            client_hosts = ["localhost"]

        for client_id, client_host in enumerate(client_hosts):
            node = nodes_to_send_to[client_id % len(nodes_to_send_to)]
            remote_client = configure_remote_client(args, client_id,
                                                    client_host, node,
                                                    command_args)
            clients.append(remote_client)

        if args.network_only:
            for remote_client in clients:
                LOG.info(
                    f"Client can be run with: {remote_client.remote.get_cmd()}"
                )
            while True:
                time.sleep(60)
        else:
            for remote_client in clients:
                remote_client.start()

            hard_stop_timeout = 90

            try:
                with cimetrics.upload.metrics(complete=False) as metrics:
                    tx_rates = infra.rates.TxRates(primary)
                    start_time = time.time()
                    while True:
                        stop_waiting = True
                        for i, remote_client in enumerate(clients):
                            done = remote_client.check_done()
                            # all the clients need to be done
                            LOG.info(
                                f"Client {i} has {'completed' if done else 'not completed'} running ({time.time() - start_time:.2f}s / {hard_stop_timeout}s)"
                            )
                            stop_waiting = stop_waiting and done
                        if stop_waiting:
                            break
                        if time.time() > start_time + hard_stop_timeout:
                            raise TimeoutError(
                                f"Client still running after {hard_stop_timeout}s"
                            )

                        time.sleep(5)

                    tx_rates.get_metrics()

                    for remote_client in clients:
                        perf_result = remote_client.get_result()
                        LOG.success(
                            f"{args.label}/{remote_client.name}: {perf_result}"
                        )

                        # TODO: Only results for first client are uploaded
                        # https://github.com/microsoft/CCF/issues/1046
                        if remote_client == clients[0]:
                            LOG.success(
                                f"Uploading results for {remote_client.name}")
                            metrics.put(args.label, perf_result)
                        else:
                            LOG.warning(
                                f"Skipping upload for {remote_client.name}")

                    primary, _ = network.find_primary()
                    with primary.client() as nc:
                        r = nc.get("/node/memory")
                        assert r.status_code == http.HTTPStatus.OK.value

                        results = r.body.json()
                        tx_rates.insert_metrics(**results)

                        # Construct name for heap metric, removing ^ suffix if present
                        heap_peak_metric = args.label
                        if heap_peak_metric.endswith("^"):
                            heap_peak_metric = heap_peak_metric[:-1]
                        heap_peak_metric += "_mem"

                        peak_value = results["peak_allocated_heap_size"]
                        metrics.put(heap_peak_metric, peak_value)

                    LOG.info(f"Rates:\n{tx_rates}")
                    tx_rates.save_results(args.metrics_file)

                    for remote_client in clients:
                        remote_client.stop()

            except Exception:
                LOG.error("Stopping clients due to exception")
                for remote_client in clients:
                    remote_client.stop()
                raise
Example #20
    def start_and_join(self, args):
        """
        Starts a CCF network.
        :param args: command line arguments to configure the CCF nodes.
        """
        self.common_dir = get_common_folder_name(args.workspace, args.label)

        assert (args.gov_script is not None
                ), "--gov-script argument must be provided to start a network"

        self._setup_common_folder(args.gov_script)

        mc = max(1, args.initial_member_count)
        initial_members_info = []
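        # each entry appears to be (member id, whether the member holds a recovery share, optional member data)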
        for i in range(mc):
            initial_members_info += [(
                i,
                (i < args.initial_recovery_member_count),
                {
                    "is_operator": True
                } if (i < args.initial_operator_count) else None,
            )]

        self.consortium = infra.consortium.Consortium(
            self.common_dir,
            self.key_generator,
            self.share_script,
            initial_members_info,
            args.participants_curve,
            authenticate_session=not args.disable_member_session_auth,
        )
        initial_users = list(range(max(0, args.initial_user_count)))
        self.create_users(initial_users, args.participants_curve)

        primary = self._start_all_nodes(args)
        self.wait_for_all_nodes_to_catch_up(primary)
        LOG.success("All nodes joined network")

        self.consortium.activate(primary)

        if args.js_app_script:
            LOG.error(
                "--js-app-script is deprecated - update to --js-app-bundle instead"
            )
            infra.proc.ccall("cp", args.js_app_script,
                             args.binary_dir).check_returncode()
            self.consortium.set_js_app(remote_node=primary,
                                       app_script_path=args.js_app_script)

        if args.js_app_bundle:
            self.consortium.deploy_js_app(remote_node=primary,
                                          app_bundle_path=args.js_app_bundle)

        for path in args.jwt_issuer:
            self.consortium.set_jwt_issuer(remote_node=primary, json_path=path)

        self.consortium.add_users(primary, initial_users)
        LOG.info(f"Initial set of users added: {len(initial_users)}")

        self.consortium.open_network(remote_node=primary)
        self.status = ServiceStatus.OPEN
        LOG.success("***** Network is now open *****")
Example #21
0
            logger.debug(choice)
            net = self.supernet.sample(choice)
            for j in range(len(self.obj_functions)):
                f_res[j][i] = self.obj_functions[j](net)
            show = ""
            for name, res in zip(self.obj_names, [res[i] for res in f_res]):
                show += "%s:%.6f, " % (name, res)
            logger.info("PopID: {} | Choice {} | Result {}", i, choice, show)
        out["F"] = anp.column_stack(f_res)


if __name__ == "__main__":
    supernet = model.__dict__[args.model]().to(device)
    supernet.load_state_dict(
        torch.load(args.load_path)["state_dict"])
    problem = MyProblem(supernet)

    res = search(problem,
            pop_size=20,
            n_offsprings=10,
            n_generations=10,
            seed=123)
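    # res.pop is the final population: X holds each individual's sampled architecture choice, F its objective values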
    X = np.array(list(map(lambda x: x.X, res.pop)))
    F = np.array(list(map(lambda x: x.F, res.pop)))
    logger.info("get popuation")
    logger.success(X)
    logger.info("get result")
    logger.success(F)
    os.makedirs("saved-pops", exist_ok=True)
    torch.save(res.pop, "saved-pops/fair_pop.pkl")
Example #22
0
return d


##########################################################
# MAIN
##########################################################
padding: int = snakemake.params.padding
apply_filters: bool = snakemake.params.get("apply_filters", False)
only_alt: bool = snakemake.params.get("only_alt", False)
adjust_pos: bool = snakemake.params.get("adjust_pos", False)

logger.info("Extracting gene names from panel...")
with open(snakemake.input.panel) as istream:
    genes = extract_genes_from_panel(istream)

logger.success(f"Extracted {len(genes)} genes from the panel")

logger.info("Extracting intervals for genes from GFF...")
with open(snakemake.input.annotation) as istream:
    ivtree = extract_intervals_for_genes_from_gff(genes, istream, padding)
logger.success(f"Intervals extracted for {len(ivtree)} genes")

logger.info(
    "Extracting those VCF records that fall within the gene intervals and altering "
    "their CHROM and POS accordingly...")
vcf_reader = VCF(snakemake.input.vcf)

logger.debug("Adding genes to header...")
for iv in ivtree:
    vcf_reader.add_to_header(
        f"##contig=<ID={iv.data[0]},length={iv.end-iv.begin}>")
Example #23
0
    def convert_json_to_dataframe(self):

        license_list = []
        # Iterate through the accounts
        for account in self.json_to_parse:
            accountName = account['accountName']
            accountDomain = account['accountDomain']
            accountStatus = account['accountStatus']
            accountType = account['accountType']

            account_dict = {'accountName': account['accountName'],
                            'accountDomain': account['accountDomain'],
                            'accountStatus': account['accountStatus'],
                            'accountType': account['accountType']}

            #Iterate through the "roles"
            for role_dict in account['roles']:
                # logger.info('     working on role_dict: {}'.format(role_dict))
                role = role_dict['role']
                account_dict['role'] = role

                # We are only interested, at least at this time, in the following roles.
                # "APPENDED VA USER" is a faux user created by the SmartAccountSDK to work around some
                # wonkiness in how the BU Production Test domain behaves.
                if role in ['Virtual Account Administrator', 'Virtual Account User', 'APPENDED VA USER']:
                    account_dict['virtualAccount'] = role_dict['virtualAccount']
                    account_dict['virtualAccount_status'] = ""
                    account_dict['statusMessage'] = ""
                    licenses_array = []

                    # The role will contain either "assignedLicenses" or "licenses". Determine which one we have.
                    if 'assignedLicenses' not in role_dict.keys():
                        # Check to see if there are issues.
                        if 'licenses' not in role_dict.keys():
                            logger.error('No licenses or assignedLicenses found!!')
                            continue
                        licenses_array = role_dict['licenses']
                        logger.success('Licenses found!!')
                    else:
                        logger.success('assignedLicenses found!!')
                        licenses_array = role_dict['assignedLicenses']['licenses']
                        account_dict['virtualAccount_status'] = role_dict['assignedLicenses']['status']
                        account_dict['statusMessage'] = role_dict['assignedLicenses']['statusMessage']

                    license_info_dict = {}
                    account_dict_copy = {}

                    # work through the array of licenses
                    for license_dict in licenses_array:
                        # logger.info('         working on license_dict: {}'.format(license_dict))
                        license_info_dict['license'] = license_dict['license']
                        license_info_dict['assignedLicenses_quantity'] = license_dict['quantity']
                        license_info_dict['inUse'] = license_dict['inUse']
                        license_info_dict['available'] = license_dict['available']
                        license_info_dict['ahaApps'] = license_dict['ahaApps']
                        license_info_dict['billingType'] = license_dict['billingType']
                        license_info_dict['pendingQuantity'] = license_dict['pendingQuantity']
                        license_info_dict['reserved'] = license_dict['reserved']
                        license_info_dict['isPortable'] = license_dict['isPortable']
                        license_info_dict['assignedLicenses_status'] = license_dict['status']

                        license_info_dict_copy = license_info_dict.copy()
                        license_detail_dict = {}

                        # Each license could have multiple instances, i.e. quantities purchased at
                        # different times. Iterate through them.
                        for license_detail in license_dict['licenseDetails']:
                            # logger.info('             working on license_detail: {}'.format(license_detail))
                            if license_detail['startDate'] is not None:
                                if 'Z' in license_detail['startDate']:
                                    license_detail['startDate'] = \
                                        pd.to_datetime(license_detail['startDate'], format='%m/%d/%y %H:%M',
                                                       infer_datetime_format=True,
                                                       utc=True).to_pydatetime()
                                else:
                                    license_detail['startDate'] = \
                                        pd.to_datetime(license_detail['startDate'],
                                                       infer_datetime_format=True, utc=True).to_pydatetime()
                            if license_detail['endDate'] is not None:
                                if 'Z' in license_detail['endDate']:
                                    license_detail['endDate'] = \
                                        pd.to_datetime(license_detail['endDate'], format='%m/%d/%y %H:%M',
                                                       infer_datetime_format=True,
                                                       utc=True).to_pydatetime()
                                else:
                                    license_detail['endDate'] = \
                                        pd.to_datetime(license_detail['endDate'],
                                                       infer_datetime_format=True, utc=True).to_pydatetime()

                            # Once we have everything, put it into the list of licenses.
                            # We create a copy of the account dict because we want rows of data in the dataframe;
                            # the information gathered earlier in account_dict (accountName, virtualAccount, etc.)
                            # is common to all rows associated with the account.
                            account_dict_copy = account_dict.copy()
                            account_dict_copy.update(license_info_dict_copy)
                            account_dict_copy.update(license_detail)
                            license_list.append(account_dict_copy)

        # Last but not least, create the Pandas Dataframe.
        df = pd.DataFrame(license_list)
        logger.info('CSSMJSONParser, convert_json_to_dataframe end')

        return df
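A minimal usage sketch for the parser above (illustrative only: the original shows just convert_json_to_dataframe, so the construction of the parser and the accounts_json variable are assumptions):

    parser = CSSMJSONParser()               # hypothetical: the real constructor is not shown in this example
    parser.json_to_parse = accounts_json    # accounts_json: parsed Smart Account licensing response
    license_df = parser.convert_json_to_dataframe()
    license_df.to_csv("cssm_licenses.csv", index=False)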
Example #24
0
def run(get_command, args):
    if args.fixed_seed:
        seed(getpass.getuser())

    hosts = args.nodes
    if not hosts:
        hosts = ["localhost"] * minimum_number_of_local_nodes(args)

    LOG.info("Starting nodes on {}".format(hosts))

    with infra.network.network(hosts,
                               args.binary_dir,
                               args.debug_nodes,
                               args.perf_nodes,
                               pdb=args.pdb) as network:
        network.start_and_join(args)
        primary, backups = network.find_nodes()

        command_args = get_command_args(args, get_command)

        nodes_to_send_to = filter_nodes(primary, backups, args.send_tx_to)
        clients = []
        client_hosts = []
        if args.one_client_per_backup:
            if not backups:
                raise Exception(
                    "--one-client-per-backup was set but no backup was found")
            client_hosts = ["localhost"] * len(backups)
        else:
            if args.client_nodes:
                client_hosts.extend(args.client_nodes)

        if args.num_localhost_clients:
            client_hosts.extend(["localhost"] *
                                int(args.num_localhost_clients))

        if not client_hosts:
            client_hosts = ["localhost"]

        for client_id, client_host in enumerate(client_hosts):
            node = nodes_to_send_to[client_id % len(nodes_to_send_to)]
            remote_client = configure_remote_client(args, client_id,
                                                    client_host, node,
                                                    command_args)
            clients.append(remote_client)

        if args.network_only:
            for remote_client in clients:
                LOG.info(
                    f"Client can be run with: {remote_client.remote.get_cmd()}"
                )
            while True:
                time.sleep(60)
        else:
            for remote_client in clients:
                remote_client.start()

            hard_stop_timeout = 90

            try:
                with cimetrics.upload.metrics(complete=False) as metrics:
                    tx_rates = infra.rates.TxRates(primary)
                    start_time = time.time()
                    while True:
                        stop_waiting = True
                        for i, remote_client in enumerate(clients):
                            done = remote_client.check_done()
                            # all the clients need to be done
                            LOG.info(
                                f"Client {i} has {'completed' if done else 'not completed'} running ({time.time() - start_time:.2f}s / {hard_stop_timeout}s)"
                            )
                            stop_waiting = stop_waiting and done
                        if stop_waiting:
                            break
                        if time.time() > start_time + hard_stop_timeout:
                            raise TimeoutError(
                                f"Client still running after {hard_stop_timeout}s"
                            )

                        time.sleep(5)

                    tx_rates.get_metrics()

                    for remote_client in clients:
                        perf_result = remote_client.get_result()
                        LOG.success(
                            f"{args.label}/{remote_client.name}: {perf_result}"
                        )

                        # TODO: Only results for first client are uploaded
                        # https://github.com/microsoft/CCF/issues/1046
                        if remote_client == clients[0]:
                            LOG.success(
                                f"Uploading results for {remote_client.name}")
                            metrics.put(args.label, perf_result)
                        else:
                            LOG.warning(
                                f"Skipping upload for {remote_client.name}")

                    LOG.info(f"Rates:\n{tx_rates}")
                    tx_rates.save_results(args.metrics_file)

                    for remote_client in clients:
                        remote_client.stop()

            except Exception:
                LOG.error("Stopping clients due to exception")
                for remote_client in clients:
                    remote_client.stop()
                raise
Example #25
0
async def wordle(app: Ariadne, message: MessageChain, group: Group,
                 member: Member, single_game: ArgResult, dic: RegexResult,
                 length: ArgResult, get_help: ArgResult, give_up: ArgResult,
                 statistic: ArgResult) -> NoReturn:
    if get_help.matched:
        await app.sendGroupMessage(
            group,
            MessageChain(
                "Wordle文字游戏\n"
                "答案为指定长度单词,发送对应长度单词即可\n"
                "灰色块代表此单词中没有此字母\n"
                "黄色块代表此单词中有此字母,但该字母所处位置不对\n"
                "绿色块代表此单词中有此字母且位置正确\n"
                "猜出单词或用光次数则游戏结束\n"
                "发起游戏:/wordle -l=5 -d=SAT,其中-l/-length为单词长度,-d/-dic为指定词典,默认为5和CET4\n"
                "中途放弃:/wordle -g 或 /wordle -giveup\n"
                "查看数据统计:/wordle -s 或 /wordle -statistic\n"
                "查看提示:/wordle -hint\n"
                f"注:目前包含词典:{'、'.join(word_dics)}"))
        return None
    if statistic.matched:
        data = await get_member_statistic(group, member)
        await app.sendGroupMessage(
            group,
            MessageChain(
                f"用户 {member.name}\n"
                f"共参与{data[4]}场游戏,其中胜利{data[0]}场,失败{data[1]}场\n"
                f"一共猜对{data[2]}次,猜错{data[3]}次,共使用过{data[5]}次提示,再接再厉哦~"),
            quote=message.getFirst(Source))
        return None
    if give_up.matched:
        return None
    await mutex.acquire()
    if group.id in group_running and group_running[group.id]:
        await app.sendGroupMessage(group,
                                   MessageChain("本群已有正在运行中的游戏实例,请等待本局游戏结束!"))
        mutex.release()
        return None
    else:
        if dic.matched:
            dic = dic.result.asDisplay().split('=')[1].strip()
            if dic not in word_dics:
                await app.sendGroupMessage(
                    group,
                    MessageChain(f"没有找到名为{dic}的字典!已有字典:{'、'.join(word_dics)}"))
                mutex.release()
                return None
            else:
                group_word_dic[group.id] = dic
        elif group.id not in group_word_dic:
            group_word_dic[group.id] = DEFAULT_DIC
        group_running[group.id] = True
        mutex.release()
    single = single_game.matched
    length = int(length.result.asDisplay().split('=')
                 [1].strip()) if length.matched else 5
    if length not in word_list[group_word_dic[group.id]].keys():
        await app.sendGroupMessage(
            group,
            MessageChain(
                f"单词长度错误,词库中没有长度为{length}的单词=!"
                f"目前词库({group_word_dic[group.id]})中"
                f"只有长度为{'、'.join([str(i) for i in sorted(word_list[group_word_dic[group.id]].keys())])}的单词!"
            ))
        await mutex.acquire()
        group_running[group.id] = False
        mutex.release()
        return None
    wordle_instance = Wordle(length, dic=group_word_dic[group.id])
    logger.success(f"成功创建 Wordle 实例,单词为:{wordle_instance.word}")
    await app.sendGroupMessage(
        group,
        MessageChain([
            Image(data_bytes=wordle_instance.get_board_bytes()),
            Plain(
                f"\n你有{wordle_instance.row}次机会猜出单词,单词长度为{wordle_instance.length},请发送单词"
            )
        ]),
        quote=message.getFirst(Source))
    game_end = False
    try:
        while not game_end:
            game_end = await inc.wait(WordleWaiter(wordle_instance, group,
                                                   member if single else None),
                                      timeout=300)
    except asyncio.exceptions.TimeoutError:
        await app.sendGroupMessage(group,
                                   MessageChain("游戏超时,进程结束"),
                                   quote=message.getFirst(Source))
        await mutex.acquire()
        group_running[group.id] = False
        mutex.release()
Example #26
0
def process_single_event(min_periods, max_periods, taper_tmin_tmax, asdf_filename, waveform_length, sampling_rate, output_directory, logfile, correct_cea, cea_correction_file):
    # with pyasdf.ASDFDataSet(asdf_filename) as ds:
    tmin, tmax = map(float, taper_tmin_tmax.split(","))
    with pyasdf.ASDFDataSet(asdf_filename, mode="r") as ds:

        # load cea correction file
        if(correct_cea):
            correction_data = pd.read_csv(cea_correction_file, sep="|", comment="#", names=[
                "network", "station", "eventno", "mean", "std", "median", "mad", "starttime", "endtime"])
            correction_data["starttime"] = correction_data["starttime"].apply(
                modify_time)
            correction_data["endtime"] = correction_data["endtime"].apply(
                modify_time)

        # add logger information
        logger.add(logfile, format="{time} {level} {message}", level="INFO")

        # some parameters
        event = ds.events[0]
        origin = event.preferred_origin() or event.origins[0]
        event_time = origin.time
        event_latitude = origin.latitude
        event_longitude = origin.longitude

        for min_period, max_period in zip(min_periods, max_periods):
            # log
            if(isroot):
                logger.success(
                    f"[{rank}/{size}] start to process {asdf_filename} from {min_period}s to {max_period}s")
                if(correct_cea):
                    logger.success(
                        f"[{rank}/{size}] will correct cea dataset according to the cea station orientation information")

            f2 = 1.0 / tmax
            f3 = 1.0 / tmin
            f1 = 0.5 * f2
            f4 = 2.0 * f3
            pre_filt = (f1, f2, f3, f4)
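            # i.e. the pass band is [1/tmax, 1/tmin], tapered to zero below f1 = f2/2 and above f4 = 2*f3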

            # log
            if(isroot):
                logger.success(
                    f"[{rank}/{size}] {asdf_filename} is filtered with {f1} {f2} {f3} {f4}")

            def process_function(st, inv):
                # log
                logger.info(
                    f"[{rank}/{size}] processing {inv.get_contents()['stations'][0]}")

                # there is a possibility that some stations have multiple loc codes or use HH channels (should be avoided in the future)
                st = filter_st(st, inv)

                # handle overlap with the previous trace
                status_code = check_st_numberlap(st, inv)
                if(status_code == -1):
                    return
                elif(status_code == 0):
                    pass
                elif(status_code == 1):
                    # merge may have a problem (sampling rate is not equal)
                    try:
                        st.merge(method=1, fill_value=0,
                                 interpolation_samples=0)
                    except:
                        logger.error(
                            f"[{rank}/{size}] {inv.get_contents()['stations'][0]} error in merging")
                        return
                else:
                    raise Exception("unknown status code")

                status_code = check_time(st, event_time, waveform_length, inv)
                if(status_code == 0):
                    pass
                elif(status_code == -1):
                    logger.error(
                        f"[{rank}/{size}] {inv.get_contents()['stations'][0]} error in cutting data")
                    return
                else:
                    raise Exception("unknown status code")
                # trim will automatically use starttime if starttime>eventtime
                st.trim(event_time, event_time+waveform_length)

                st.detrend("demean")
                st.detrend("linear")
                st.taper(max_percentage=0.05, type="hann")

                # st.remove_response(output="DISP", pre_filt=pre_filt, zero_mean=False,
                #                    taper=False, inventory=inv, water_level=None)
                # here we should use PZ files to remove the response.
                st = remove_response(st, pre_filt=pre_filt, inv=inv)

                # the same of removing response with sac
                st.detrend("demean")
                st.detrend("linear")

                st.interpolate(sampling_rate=sampling_rate)

                # ! there is a problem here (all values are zero)
                station_latitude = inv[0][0].latitude
                station_longitude = inv[0][0].longitude

                # baz is calculated using station and event's location
                # for cea stations, we can directly add an angle to it
                _, baz, _ = gps2dist_azimuth(station_latitude, station_longitude,
                                             event_latitude, event_longitude)

                network = inv.get_contents()['networks'][0]
                if(correct_cea and (network in CEA_NETWORKS)):
                    baz = func_correct_cea(
                        baz, inv, event_time, correction_data)
                if(baz is None):
                    logger.error(
                        f"[{rank}/{size}] {inv.get_contents()['stations'][0]} error in correcting orientation")
                    return

                # we have to limit baz to be in [0,360)
                baz = np.mod(baz, 360)

                components = [tr.stats.channel[-1] for tr in st]
                if "N" in components and "E" in components:
                    # there may be some problem in rotating (time span is not equal for three channels)
                    try:
                        st.rotate(method="NE->RT", back_azimuth=baz)
                    except:
                        logger.error(
                            f"[{rank}/{size}] {inv.get_contents()['stations'][0]} error in rotating")
                        return
                else:
                    logger.error(
                        f"[{rank}/{size}] {inv.get_contents()['stations'][0]} doesn't have both N and E")
                    return

                # bandpass filter
                st.filter("bandpass", freqmin=1.0/max_period,
                          freqmax=1.0/min_period, corners=2, zerophase=True)

                # Convert to single precision to save space.
                for tr in st:
                    tr.data = np.require(tr.data, dtype="float32")

                return st

            tag_name = "preprocessed_%is_to_%is" % (
                int(min_period), int(max_period))
            tag_map = {
                "raw": tag_name
            }
            output_name_head = asdf_filename.split("/")[-1].split(".")[0]
            ds.process(process_function, join(
                output_directory, output_name_head+"."+tag_name + ".h5"), tag_map=tag_map)

            if(rank == 0):
                logger.success(
                    f"[{rank}/{size}] success in processing {asdf_filename} from {min_period}s to {max_period}s")
Example #27
0
def cmd(ceph_args, output, com):
    cfg = config.get()
    mg_paths = cfg["paths"]

    if output in ["json", "json-pretty"]:
        commands_dir = "must_gather_commands_json_output"
        json_add = "_--format_json-pretty"
    else:
        commands_dir = "must_gather_commands"
        json_add = ""

    file_name = "{}_{}{}".format(com, "_".join(ceph_args), json_add)
    if file_name.startswith("ceph_config_show_"):
        file_name = file_name.replace("ceph_config_show_", "config_")
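    # i.e. must-gather stores each command's output in a file named "<command>_<arg1>_<arg2>..." (plus the json suffix)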

    ceph_file = os.path.join("ceph", commands_dir, file_name)

    lg.debug("ceph_file: {}".format(ceph_file))

    ceph_cmd_paths = {}
    i = 1
    for p in mg_paths:
        ceph_cmd_path = os.path.join(p, ceph_file)
        if os.path.isfile(ceph_cmd_path):
            lg.info("Command output file found: {}".format(ceph_cmd_path))
            ceph_cmd_paths[i] = ceph_cmd_path
        i += 1

    if ceph_cmd_paths:
        for i, cp in ceph_cmd_paths.items():
            with open(cp, "r") as lf:
                print(lf.read())
            if len(mg_paths) > 1:
                lg.opt(colors=True).success("^^^<e>[{}]</>^^^\n".format(i))
    else:
        suggestions = {}
        i = 1
        for p in mg_paths:
            try:
                files = os.listdir(
                    os.path.join(p, "ceph", "must_gather_commands"))
                file_match = "{}_{}".format(com, "_".join(ceph_args))
                sugg = []
                sugg.extend([
                    "omg " + f.replace("_", " ") for f in files
                    if f.startswith(file_match)
                ])
                sugg.extend([
                    "omg ceph config show {}".format(f.replace("config_", ""))
                    for f in files if f.startswith("config_")
                ])
                if sugg:
                    suggestions[i] = sugg
            except Exception as e:
                lg.debug(e)
                pass
            i += 1
        if suggestions:
            lg.success("\nNote: Output of following commands are available:\n")
            for i, sugg in suggestions.items():
                lg.success("\n".join(sugg))
                lg.opt(colors=True).success("^^^<e>[{}]</>^^^\n".format(i))
        else:
            lg.error("Command output not found in any of the"
                     " {} must-gather paths".format(len(mg_paths)))
Example #28
0
    def _x_fasterqd(self, version_only=False):
        """
        Call fasterq-dump on SRR id to convert .sra file to .fastq files.
        """
        if version_only:
            # print the version
            proc = sps.Popen(
                [self.binaries["fasterq-dump"], "-V"],
                stderr=sps.STDOUT,
                stdout=sps.PIPE,
            )
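            # the last whitespace-separated token of the "-V" output is the version string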
            out = proc.communicate()[0].decode().split()[-1]
            return out

        # command and logger message
        cmd = [
            self.binaries["fasterq-dump"],
            self.srr,
            "-O",
            os.path.join(self.workdir, self.srr),
            "-t",
            os.path.join(self.workdir, self.srr),
        ]
        null = "{fasterq-dump} {srr} -O {workdir}/{srr} -t {workdir}/{srr}"
        logger.info("Executing: {}".format(null))
        logger.debug("Executing: {}".format(" ".join(cmd)))

        # call the tool
        proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
        out = proc.communicate()
        if proc.returncode:
            logger.error("Failed: {}".format(out[0].decode()))
            logger.error(
                "If you encountered a disk full error but believe you have "
                "sufficient space available in your working dir then the "
                "is being caused by the insane behavior of the sra-tools "
                "package which hides large tmp file in obscure places. "
                "You can turn off this behavior by running 'vdb-config -i'."
                "Turn off the 'enable local file caching' option.")
            raise TypeError(out[0].decode())

        # write a tmp SRR.lib file
        libfile = os.path.join(self.workdir, self.srr,
                               "{}_files.lib".format(self.srr))
        fastqs = glob.glob(os.path.join(self.workdir, self.srr, "*.fastq"))
        with open(libfile, 'w') as out:
            out.write("\n".join(fastqs))

        # show file size result
        f1 = self.srr + "_1.fastq"
        fastq1 = os.path.join(self.workdir, self.srr, f1)
        size1 = os.path.getsize(fastq1)
        size1 = round(size1 / 1e9, 2)

        f2 = self.srr + "_2.fastq"
        fastq2 = os.path.join(self.workdir, self.srr, f2)
        size2 = os.path.getsize(fastq2)
        size2 = round(size2 / 1e9, 2)

        logger.success("Fastq dumped {} ({} Gb)".format(f1, size1))
        logger.success("Fastq dumped {} ({} Gb)".format(f2, size2))
Example #29
0
 def setup(self):
     self.remote.setup()
     LOG.success(f"Remote client {self.name} setup")
Example #30
0
 def train(
     self,
     train_dataloader: DataIterator,
     validation_dataloader: DataIterator,
 ) -> Dict[str, float]:
     mi_not_improved = 0
     for epoch in range(self._epochs):
         # Train
         self._pytorch_model.train()
         logger.info("Training")
         train_metrics = self._fit(train_dataloader)
         # Log metrics only on master with run_on_rank_zero decorator
         training_util.log_metrics(
             mode_str="Training",
             info={
                 "epoch": epoch,
                 "aggressive": self._aggressive
             },
             metrics=train_metrics,
         )
         # Validation
         logger.info("Validation")
         validation_metrics = self.evaluate(validation_dataloader,
                                            info={
                                                "epoch": epoch,
                                                "aggressive":
                                                self._aggressive
                                            })
         # Check mutual info to finish aggressive training if needed
         if self._aggressive and self._model.is_kl_used:
             mi_not_improved += 1
             # 5 is an expected number of aggressive epochs based on experiments from the paper
             if mi_not_improved == 5:
                 self._aggressive = False
                 logger.info("Stop aggressive burning.")
         if self._metric_patience:
             self._metric_patience(validation_metrics)
         # Save model state only on master
         if self._is_master:
             self._save_checkpoint(
                 validation_metrics,
                 is_best_so_far=self._metric_patience.improved
                 if self._metric_patience else True,
                 save_dict={
                     "model": self._model.state_dict(),
                     "encoder_optimizer":
                     self._encoder_optimizer.state_dict(),
                     "decoder_optimizer":
                     self._decoder_optimizer.state_dict(),
                     "encoder_scheduler":
                     self._encoder_scheduler.state_dict(),
                     "decoder_scheduler":
                     self._decoder_scheduler.state_dict(),
                     **validation_metrics
                 },
             )
         # Wait for master process to save new checkpoint
         if self._distributed:
             dist.barrier()
         if self._metric_patience.should_stop if self._metric_patience else False:
             logger.success("Patience reached. Stop training.")
             logger.info("Best metrics: {}".format(
                 json.dumps(self._metric_patience.best_metrics,
                            ensure_ascii=False,
                            indent=2)))
             break
     return self._metric_patience.best_metrics if self._metric_patience else validation_metrics
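The train loop above relies on a patience object exposing improved, should_stop, and best_metrics; the following is a minimal sketch of such a helper (illustrative only, assuming a single lower-is-better validation metric, not the trainer's actual implementation):

    from typing import Dict, Optional

    class MetricPatience:
        """Track a validation metric and signal when training should stop."""

        def __init__(self, patience: int, metric: str = "loss") -> None:
            self.patience = patience
            self.metric = metric
            self.best_metrics: Optional[Dict[str, float]] = None
            self.improved = False
            self.should_stop = False
            self._bad_epochs = 0

        def __call__(self, metrics: Dict[str, float]) -> None:
            value = metrics[self.metric]
            if self.best_metrics is None or value < self.best_metrics[self.metric]:
                # new best value: remember it and reset the bad-epoch counter
                self.best_metrics = metrics
                self.improved = True
                self._bad_epochs = 0
            else:
                # no improvement this epoch
                self.improved = False
                self._bad_epochs += 1
                self.should_stop = self._bad_epochs >= self.patience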