Example #1
def shutdown():
    global state
    state = NetworkState.SHUTTING_DOWN

    netManager = NetworkManager.NetworkManager()
    netManager.turnOff()
    netManager.join()

    state = NetworkState.SHUT_DOWN
Example #2
def initialise():
    global state
    if state != NetworkState.UNINITIALISED:
        GeneralLogger().logger.warn(
            f'Network API already initialised, can\'t initialise now. [State={state}]'
        )
    else:
        state = NetworkState.INITIALISING
        networkManager = NetworkManager.NetworkManager()
        networkManager.initialise()
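
Example #1 and Example #2 rotate a module-level `state` through members of a `NetworkState` enum that neither snippet defines. A minimal sketch of what such an enum could look like, assuming a plain `enum.Enum`; the `INITIALISED` member is an assumption implied by the lifecycle, not shown in the snippets:

# Hypothetical NetworkState definition matching the members referenced above.
from enum import Enum, auto

class NetworkState(Enum):
    UNINITIALISED = auto()
    INITIALISING = auto()
    INITIALISED = auto()      # assumed intermediate state, not shown in the snippets
    SHUTTING_DOWN = auto()
    SHUT_DOWN = auto()

state = NetworkState.UNINITIALISED   # module-level state mutated via `global state` above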
Example #3
 def generate_network_manager(self, selected, stock_nr, rank):
     network_manager = nm.NetworkManager(self, selected, stock_nr,
                                         self.run_nr, self.rf_rate,
                                         self.soft_label,
                                         self.soft_label_percent)
     stock_result = network_manager.build_networks(
         number_of_networks=self.number_of_networks,
         epochs=self.epochs,
         rank=rank)
     self.add_to_stock_results(stock_result, network_manager)
     return stock_result
Example #4
    else:
        lockFile.lock()
    try:

        FPSCounter = FPS()
        FPSCounter.start()

        cam = cv2.VideoCapture(0)
        cam.set(3, 640)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
        cam.set(4, 480)   # 4 == cv2.CAP_PROP_FRAME_HEIGHT
        #cam.set(cv2.cv.CV_CAP_PROP_BRIGHTNESS, brightness)
        #cam.set(cv2.cv.CV_CAP_PROP_SATURATION, saturation)
        #cam.set(cv2.cv.CV_CAP_PROP_EXPOSURE, exposure) # not working on the old camera

        visionManager = VisionManager(LB, UB, MBR, cam, KH, KW, FL, [RH,RW,RL], TH, CUW, CUWD, HAX, HAY)
        networkManager = NetworkManager(JAVA_IP,8080)

        ###################
        # The code itself #
        ###################

        while True:

            visionManager.updateImage()
            visionManager.updateTowerScales()
            visionManager.updateRobotScales()
            FPSCounter.update()

            if visionManager.isObjectDetected: # if an object was detected

                ######################
Example #5
 def __init__(self):
     self.running = True
     self.waitingQueue = deque()
     self.networkManager = NetworkManager(self)
     self.battles = []
Example #6
File: nm.py  Project: rayman18/wydev
# (The original nm.py also picks out functions whose names start with 'magic'
#  by iterating over locals().items(); that block is not recoverable from this dump.)
NM = NetworkManager()
ipshell = IPShellEmbed()
def _get_device_by_name(dev_name):
	devs = NM.get_devices()
	d = filter((lambda dev: dev.get_name() == dev_name), devs)
	if d == []:
		raise NameError('No such interface (%s)' % dev_name)
	return d[0]

def _get_network_by_name(dev, essid):
	nets = dev.get_networks()
	n = filter((lambda net: net.get_name() == essid), nets)
	if n == []:
		raise NameError('No such ESSID (%s)' % essid)
	return n[0]
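
A hypothetical invocation of the two helpers above (the interface name and ESSID are placeholders, not values from the project):

eth = _get_device_by_name('eth0')          # placeholder interface name
net = _get_network_by_name(eth, 'MyWifi')  # placeholder ESSID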
Example #7
    if lockFile.is_locked():
        raise ValueError("Lock File Locked!")
    else:
        lockFile.lock()

    args = parse_arguments()

    try:
        vision_manager = VisionManager()
    except ValueError, ex:
        logger.error("Failed initializing VisionManager: %s", ex.message)
        raise

    if args.enable_network:
        try:
            network_manager = NetworkManager(args.host, args.port)
        except Exception, ex:
            logger.error("Unhandled Exception:\n" + traceback.format_exc())
            raise
    else:
        network_manager = None

    FPSCounter = FPS()
    FPSCounter.start()

    try:
        read_frames = 0  # the number of frames read so far

        while True:
            if args.dump_image and (read_frames % 30 == 0):
                save_frame_path = "current.png"
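
Examples #4 and #7 both construct the vision-side endpoint as `NetworkManager(host, port)` (e.g. `NetworkManager(JAVA_IP, 8080)`), but neither snippet shows the class itself. A minimal sketch of a TCP-client-style manager with that constructor shape; the method names and the JSON framing are assumptions, not the projects' actual API:

import json
import socket

class NetworkManager(object):
    """Hypothetical minimal TCP client with the (host, port) constructor
    seen in Examples #4 and #7. Method names are illustrative only."""

    def __init__(self, host, port):
        self.sock = socket.create_connection((host, port))

    def send(self, payload):
        # Newline-delimited JSON is one simple framing choice.
        self.sock.sendall((json.dumps(payload) + "\n").encode("utf-8"))

    def close(self):
        self.sock.close()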
Example #8
def pingDevice(target):
    pingMessage = MessageFactory.createPingMessage(target)
    NetworkManager.NetworkManager().outgoingMessages.put(pingMessage)
Example #9
def sendCommand(targetName, commandString):
    commandMessage = MessageFactory.createCommandMessage(
        targetName, commandString)
    NetworkManager.NetworkManager().outgoingMessages.put(commandMessage)
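
Examples #8 and #9 call `NetworkManager.NetworkManager()` on every invocation and push onto its `outgoingMessages` queue, which only works if the constructor hands back a shared instance (or at least a shared queue). A minimal sketch of that pattern, assuming a classic `__new__`-based singleton and a standard `Queue`; both are assumptions, not the project's actual implementation:

# Hypothetical reconstruction of the shared-queue behaviour, not the project's code.
try:
    from queue import Queue        # Python 3
except ImportError:
    from Queue import Queue        # Python 2

class NetworkManager(object):
    _instance = None

    def __new__(cls):
        # Return the same instance on every NetworkManager() call, so
        # pingDevice() and sendCommand() feed one outgoing queue.
        if cls._instance is None:
            cls._instance = super(NetworkManager, cls).__new__(cls)
            cls._instance.outgoingMessages = Queue()
        return cls._instance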
Example #10
    def long_train_network(self,
                           params,
                           train_pool,
                           val_pool,
                           test_pool,
                           checkpoint=None,
                           test_network_only=False):
        self.parameter_dict = params

        # Run for many minutes, or until loss decays significantly.
        self.parameter_dict['training_early_stop'] = self.parameter_dict[
            'long_training_time']

        if checkpoint is not None:
            log_file_name = checkpoint
        else:
            log_file_name = "best-" + str(time.time())

        training_batch_handler = BatchHandler.BatchHandler(
            train_pool, self.parameter_dict, True)
        validation_batch_handler = BatchHandler.BatchHandler(
            val_pool, self.parameter_dict, False)
        test_batch_handler = BatchHandler.BatchHandler(test_pool,
                                                       self.parameter_dict,
                                                       False)

        # Add input_size, num_classes
        self.parameter_dict[
            'input_size'] = training_batch_handler.get_input_size()
        self.parameter_dict[
            'num_classes'] = training_batch_handler.get_num_classes()

        netManager = NetworkManager.NetworkManager(self.parameter_dict,
                                                   log_file_name)

        if not test_network_only:
            netManager.build_model(self.encoder_means, self.encoder_stddev)
            best_results = self.train_network(netManager,
                                              training_batch_handler,
                                              validation_batch_handler)
        else:
            # We are loading a network from a checkpoint
            netManager.build_model()
            best_results = {}
        best_results['test_accuracy'], best_results[
            'test_loss'], report_df = self.test_network(
                netManager, test_batch_handler)

        #print "Drawing html graph" # I don't read these graphs anymore
        #netManager.draw_html_graphs(netManager.compute_result_per_dis(test_batch_handler))
        #if self.parameter_dict['model_type'] == 'categorical':
        #    netManager.draw_categorical_html_graphs(test_batch_handler)
        #else:
        #    netManager.draw_generative_html_graphs(test_batch_handler, multi_sample=1)
        #netManager.draw_generative_html_graphs(test_batch_handler, multi_sample=20)

        # FIXME maybe this needs its own function?
        for key, value in best_results.iteritems():
            if (type(value) is list or type(value) is np.ndarray
                    or type(value) is tuple):
                best_results[key] = pd.Series([value], dtype=object)
        best_results = pd.DataFrame(best_results, index=[0])
        if not test_network_only:
            best_results.to_csv(
                os.path.join(self.parameter_dict['master_dir'], "best.csv"))

        # Check that every track in the report_df is at distance zero.
        reports = ReportWriter.ReportWriter(
            training_batch_handler, validation_batch_handler,
            test_batch_handler, self.parameter_dict,
            report_df)  #'results/20180412-120830/plots_img_final'
        #reports.get_results().to_csv(os.path.join(self.parameter_dict['master_dir'],"metrics.csv"))
        for key, value in reports.get_results().iteritems():
            pd.DataFrame(value).to_csv(
                os.path.join(self.parameter_dict['master_dir'],
                             key + '-' + "metrics.csv"))

        # SPAGHETTI WARNING
        # This was done in a rush for a journal plot
        # it will plot the output graphs for several distance horizons, to create an effective animation
        # return best_results
        plot_tracks_at_distances = True
        if plot_tracks_at_distances:
            for d in [
                    -5, 0, 5, 10, 20
            ]:  #np.arange(-16,22,2): #[-15, -10, -5, 0, 5, 10, 15, 20]:
                print "Now running report for distance: " + str(d) + " meters"
                try:
                    _, _, report_df = self.test_network(netManager,
                                                        test_batch_handler,
                                                        distance=d)
                    print "Number of tracks is: " + str(
                        len(report_df.track_idx.unique()))
                except ValueError:
                    # No data for this distance
                    continue

                try:
                    cluster_mix_weight_threshold = self.parameter_dict[
                        'cluster_mix_weight_threshold']
                except KeyError:
                    cluster_mix_weight_threshold = 0.5
                try:
                    cluster_eps = float(self.parameter_dict['cluster_eps'])
                except KeyError:
                    cluster_eps = 1.0
                try:
                    cluster_min_samples = self.parameter_dict[
                        'cluster_min_samples']
                except KeyError:
                    cluster_min_samples = 1

                pool = mp.Pool(processes=7, maxtasksperchild=1)
                args = []
                plt_size = (6, 6)  # (10, 10)
                plot_dir = os.path.join(self.parameter_dict['master_dir'],
                                        'sequential_test_data_plots')
                if not os.path.exists(plot_dir):
                    os.makedirs(plot_dir)

                for track_idx in report_df.track_idx:
                    model_predictions = {}
                    track_df = report_df[report_df.track_idx == track_idx]
                    if 'right' not in track_df.relative_destination.iloc[0]:
                        continue
                    #if track_df.track_idx.iloc[0] not in [16780]:
                    #   continue

                    #model_predictions["RNN-FL"] = track_df.outputs.iloc[0]
                    path_MDN_clusters, path_centroids, path_weights = MDN_clustering.cluster_MDN_into_sets(
                        report_df[report_df.track_idx ==
                                  track_idx].mixtures.iloc[0],
                        mix_weight_threshold=cluster_mix_weight_threshold,
                        eps=cluster_eps,
                        min_samples=cluster_min_samples)
                    for centroid_idx in range(len(path_centroids)):
                        model_predictions['multipath_' +
                                          str(centroid_idx)] = np.array(
                                              path_centroids[centroid_idx])

                    for padding_mask in ['Network']:
                        args.append([
                            track_df.encoder_sample.iloc[0],
                            model_predictions,
                            track_df.decoder_sample.iloc[0],  # Ground Truth
                            track_df.mixtures.iloc[0],
                            track_df.padding_logits.iloc[0],
                            track_df.trackwise_padding.iloc[0],
                            plt_size,
                            False,  # draw_prediction_track,
                            plot_dir,  # self.plot_directory,
                            "best",  # self.log_file_name,
                            False,  # multi_sample,
                            0,  # self.get_global_step(),
                            track_idx,  # graph_number,
                            plot_dir,  # fig_dir,
                            track_df.csv_name.iloc[0],
                            track_df.relative_destination.iloc[0],
                            utils.sanitize_params_dict(self.parameter_dict),
                            padding_mask,
                            d
                        ])
                results = pool.map(utils_draw_graphs.multiprocess_helper, args)

        return best_results
Example #11
        def hyper_training_helper(hyper_learning_rate, hyper_rnn_size,
                                  hyper_reg_embedding_beta, hyper_reg_l2_beta,
                                  hyper_learning_rate_decay,
                                  hyper_learning_rate_min,
                                  padding_loss_logit_weight,
                                  padding_loss_mixture_weight):
            """ 
            Function used to wrap the hyperparameters and settings such that it fits the format used by dlib.
            Some variables need to be side-loaded, mostly reporting values.
            """
            ############# SELECT NEW PARAMS
            self.parameter_dict['learning_rate'] = 10**hyper_learning_rate
            self.parameter_dict['rnn_size'] = int(hyper_rnn_size)
            self.parameter_dict[
                'reg_embedding_beta'] = 10**hyper_reg_embedding_beta
            self.parameter_dict['l2_reg_beta'] = 10**hyper_reg_l2_beta
            self.parameter_dict[
                'learning_rate_decay_factor'] = hyper_learning_rate_decay
            self.parameter_dict['learning_rate_min'] = \
                (10 ** hyper_learning_rate_min) * self.parameter_dict['learning_rate']
            self.parameter_dict['embedding_size'] = self.parameter_dict[
                'rnn_size']
            self.parameter_dict[
                'padding_loss_logit_weight'] = padding_loss_logit_weight
            self.parameter_dict[
                'padding_loss_mixture_weight'] = padding_loss_mixture_weight

            # Update Cutoffs
            self.parameter_dict['long_training_time'] = self.parameter_dict[
                'early_stop_cf']
            self.parameter_dict['long_training_steps'] = self.parameter_dict[
                'hyper_search_step_cutoff']
            ######### / PARAMS
            print 'learning_rate               ' + str(10**hyper_learning_rate)
            print 'rnn_size                    ' + str(hyper_rnn_size)
            print 'reg_embedding_beta          ' + str(
                10**hyper_reg_embedding_beta)
            print 'l2_reg_beta                 ' + str(10**hyper_reg_l2_beta)
            print 'learning_rate_decay_factor  ' + str(
                hyper_learning_rate_decay)
            print 'padding_loss_logit_weight   ' + str(
                padding_loss_logit_weight)
            print 'padding_loss_mixture_weight ' + str(
                padding_loss_mixture_weight)

            cf_fold = -1
            # I should call this outside the crossfold, so it occurs once
            # This way all the crossfolds for the same hyperparameters are adjacent in the checkpoint dirs
            log_file_time = str(time.time())
            cf_results_list = []

            for train_pool, val_pool in self.cf_pool:
                cf_fold += 1
                log_file_name = log_file_time + "-cf-" + str(cf_fold)

                print "Starting crossfold"
                # Collect batch_handlers, and check if they've been cached.
                try:
                    training_batch_handler = training_batch_handler_cache[hash(
                        tuple(np.sort(train_pool.uniqueId.unique())))]
                except KeyError:
                    training_batch_handler = BatchHandler.BatchHandler(
                        train_pool, self.parameter_dict, True)
                except AttributeError:
                    print 'This should not be attainable, as crossfold==2 is invalid'

                try:
                    validation_batch_handler = validation_batch_handler_cache[
                        hash(tuple(np.sort(val_pool.uniqueId.unique())))]
                except KeyError:
                    validation_batch_handler = BatchHandler.BatchHandler(
                        val_pool, self.parameter_dict, False)
                except AttributeError:
                    print 'This should not be attainable, as crossfold==2 is invalid'

                # Add input_size, num_classes
                self.parameter_dict[
                    'input_size'] = training_batch_handler.get_input_size()
                self.parameter_dict[
                    'num_classes'] = training_batch_handler.get_num_classes()

                netManager = NetworkManager.NetworkManager(
                    self.parameter_dict, log_file_name)
                netManager.build_model(self.encoder_means, self.encoder_stddev)

                try:
                    cf_results = self.train_network(netManager,
                                                    training_batch_handler,
                                                    validation_batch_handler,
                                                    hyper_search=True)
                except tf.errors.InvalidArgumentError:
                    print "**********************caught error, probably gradients have exploded"
                    return 99999999  # HUGE LOSS --> this was caused by bad init conditions, so it should be avoided.
                # Now assign the handlers to the cache IF AND ONLY IF the training was successful.
                # If it dies before the first pool sort in the training, the whole thing falls over.
                validation_batch_handler_cache[hash(
                    tuple(np.sort(val_pool.uniqueId.unique()))
                )] = validation_batch_handler
                training_batch_handler_cache[hash(
                    tuple(np.sort(train_pool.uniqueId.unique()))
                )] = training_batch_handler

                cf_results['crossfold_number'] = cf_fold
                # As pandas does not like lists when adding a list to a row of a dataframe, set to None (the lists are
                # a large amount of redundant data). This is why I copy out parameters.py
                for key, value in cf_results.iteritems():
                    if (type(value) is list or type(value) is np.ndarray
                            or type(value) is tuple):
                        cf_results[key] = pd.Series([value], dtype=object)
                cf_results_list.append(pd.DataFrame(cf_results, index=[0]))

                # plot
                print "Drawing html graph"
                if self.parameter_dict['model_type'] == 'categorical':
                    netManager.draw_categorical_html_graphs(
                        validation_batch_handler)
                else:
                    netManager.draw_generative_html_graphs(
                        validation_batch_handler, multi_sample=1)
                    netManager.draw_generative_html_graphs(
                        validation_batch_handler, multi_sample=20)

                # Here we have a fully trained model, but we are still in the cross fold.

                # FIXME Only do 1 fold per hyperparams. It's not necessary to continue
                break

            # Run reportwriter here and return all_tracks..... euclidean loss?
            val_acc, val_loss, report_df =\
                self.test_network(netManager, validation_batch_handler)

            cf_df = pd.concat(cf_results_list)
            # Condense results from cross fold (Average, best, worst, whatever selection method)
            hyperparam_results = copy.copy(self.parameter_dict)
            #hyperparam_results['input_columns'] = ",".join(hyperparam_results['input_columns'])
            hyperparam_results['eval_accuracy'] = np.min(
                cf_df['eval_accuracy'])
            hyperparam_results['final_learning_rate'] = np.min(
                cf_df['final_learning_rate'])
            hyperparam_results['training_accuracy'] = np.min(
                cf_df['training_accuracy'])
            hyperparam_results['training_loss'] = np.average(
                cf_df['training_loss'])
            hyperparam_results['validation_accuracy'] = np.average(
                cf_df['validation_accuracy'])
            hyperparam_results['validation_loss'] = np.average(
                cf_df['validation_loss'])

            track_scores = ReportWriter.ReportWriter.score_model_on_metric(
                self.parameter_dict, report_df)
            hyperparam_results['euclidean_err_sum'] = sum(
                track_scores['euclidean'])
            hyperparam_results['crossfold_number'] = -1
            #FIXME What is this line doing?
            hyperparam_results['network_chkpt_dir'] = (cf_df.sort_values(
                'eval_accuracy',
                ascending=False).iloc[[0]]['network_chkpt_dir'])
            hyperparam_results['cf_summary'] = True
            for key, value in hyperparam_results.iteritems():
                if (type(value) is list or type(value) is np.ndarray
                        or type(value) is tuple):
                    hyperparam_results[key] = pd.Series(
                        [value], dtype=object)  # str(cf_results[key])
            hyperparam_results_list.append(
                pd.DataFrame(hyperparam_results, index=[0]))
            hyperparam_results_list.append(cf_df)
            #Write results and hyperparams to hyperparameter_results_dataframe

            return hyperparam_results['euclidean_err_sum']
Example #12
    def __init__(self):
        pygame.init()
        pygame.font.init()
        self.font16 = pygame.font.Font("PKMN RBYGSC.ttf", 16)
        self.font32 = pygame.font.Font("PKMN RBYGSC.ttf", 32)
        self.font64 = pygame.font.Font("PKMN RBYGSC.ttf", 64)
        #must be something real troll to use anything bigger than 64
        self.font128 = pygame.font.Font("PKMN RBYGSC.ttf", 128)
        self.font256 = pygame.font.Font("PKMN RBYGSC.ttf", 256)
        self.rapper = pygame.image.load('NAKEDMAN2.png')
        self.rapperw = self.rapper.get_rect().width
        self.gangster = pygame.image.load('NAKEDMAN3.png')
        self.gangsterw = self.gangster.get_rect().width
        self.child = pygame.image.load('NAKEDMAN4.png')
        self.childw = self.child.get_rect().width
        self.hobo = pygame.image.load('NAKEDMAN.png')
        self.hobow = self.hobo.get_rect().width
        self.nerd = pygame.image.load('NAKEDMAN5.png')
        self.nerdw = self.nerd.get_rect().width
        self.select = pygame.image.load('select.png')
        self.selectw = self.select.get_rect().width
        self.wait = False
        # Processes events (essentially all inputs)
        self.eventManager = EventManager(self)

        # Brings up a TextInput that'll be used to enter in text
        self.textInput = TextInput(self)
        self.p1name = ''
        self.p2name = ''

        pygame.display.set_caption("POKEMANS")
        self.screen = pygame.display.set_mode((800, 600))
        self.streak = 0
        self.running = True
        self.state = "Pre-Login"
        
        self.battlestr = ''
        
        self.draft = [pokeman(1,0),pokeman(2,0),pokeman(3,0)] # The pokemans in the draft
        self.pokemans = [] # The pokemans selected
        self.gameState = [] # gameState in battle

        # Sends an alive packet periodically to the server to tell that a player is still online
        self.aliveClock = pygame.time.Clock()
        self.aliveTimer = 0 # Timer to keep track of the clock

        self.sel = 0
        self.login = 0
        self.timer1 = 0
        self.preb_timer = 0
        self.preb = True
        self.flash1 = False
        self.networkManager = NetworkManager(self)
        
        colorPicker = randint(0, 5)
        backgroundColor = None
        if colorPicker == 0:
            backgroundColor = (255, 0, 0)
        elif colorPicker == 1:
            backgroundColor = (0, 255, 0)
        elif colorPicker == 2:
            backgroundColor = (0, 0, 255)
        elif colorPicker == 3:
            backgroundColor = (255, 255, 0)
        elif colorPicker == 4:
            backgroundColor = (0, 255, 255)
        elif colorPicker == 5:
            backgroundColor = (255, 0, 255)

        self.background = pygame.Surface((800, 600))
        self.background.set_alpha(50)
        self.background.fill(backgroundColor)

        self.textScroll = TextScroll(self)