Example #1
    def convert_genotypes(self):

        chunk_size = self.split_size
        if chunk_size is None:
            raise ValueError(
                'CONVERTER_SPLIT_SIZE is not defined in the config file!')
        G = np.array([])
        #self.reader.folder.processed=0
        while True:
            with Timer() as t:
                G = self.reader.folder.get_bed(chunk_size)
                if G is None:
                    break

            print('Time to read {} SNPs is {} s'.format(G.shape[0], t.secs))

            self.write_data('gen')
            atom = tables.Int8Atom()
            self.genotype = self.h5_gen_file.create_carray(
                self.h5_gen_file.root,
                'genotype',
                atom, (G.shape),
                title='Genotype',
                filters=self.pytable_filters)
            with Timer() as t:
                self.genotype[:] = G

            print('Time to write {} SNPs is {} s'.format(G.shape[0], t.secs))

            self.h5_gen_file.close()
            G = None
            gc.collect()
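None of these examples ship the Timer class they use. Example #1 only needs a context manager that records elapsed seconds into a `secs` attribute, so a minimal sketch (an assumption, not the original implementation) could look like this:

import time

class Timer:
    # Minimal sketch of the context-manager Timer Example #1 assumes:
    # on exit it stores the elapsed wall-clock seconds in `secs`.
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        self.secs = time.time() - self.start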
Example #2
class Test:
    def __init__(self, *args, **kwargs):
        '''
        Do not overload me!
        '''
        self.testStart(*args, **kwargs)
        self.TEST_TIMER = Timer()
        self.PERIODIC_DONE = False
        self.END_DONE = False

    def run(self):
        '''
        Execute the test on the robot.

        Call this method in testPeriodic or another periodic method.
        '''
        self.TEST_TIMER.wait(1)

        if self.TEST_TIMER.is_done():
            self.PERIODIC_DONE = True

        if self.END_DONE:
            pass
        elif self.PERIODIC_DONE:
            self.testEnd()
            self.END_DONE = True
        else:
            self.testPeriodic()

    def testStart(self, *args, **kwargs):
        '''
        This is called to initialize your test.
        Instantiate any data associated with your test here.

        Any arguments passed to __init__ will also be passed to this method.

        Overload me!
        '''
        pass

    def testPeriodic(self):
        '''
        This is called to update your test.
        Put any periodic code here.

        Overload me!
        '''
        pass

    def testEnd(self):
        '''
        This is called to destruct your test.
        Anything used to tie off loose ends after your periodic methods should be called here.

        Overload me!
        '''
        pass

    def is_done(self):
        return self.END_DONE
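The docstrings above spell out the contract: overload testStart/testPeriodic/testEnd, never __init__, and call run() from a periodic method. A hypothetical usage sketch (BlinkTest and led are illustration only, not from the original):

class BlinkTest(Test):
    def testStart(self, led):
        # receives whatever arguments were passed to __init__
        self.led = led

    def testPeriodic(self):
        # runs on every call to run() until the one-second timer is done
        self.led.toggle()

    def testEnd(self):
        # tie off loose ends once the timer expires
        self.led.off()

# in the robot class, per run()'s docstring:
#     def testInit(self):
#         self.blink_test = BlinkTest(self.led)
#     def testPeriodic(self):
#         self.blink_test.run()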
Example #3
def _script_for_patch_feature(head, tail, device, dataset='mscoco', data_split='train'):
    data_dir = osp.join(DATA_ROOT, dataset)

    # set the model path here
    prototxt = osp.join(CAFFE_ROOT, 'models', 'ResNet', 'ResNet-152-deploy.prototxt')
    caffemodel = osp.join(CAFFE_ROOT, 'models', 'ResNet', 'ResNet-152-model.caffemodel')

    # load network
    net, transformer = load_network(prototxt, caffemodel, device)

    # prepare image files
    cap_file = osp.join(data_dir, 'captions_{}.json'.format(data_split))
    image_ids = json.load(open(cap_file, 'r'))['image_ids']
    im_files = ['COCO_{}2014_'.format(data_split) + str(i).zfill(12) + '.jpg' for i in image_ids]
    im_files = [osp.join(data_dir, data_split+'2014', i) for i in im_files]
    with h5py.File(osp.join(data_dir, 'bbox.h5'), 'r') as f:
        bbox = np.array(f[data_split])

    # initialize h5 file
    save_file = osp.join(data_dir, 'features', 'features_30res.h5')
    with h5py.File(save_file) as f:
        if data_split not in f:
            f.create_dataset(data_split, [len(im_files), 30, 2048], 'float32')

    # computing
    timer = Timer()
    print '\n\n... computing'
    for i in xrange(head, tail):
        timer.tic()
        im = caffe.io.load_image(im_files[i])  # (h, w, c)
        with h5py.File(save_file) as f:
            feat = cnn_patch_feature(net, transformer, im, bbox[i, :, :])
            f[data_split][i, :] = feat
        print '[{:d}]  {}  [{:.3f} sec]'.format(i, osp.split(im_files[i])[-1], timer.toc())
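Example #3 (and the test_chip/test_mvtec examples below) uses a tic/toc-style Timer instead of a context manager: tic() marks a start, toc() returns the seconds since the last tic, and clear() resets the state. A minimal sketch under that assumption:

import time

class Timer:
    # Sketch of the tic/toc-style Timer these examples assume.
    def __init__(self):
        self.start = 0.0

    def tic(self):
        # mark the start of a measured section
        self.start = time.time()

    def toc(self):
        # return seconds elapsed since the last tic()
        return time.time() - self.start

    def clear(self):
        # reset any recorded state
        self.start = 0.0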
Example #4
    def __init__(self, kS, kV, trackwidth, trajectory):
        '''
        Creates a controller for following a PathWeaver trajectory.

        __init__(self, kS: Volts, kV: Volts * Seconds / Meters, trackwidth: Meters, trajectory: wpilib.trajectory.Trajectory)

        :param kS: The kS gain determined by characterizing the Robot's drivetrain
        :param kV: The kV gain determined by characterizing the Robot's drivetrain
        :param trackwidth: The horizontal distance between the left and right wheels of the tank drive.
        :param trajectory: The trajectory to follow. This can be generated by PathWeaver, or made by hand.
        '''

        self.kS = kS
        self.kV = kV

        self.trajectory = trajectory

        self.odometry = DifferentialDriveOdometry(
            Rotation2d(radians(0)), self.trajectory.initialPose())

        self.ramsete = RamseteController(2, 0.7)
        self.drive_kinematics = DifferentialDriveKinematics(trackwidth)

        self.are_wheel_speeds_zero = False
        self.timer = Timer()
Example #5
def empirical_evaluation(G, interval):
    """Empirical evaluation
    Plot the computation time (y-axis) given the number of nodes (x-axis)
    of subgraphs, for each interval of nodes.
    """
    X = list(range(interval, len(G.nodes) + 1, interval))
    Y_our = []
    Y_nx = []

    for i in X:
        sub_G = G.subgraph(list(G.nodes)[0:i])
        with Timer(f"Our betweenness_centrality ({i} nodes)") as t:
            bc.betweenness_centrality(sub_G)
            Y_our.append(t.get_time())
        with Timer(f"NetworkX betweenness_centrality ({i} nodes)") as t:
            nx.betweenness_centrality(sub_G)
            Y_nx.append(t.get_time())
        print("---")

    plt.figure()
    plt.title("Computing time of the function Betweenness centrality")
    plt.xlabel("Number of nodes")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our, label="Our implementation")
    plt.plot(X, Y_nx, label="NetworkX implementation")
    plt.legend()
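Here Timer takes a label and t.get_time() is read while the with-block is still open, so the sketch below has to measure live instead of waiting for __exit__ (again an assumption, not the benchmark's real class):

import time

class Timer:
    # Sketch of the labelled Timer assumed by Example #5.
    def __init__(self, label=""):
        self.label = label

    def __enter__(self):
        print(self.label)
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        pass

    def get_time(self):
        # called inside the with-block, so it must measure live
        return time.time() - self.start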
Example #6
 def __init__(self, *args, **kwargs):
     '''
     Do not overload me!
     '''
     self.testStart(*args, **kwargs)
     self.TEST_TIMER = Timer()
     self.PERIODIC_DONE = False
     self.END_DONE = False
Example #7
    def __init__(self):
        '''
        Initializes the magazine motors and the spin-up timer.
        '''
        self.feed_motor = SparkMax(MAGAZINE_FEED_MOTOR)
        self.left_agitator = SparkMax(MAGAZINE_LEFT_MOTOR)
        self.right_agitator = SparkMax(MAGAZINE_RIGHT_MOTOR)

        # Timer used to get motor up to speed
        self.timer = Timer()
Example #8
 def startup(self, now, persistant):
     self.state_machine.setup_states({
         'GAME': Game(),
         'CREDITS': Credits()
     }, 'CREDITS')
     self.state_machine.state.startup(now, persistant)
     self.should_flip = False
     self.blink = True
     self.timer_delay = Timer(1000)
Example #9
    def __init__(self):
        self.clockwise_limit_switch = DigitalInput(
            TURRET_CLOCKWISE_LIMIT_SWITCH)
        self.counterclockwise_limit_switch = DigitalInput(
            TURRET_COUNTERCLOCKWISE_LIMIT_SWITCH)

        self.turn_motor = SparkMax(TURRET_TURN_MOTOR)
        self.turn_pid = PIDController(0.4, 0.001, 0.02)

        self.shoot_motor_1 = Falcon(TURRET_SHOOT_MOTORS[0])
        self.shoot_motor_2 = Falcon(TURRET_SHOOT_MOTORS[1])
        self.timer = Timer()

        self.limelight = Limelight()
Example #10
 def startup(self, now, persistant):
     state_machine.State.startup(self, now, persistant)
     self.number_plateform_save = LandingConfig.numberOfPlateforms
     self.gravity_save = LanderConfig.GRAVITY
     LanderConfig.GRAVITY = 0
     LandingConfig.numberOfPlateforms = 0
     self.draw_credits = False
     self.timer_delay = Timer(CreditConfig.DELAY_BETWEEN_CREDITS)
     self.land = Landing()
     self.aircraft = Lander()
     self.aircraft.fuel = 1000
     self.timer_display_message = Timer(CreditConfig.CREDIT_DURATION)
     self.aircraft_team = pygame.sprite.Group(self.aircraft)
     self.aircraft.orientation = 0
     self.credit_index = 0
Example #11
def benchmark_girvan_newman(G):
    print("Starting benchmark_girvan_newman...")
    max_iteration_level = 10

    Y_our = []
    Y_nx = []

    our_it = our_girvan_newman(G)
    nx_it = nx_girvan_newman(G)

    our_it = itertools.islice(our_it, max_iteration_level)
    nx_it = itertools.islice(nx_it, max_iteration_level)

    i = 0
    while True:
        try:
            with Timer(f"Starting iteration {i} on our girvan newman") as t:
                next(our_it)
                Y_our.append(t.get_time())
            with Timer(f"Starting iteration {i} on nx girvan newman") as t:
                next(nx_it)
                Y_nx.append(t.get_time())
        except StopIteration:
            break
        i += 1

    X = list(range(len(Y_our)))
    plt.figure()
    plt.title("Time over iteration for executing girvan newman")
    plt.xlabel("Iteration level []")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our, label="Our implementation")
    plt.plot(X, Y_nx, label="Networkx implementation")
    plt.legend()
    plt.savefig("benchmark_girvan_newman.png")

    Y_our_cumulative = get_cumulative_array(Y_our)
    Y_nx_cumulative = get_cumulative_array(Y_nx)

    plt.figure()
    plt.title("Cumulative time over iteration for executing girvan newman")
    plt.xlabel("Iteration level []")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our_cumulative, label="Our implementation")
    plt.plot(X, Y_nx_cumulative, label="Networkx implementation")
    plt.legend()
    plt.savefig("benchmark_girvan_newman_cumulative.png")
Example #12
def _script_for_patch_feature(head,
                              tail,
                              device,
                              dataset='mscoco',
                              data_split='train'):
    data_dir = osp.join(DATA_ROOT, dataset)

    # set the model path here
    prototxt = osp.join(CAFFE_ROOT, 'models', 'ResNet',
                        'ResNet-152-deploy.prototxt')
    caffemodel = osp.join(CAFFE_ROOT, 'models', 'ResNet',
                          'ResNet-152-model.caffemodel')

    # load network
    net, transformer = load_network(prototxt, caffemodel, device)

    # prepare image files
    cap_file = osp.join(data_dir, 'captions_{}.json'.format(data_split))
    image_ids = json.load(open(cap_file, 'r'))['image_ids']
    im_files = [
        'COCO_{}2014_'.format(data_split) + str(i).zfill(12) + '.jpg'
        for i in image_ids
    ]
    im_files = [osp.join(data_dir, data_split + '2014', i) for i in im_files]
    with h5py.File(osp.join(data_dir, 'bbox.h5'), 'r') as f:
        bbox = np.array(f[data_split])

    # initialize h5 file
    save_file = osp.join(data_dir, 'features', 'features_30res.h5')
    with h5py.File(save_file) as f:
        if data_split not in f:
            f.create_dataset(data_split, [len(im_files), 30, 2048], 'float32')

    # computing
    timer = Timer()
    print '\n\n... computing'
    for i in xrange(head, tail):
        timer.tic()
        im = caffe.io.load_image(im_files[i])  # (h, w, c)
        with h5py.File(save_file) as f:
            feat = cnn_patch_feature(net, transformer, im, bbox[i, :, :])
            f[data_split][i, :] = feat
        print '[{:d}]  {}  [{:.3f} sec]'.format(i,
                                                osp.split(im_files[i])[-1],
                                                timer.toc())
Example #13
def test_chip(test_set, rebuilder, transform, save_dir):
    _t = Timer()
    cost_time = list()
    for type in test_set.test_dict:
        img_list = test_set.test_dict[type]
        if not os.path.exists(os.path.join(save_dir, type)):
            os.mkdir(os.path.join(save_dir, type))
        for k, path in enumerate(img_list):
            image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            _t.tic()
            ori_img, input_tensor = transform(image)
            out = rebuilder.inference(input_tensor)
            re_img = out[0]
            s_map = ssim_seg(ori_img, re_img, win_size=11, gaussian_weights=True)
            mask = seg_mask(s_map, threshold=32)
            inference_time = _t.toc()
            cat_img = np.concatenate((ori_img, re_img, mask), axis=1)
            cv2.imwrite(os.path.join(save_dir, type, '{:d}.png'.format(k)), cat_img)
            cost_time.append(inference_time)
            if (k+1) % 20 == 0:
                print('{}th image, cost time: {:.1f}'.format(k+1, inference_time*1000))
            _t.clear()
    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))
Example #14
class Magazine:
    '''
    This object is responsible for managing the singulator,
    which is both the intake and the magazine.
    '''
    def __init__(self):
        '''
        Initializes the magazine motors and the spin-up timer.
        '''
        self.feed_motor = SparkMax(MAGAZINE_FEED_MOTOR)
        self.left_agitator = SparkMax(MAGAZINE_LEFT_MOTOR)
        self.right_agitator = SparkMax(MAGAZINE_RIGHT_MOTOR)

        # Timer used to get motor up to speed
        self.timer = Timer()

    def is_ready(self):
        '''
        Checks if the motor underneath the shooter is at maximum voltage.
        '''
        return self.timer.get() > 0.25

    def stop(self):
        '''
        Stops all the motors. 
        Note: It's better to use stopMotor() instead of setting the percentage to zero.
        '''
        self.feed_motor.stopMotor()
        self.left_agitator.stopMotor()
        self.right_agitator.stopMotor()
        self.timer.start()

    def agitate(self):
        '''
        Spins the agitators in opposite directions to stir the balls in the magazine.
        '''
        if self.ready_to_index():
            self.left_agitator.set_percent_output(-0.5)
            self.right_agitator.set_percent_output(0.5)

    def dump(self):
        '''
        Runs the feed motor at half speed while agitating the magazine.
        '''
        self.feed_motor.set_percent_output(0.5)
        self.agitate()
Example #15
def test_chip(test_set, rebuilder, transform, save_dir, configs):
    _t = Timer()
    cost_time = list()
    iou_list = {}
    s_map_list = list()
    for type in test_set.test_dict:
        img_list = test_set.test_dict[type]
        if not os.path.exists(os.path.join(save_dir, type)):
            os.mkdir(os.path.join(save_dir, type))
            os.mkdir(os.path.join(save_dir, type, 'ori'))
            os.mkdir(os.path.join(save_dir, type, 'gen'))
            os.mkdir(os.path.join(save_dir, type, 'mask'))
        if not os.path.exists(os.path.join(save_dir, type,'ROC_curve')):
            os.mkdir(os.path.join(save_dir, type, 'ROC_curve'))
        for k, path in enumerate(img_list):
            name= path.split('/')[-1]
            image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            _t.tic()
            ori_img, input_tensor = transform(image)
            out = rebuilder.inference(input_tensor)
            re_img = out[0]
            s_map = ssim_seg(ori_img, re_img, win_size=11, gaussian_weights=True)
            _h, _w = image.shape
            s_map_save = cv2.resize(s_map, (_w, _h))
            s_map_list.append(s_map_save.reshape(-1,1))
            mask = seg_mask(s_map, threshold=128)
            inference_time = _t.toc()
            if configs['db']['resize'] == [832, 832]:
                #cat_img = np.concatenate((ori_img[32:-32,32:-32], re_img[32:-32,32:-32], mask[32:-32,32:-32]), axis=1)
                cv2.imwrite(os.path.join(save_dir, type, 'ori', 'mask{:d}.png'.format(k)), ori_img[32:-32,32:-32])
                cv2.imwrite(os.path.join(save_dir, type, 'gen', 'mask{:d}.png'.format(k)), re_img[32:-32,32:-32])
                cv2.imwrite(os.path.join(save_dir, type, 'mask', 'mask{:d}.png'.format(k)), mask[32:-32,32:-32])
            elif configs['db']['resize'] == [768, 768]:
                cv2.imwrite(os.path.join(save_dir, type, 'ori', 'mask{:d}.png'.format(k)), ori_img)
                cv2.imwrite(os.path.join(save_dir, type, 'gen', 'mask{:d}.png'.format(k)), re_img)
                cv2.imwrite(os.path.join(save_dir, type, 'mask', 'mask{:d}.png'.format(k)), mask)
            elif configs['db']['resize'] == [256, 256]:
                cv2.imwrite(os.path.join(save_dir, type, 'ori', name), ori_img)
                cv2.imwrite(os.path.join(save_dir, type, 'gen', name), re_img)
                cv2.imwrite(os.path.join(save_dir, type, 'mask', name), mask)
            else:
                raise Exception("invaild image size")
            #cv2.imwrite(os.path.join(save_dir, type, '{:d}.png'.format(k)), cat_img)
            cost_time.append(inference_time)
            if (k+1) % 20 == 0:
                print('{}th image, cost time: {:.1f}'.format(k+1, inference_time*1000))
            _t.clear()
        torch.save(s_map_list, os.path.join(save_dir, 's_map.pth'))
    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))
    test_set.eval(save_dir)
Example #16
def train(model, beam_searcher, train_set, valid_set, save_dir, lr,
          display=100, starting=0, endding=20, validation=2000, patience=10, logger=None):
    """
    display:    output training information every 'display' mini-batches
    starting:   the starting snapshot, > 0 when resuming training
    endding:    the minimum number of training snapshots
    validation: evaluate on validation set every 'validation' mini-batches
    patience:   increase of endding when a better model is found
    """
    train_func, _ = adam_optimizer(model, lr=lr)
    print '... training'
    logger = Logger(save_dir) if logger is None else logger
    timer = Timer()
    loss = 0
    imb = starting * validation
    best = -1
    best_snapshot = -1
    timer.tic()
    while imb < endding*validation:
        imb += 1
        x = train_set.iterate_batch()
        loss += train_func(*x)[0] / display
        if imb % display == 0:
            logger.info('snapshot={}, iter={},  loss={:.6f},  time={:.1f} sec'.format(imb/validation, imb, loss, timer.toc()))
            timer.tic()
            loss = 0
        if imb % validation == 0:
            saving_index = imb/validation
            model.save_to_dir(save_dir, saving_index)
            try:
                scores = validate(beam_searcher, valid_set, logger)
                if scores[3] > best:
                    best = scores[3]
                    best_snapshot = saving_index
                    endding = max(saving_index+patience, endding)
                logger.info('    ---- this Bleu-4 = [%.3f],   best Bleu-4 = [%.3f], endding -> %d' % \
                            (scores[3], best, endding))
            except OSError:
                print '[Oops!! OS Error]'

    logger.info('Training done, best snapshot is [%d]' % best_snapshot)
    return best_snapshot
Example #17
class Demo(state_machine.State):

    def __init__(self):
        state_machine.State.__init__(self)
        self.state_machine = state_machine.StateMachine()
        self.should_flip = False
        self.timer_delay = None
        self.blink = True

    def startup(self, now, persistant):
        self.state_machine.setup_states({
            'GAME': Game(),
            'CREDITS': Credits()
        }, 'CREDITS')
        self.state_machine.state.startup(now, persistant)
        self.should_flip = False
        self.blink = True
        self.timer_delay = Timer(1000)

    def cleanup(self):
        self.done = False
        return self.persist

    def get_event(self, event):
        if event.type == pygame.KEYDOWN:
            self.done = True
            self.next = 'GAME'
            self.state_machine.state.cleanup()

    def update(self, keys, now):
        if self.state_machine.state_name == 'GAME' and not self.state_machine.state.AI:
            if not self.should_flip:
                self.state_machine.state_dict['GAME'].AI = True
                self.should_flip = True
                self.state_machine.state_dict['GAME'].restart_game_state()
            else:
                self.should_flip = False
                self.state_machine.state.next = 'CREDITS'
                self.state_machine.flip_state()

        self.state_machine.update(keys, now)
        if self.timer_delay.check_tick(now):
            self.blink = not self.blink

    def draw(self, surface, interpolate):
        self.state_machine.draw(surface, interpolate)
        if self.blink:
            render_text_center(surface,
                               '{} VERSION {}\nPRESS ANY KEYS TO START'.format(GameConfig.CAPTION_WINDOW.capitalize(),
                                                                               GameConfig.VERSION_TEXT.capitalize()),
                               GameConfig.COLOR_CENTER_TEXT,
                               GameConfig.WINDOW_H - GameConfig.FONTSIZE_CENTER_TEXT * 2,
                               GameConfig.FONTSIZE_CENTER_TEXT)
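Examples #8 and #17 use yet another flavor: Timer(1000) holds a millisecond delay, and check_tick(now) fires once per interval (the Credits example at the end also re-arms it by assigning to the `timer` attribute directly). A plausible sketch of that assumed interface:

class Timer:
    # Sketch of the interval Timer assumed by the pygame examples:
    # check_tick(now) returns True and re-arms once `delay` ms have
    # passed since the stored reference time `timer`.
    def __init__(self, delay):
        self.delay = delay
        self.timer = 0

    def check_tick(self, now):
        if now - self.timer >= self.delay:
            self.timer = now
            return True
        return False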
Example #18
def benchmark_edge_betweenness_centrality(G):
    print("Starting benchmark_edge_betweenness_centrality...")

    X = list(range(0, len(G.nodes), 5))
    Y_our = []
    Y_nx = []

    for i in X:
        sub_G = G.subgraph(list(G.nodes)[0:i])
        with Timer("Starting our edge_betweenness_centrality") as t:
            our_edge_betweenness_centrality(sub_G)
            Y_our.append(t.get_time())
        with Timer("Starting nx edge_betweenness_centrality") as t:
            nx_edge_betweenness_centrality(sub_G)
            Y_nx.append(t.get_time())

    plt.figure()
    plt.title("Time over graph size for executing edge_betweenness_centrality")
    plt.xlabel("Iteration level []")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our, label="Our implementation")
    plt.plot(X, Y_nx, label="Networkx implementation")
    plt.legend()
    plt.savefig("benchmark_edge_betweenness_centrality.png")
Example #19
def HASE(b4, A_inverse, b_cov, C, N_con, DF):

    with Timer() as t:

        B13 = b_cov
        B4 = b4

        A1_B_constant = np.tensordot(A_inverse[:, :, 0:(N_con)],
                                     B13,
                                     axes=([2], [0]))

        A1_B_nonconstant = np.einsum('ijk,il->ijl',
                                     A_inverse[:, :, N_con:N_con + 1], B4)

        A1_B_full = A1_B_constant + A1_B_nonconstant

        BT_A1B_const = np.einsum('ij,lji->li', B13.T, A1_B_full[:,
                                                                0:(N_con), :])

        BT_A1B_nonconst = np.einsum('ijk,ijk->ijk', B4[:, None, :],
                                    A1_B_full[:, (N_con):N_con + 1, :])

        BT_A1B_full = BT_A1B_const[:, None, :] + BT_A1B_nonconst

        C_BTA1B = BT_A1B_full - C.reshape(1, -1)

        C_BTA1B = np.abs(C_BTA1B)

        a44_C_BTA1B = C_BTA1B * A_inverse[:, (N_con):N_con + 1,
                                          (N_con):N_con + 1]

        a44_C_BTA1B = np.sqrt((a44_C_BTA1B))

        t_stat = np.sqrt(DF) * np.divide(A1_B_full[:, (N_con):N_con + 1, :],
                                         a44_C_BTA1B)

        SE = a44_C_BTA1B / np.sqrt(DF)

    print "time to compute GWAS for {} phenotypes and {} SNPs .... {} sec".format(
        b4.shape[1], A_inverse.shape[0], t.secs)
    return t_stat, SE
Example #20
def train(model, beam_searcher, train_set, valid_set, save_dir, lr,
          display=100, starting=0, endding=20, validation=2000, life=10, logger=None):
    """
    display:    output training information every 'display' mini-batches
    starting:   the starting snapshot, > 0 when resuming training
    endding:    the minimum number of training snapshots
    validation: evaluate on validation set every 'validation' mini-batches
    life:       increase of endding when a better model is found
    """
    train_func, _ = adam_optimizer(model, lr=lr)
    print '... training'
    logger = Logger(save_dir) if logger is None else logger
    timer = Timer()
    loss = 0
    imb = starting * validation
    best = -1
    best_snapshot = -1
    timer.tic()
    while imb < endding*validation:
        imb += 1
        x = train_set.iterate_batch()
        loss += train_func(*x)[0] / display
        if imb % display == 0:
            logger.info('snapshot={}, iter={},  loss={:.6f},  time={:.1f} sec'.format(imb/validation, imb, loss, timer.toc()))
            timer.tic()
            loss = 0
        if imb % validation == 0:
            saving_index = imb/validation
            model.save_to_dir(save_dir, saving_index)
            try:
                scores = validate(beam_searcher, valid_set, logger)
                if scores[3] > best:
                    best = scores[3]
                    best_snapshot = saving_index
                    endding = max(saving_index+life, endding)
                logger.info('    ---- this Bleu-4 = [%.3f],   best Bleu-4 = [%.3f], endding -> %d' % \
                            (scores[3], best, endding))
            except OSError:
                print '[Oops!! OS Error]'

    logger.info('Training done, best snapshot is [%d]' % best_snapshot)
    return best_snapshot
Example #21
    def run(self, matrix, maxiter=30):   # matrix shape(#doc, #word)
        """
        Run the Gibbs sampler.
        """
        n_docs, vocab_size = matrix.shape

        self._initialize(matrix)

        for it in range(maxiter):
            timer = Timer()
            timer.start()

            print('--- iter '+str(it))

            for m in range(n_docs):
                # the i-th word w in doc m (here, w is the index of the word in td_matrix)
                for i, w in enumerate(word_indices(matrix[m, :])):
                    z = self.topics[(m, i)]
                    self.nmz[m, z] -= 1
                    self.nm[m] -= 1
                    self.nzw[z, w] -= 1
                    self.nz[z] -= 1

                    p_z = self._conditional_distribution(m, w)
                    z = sample_index(p_z)

                    self.nmz[m, z] += 1
                    self.nm[m] += 1
                    self.nzw[z, w] += 1
                    self.nz[z] += 1
                    self.topics[(m, i)] = z

            timer.print_time()
            print('--- end iter')

            # FIXME: burn-in and lag!
            yield self.phi_pzw()
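The Gibbs sampler only needs start() and print_time(); a two-method sketch covering that assumed interface:

import time

class Timer:
    # Sketch of the Timer assumed by Example #21: start() marks a point,
    # print_time() reports seconds since then.
    def start(self):
        self._t0 = time.time()

    def print_time(self):
        print('elapsed: {:.2f} s'.format(time.time() - self._t0))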
Example #22
class Turret:
    '''
    The object that's responsible for managing the shooter.
    '''
    def __init__(self):
        self.clockwise_limit_switch = DigitalInput(
            TURRET_CLOCKWISE_LIMIT_SWITCH)
        self.counterclockwise_limit_switch = DigitalInput(
            TURRET_COUNTERCLOCKWISE_LIMIT_SWITCH)

        self.turn_motor = SparkMax(TURRET_TURN_MOTOR)
        self.turn_pid = PIDController(0.4, 0.001, 0.02)

        self.shoot_motor_1 = Falcon(TURRET_SHOOT_MOTORS[0])
        self.shoot_motor_2 = Falcon(TURRET_SHOOT_MOTORS[1])
        self.timer = Timer()

        self.limelight = Limelight()

    def set_target_angle(self, angle):
        '''
        Sets the target angle of the turret. This will use a PID to turn the
        turret to the target angle.
        '''

        target_encoder = angle_to_encoder(angle)
        self.turn_pid.setSetpoint(target_encoder)

    def update(self):
        '''
        This is used to continuously update the turret's event loop.

        All this manages for now is the turret's PID controller.

        The shoot motor is constantly running at a low percentage until we need it.
        '''

        motor_speed = self.turn_pid.calculate(
            self.limelight.get_target_screen_x())

        if self.clockwise_limit_switch.get() and motor_speed < 0:
            self.turn_motor.set_percent_output(motor_speed)

        elif self.counterclockwise_limit_switch.get() and motor_speed > 0:
            self.turn_motor.set_percent_output(motor_speed)

    def shoot(self):
        '''
        The wheel to shoot will rev up completely before balls start feeding
        in from the singulator.
        '''
        # One of the motors will be reversed, so make sure the shoot motor has the correct ID!
        speed = self.shoot_motor_1.get_percent_output()
        if speed < 1:
            speed += 0.02
        elif speed > 1:
            speed -= 0.02

        self.shoot_motor_1.set_percent_output(speed)
        self.shoot_motor_2.set_percent_output(-speed)

    def idle(self):
        '''
        Resets the motors back to their default state.
        '''
        speed = self.shoot_motor_1.get_percent_output()
        if speed < 0.5:
            speed += 0.02
        elif speed > 0.5:
            speed -= 0.02

        self.shoot_motor_1.set_percent_output(speed)
        self.shoot_motor_2.set_percent_output(-speed)

    def is_full_speed(self):
        '''
        Returns whether the motor is at full speed.
        '''
        return self.timer.get() > 0.4
Example #23
def test_mvtec(test_set, rebuilder, transform, save_dir, threshold_seg_dict, val_index):
    _t = Timer()
    cost_time = list()
    threshold_dict = dict()
    if not os.path.exists(os.path.join(save_dir, 'ROC_curve')):
        os.mkdir(os.path.join(save_dir, 'ROC_curve'))
    for item in test_set.test_dict:
        threshold_list = list()
        item_dict = test_set.test_dict[item]

        if not os.path.exists(os.path.join(save_dir, item)):
            os.mkdir(os.path.join(save_dir, item))
            os.mkdir(os.path.join(save_dir, item, 'ori'))
            os.mkdir(os.path.join(save_dir, item, 'gen'))
            os.mkdir(os.path.join(save_dir, item, 'mask'))
        for type in item_dict:
            if not os.path.exists(os.path.join(save_dir, item, 'ori', type)):
                os.mkdir(os.path.join(save_dir, item, 'ori', type))
            if not os.path.exists(os.path.join(save_dir, item, 'gen', type)):
                os.mkdir(os.path.join(save_dir, item, 'gen', type))
            if not os.path.exists(os.path.join(save_dir, item, 'mask', type)):
                os.mkdir(os.path.join(save_dir, item, 'mask', type))
            _time = list()
            img_list = item_dict[type]
            for path in img_list:
                image = cv2.imread(path, cv2.IMREAD_COLOR)
                ori_h, ori_w, _ = image.shape
                _t.tic()
                ori_img, input_tensor = transform(image)
                out = rebuilder.inference(input_tensor)
                re_img = out.transpose((1, 2, 0))
                s_map = ssim_seg(ori_img, re_img, win_size=11, gaussian_weights=True)
                s_map = cv2.resize(s_map, (ori_w, ori_h))
                if val_index == 1:
                    mask = seg_mask(s_map, threshold=threshold_seg_dict[item])
                elif val_index == 0:
                    mask = seg_mask(s_map, threshold=threshold_seg_dict)
                else:
                    raise Exception("Invalid val_index")

                inference_time = _t.toc()
                img_id = path.split('.')[0][-3:]
                cv2.imwrite(os.path.join(save_dir, item, 'ori', type, '{}.png'.format(img_id)), ori_img)
                cv2.imwrite(os.path.join(save_dir, item, 'gen', type, '{}.png'.format(img_id)), re_img)
                cv2.imwrite(os.path.join(save_dir, item, 'mask', type, '{}.png'.format(img_id)), mask)
                _time.append(inference_time)

                if type != 'good':
                    threshold_list.append(s_map)
                else:
                    pass

            cost_time += _time
            mean_time = np.array(_time).mean()
            print('Evaluate: Item:{}; Type:{}; Mean time:{:.1f}ms'.format(item, type, mean_time*1000))
            _t.clear()
        threshold_dict[item] = threshold_list
    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))

    # evaluate results
    print('Evaluating...')
    test_set.eval(save_dir, threshold_dict)
Example #24
def run(cfg: config.EvolutionConfig) -> None:
    toolbox = create_toolbox(cfg)

    population = toolbox.population(n=cfg.population_size)
    fitnesses = [toolbox.evaluate(el) for el in population]
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit

    CXPB, MXPB = 0.3, 0.3
    child_per_parent = 2

    i = 0
    with Timer() as timer, SolutionTracer(
            filename=
            f"Evolutionary_CXPB_{CXPB}_MXPB_TS_{TOURNAMENT_SIZE}_{MXPB}_CPP_{child_per_parent}_I_{cfg.max_iterations}_PS_{cfg.population_size}",
            max_repetitions=cfg.max_iterations,
            id=cfg.id,
            clues=cfg.clues) as solution_tracer:
        while cfg.max_iterations > i:
            i += 1

            offspring = population
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, child_per_parent * offspring))

            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    # if random.random() < 0.5:
                    toolbox.mate_score(child1, child2)
                    # else:
                    #     toolbox.mate_r(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                if random.random() < MXPB:
                    if random.random() < 0.8:
                        toolbox.mutate_swap_many(mutant)
                    else:
                        toolbox.mutate_swap_one(mutant)
                    del mutant.fitness.values

            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            best_offspring = []
            if child_per_parent > 1:
                for begin, end in zip(
                        range(0,
                              len(offspring) - child_per_parent + 1,
                              child_per_parent),
                        range(child_per_parent,
                              len(offspring) + 1, child_per_parent)):
                    best_offspring.append(
                        sorted(offspring[begin:end],
                               key=lambda x: x.fitness.values[0])[0])
            else:
                best_offspring = offspring

            population = choose_unique(best_offspring)
            population = toolbox.select_t(population, cfg.population_size)

            # saving and checking stats
            best = tools.selBest(population, 1)[0]
            solution_tracer.update(best, timer.elapsed)
            if best.fitness.values[0] == 0:
                break
            if i % 100 == 0:
                print(i, best.fitness)
                print(best.sudoku)

    best = tools.selBest(population, 1)[0]
    print(best.fitness)
    print(best.sudoku)
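Example #24 reads timer.elapsed while the with-block is still running, so here `elapsed` has to be a live property rather than a value set on exit; a sketch under that assumption:

import time

class Timer:
    # Sketch of the Timer assumed by Example #24: `elapsed` is read
    # mid-block, so it is computed on access.
    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, *exc):
        pass

    @property
    def elapsed(self):
        return time.time() - self._start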
Example #25
def train():
    args = get_arguments()
    print '*' * 10 + ' args ' + '*' * 10
    print args
    print '*' * 26
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    if args.data_provider == "coco":
        #pass
        coco_provider_train = COCO_detection_train(args.root_dir,
                                                   args.json_file_train)
        coco_provider_val = COCO_detection_train(args.root_dir,
                                                 args.json_file_val)

        data_provider_train = MultiDataProvider([coco_provider_train],
                                                crop_size=args.crop_size,
                                                min_ratio=min_ratio,
                                                max_ratio=max_ratio,
                                                hor_flip=hor_flip)

        data_provider_val = MultiDataProvider([coco_provider_val],
                                              crop_size=args.crop_size,
                                              min_ratio=1,
                                              max_ratio=1.1,
                                              hor_flip=False)

    else:
        raise RuntimeError, 'unknown data provider type'

    solver_param = SolverParameter()
    solver_param.type = args.type
    solver_param.base_lr = args.learning_rate
    solver_param.lr_policy = args.lr_policy
    solver_param.gamma = args.gamma
    solver_param.stepsize = args.step_size
    solver_param.gpu_list = args.gpus_list
    solver_param.exclude_scope = args.exclude_scope

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    solver = Solver(solver_param, sess)

    total_size = args.batch_size * solver.num_gpus
    print('total size for single forward: ', total_size)

    with tf.name_scope('input'):
        inputs = {
            'images':
            tf.placeholder(tf.float32,
                           shape=(total_size, args.crop_size, args.crop_size,
                                  3),
                           name='input_images')
        }
        label_gt = {
            'labels':
            tf.placeholder(dtype=tf.int32,
                           shape=(total_size, int(args.crop_size * scale),
                                  int(args.crop_size * scale)),
                           name='input_label')
        }

    global_step = tf.train.get_or_create_global_step()
    nas_refine_net = nas_refine_model(num_cls=args.num_classes,
                                      is_training=True,
                                      ohem=args.ohem,
                                      mining_ratio=args.mining_ratio)

    train_op, total_loss, metric = solver.deploy(
        model_fn=nas_refine_net.infer,
        loss_fn=nas_refine_net.loss_fn,
        eval_fn=nas_refine_net.eval_metric,
        inputs=inputs,
        targets=label_gt,
        total_size=total_size,
        regularize=True)

    miou = metric['miou']
    tf.summary.scalar('miou', miou)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('lr', solver.learning_rate)

    ### init
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    ### restore
    flag = STATUS.NO_INIT
    variables_to_restore = None
    if args.init_weights is not None:
        flag = STATUS.FINTTUNE
        # if not os.path.exists(args.init_weights):
        #     raise RuntimeError, '{} does not exist for finetuning'.format(args.init_weights)
        init_weights = args.init_weights
        variables_to_restore = solver.get_variables_finetune()

    if args.solver_state is not None:
        flag = STATUS.CONTINUE
        if os.path.isdir(args.solver_state):
            solver_state = tf.train.latest_checkpoint(args.solver_state)
        else:
            # if not os.path.exists(args.solver_state):
            #     raise RuntimeError, '{} does not exist for continue training'.format(args.solver_state, flag)
            solver_state = args.solver_state
        variables_to_restore = solver.get_variables_continue_training()

    if flag == STATUS.FINTTUNE:
        loader = tf.train.Saver(var_list=variables_to_restore)
        loader.restore(sess, init_weights)
        print('{} loaded'.format(init_weights))
    elif flag == STATUS.CONTINUE:
        loader = tf.train.Saver(var_list=variables_to_restore)
        loader.restore(sess, solver_state)
        print('{} loaded'.format(solver_state))

    all_summaries = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(args.save_dir)
    saver = tf.train.Saver()

    label_sz = int(args.crop_size * scale)
    #timer = Timer()
    for step in range(1, args.num_steps):
        #print 'step: ', step
        #data_blob = np.zeros((total_size, args.crop_size, args.crop_size, 3), dtype=np.float32)
        #label_blob = np.zeros((total_size, label_sz, label_sz), dtype=np.int32)
        data_blob = np.zeros((0, args.crop_size, args.crop_size, 3),
                             dtype=np.float32)
        label_blob = np.zeros((0, label_sz, label_sz), dtype=np.int32)
        timer = Timer()
        timer.tic()
        for cur_id in range(solver.num_gpus):
            #start = int(cur_id * args.batch_size)
            #end = int((cur_id + 1) * args.batch_size)
            while True:
                images, labels = data_provider_train.get_batch(args.batch_size)
                #data_blob[start:end] = images
                labels_resize = np.zeros((labels.shape[0], label_sz, label_sz),
                                         dtype=np.int32)
                for n in range(labels.shape[0]):
                    labels_resize[n] = cv2.resize(
                        labels[n], (label_sz, label_sz),
                        interpolation=cv2.INTER_NEAREST)

                #label_blob[start:end] = labels_resize

                if np.any(labels_resize >= 0):
                    data_blob = np.concatenate((data_blob, images), axis=0)
                    label_blob = np.concatenate((label_blob, labels_resize),
                                                axis=0)
                    break

        assert label_blob.shape[0] == total_size
        assert data_blob.shape[0] == total_size

        ### run training op
        _, losses_value, _, summary, global_step_val = sess.run(
            [train_op, total_loss, metric['op'], all_summaries, global_step],
            feed_dict={
                inputs['images']: data_blob,
                label_gt['labels']: label_blob
            })

        summary_writer.add_summary(summary, global_step=global_step_val)

        ### show
        if step % args.show_steps == 0:
            t1 = timer.toc()
            print(
                'step: {}, lr: {}, loss_value: {}, miou: {}, time: {} per iter'
                .format(global_step_val, sess.run(solver.learning_rate),
                        losses_value, miou.eval(session=sess), t1))

            #time = timer.toc()

            ## save
        if step % args.save_step == 0:
            save(saver, sess, args.save_dir, step=global_step_val)

    ### test
        if step % args.val_steps == 0:
            test_loss = 0
            print('#' * 5 + ' testing ' + '#' * 5)
            for kk in range(test_iter):
                for cur_id in range(solver.num_gpus):
                    start = int(cur_id * args.batch_size)
                    end = int((cur_id + 1) * args.batch_size)
                    while True:
                        images, labels = data_provider_val.get_batch(
                            args.batch_size)

                        data_blob[start:end] = images
                        labels_resize = np.zeros(
                            (labels.shape[0], label_sz, label_sz),
                            dtype=np.int32)
                        for n in range(labels.shape[0]):
                            labels_resize[n] = cv2.resize(
                                labels[n], (label_sz, label_sz),
                                interpolation=cv2.INTER_NEAREST)
                        label_blob[start:end] = labels_resize
                        if np.any(labels_resize >= 0):
                            break

                losses_value = sess.run([total_loss],
                                        feed_dict={
                                            inputs['images']: data_blob,
                                            label_gt['labels']: label_blob
                                        })
                test_loss += losses_value[0]

            print('global_step_val: {}, test loss: {}'.format(
                global_step_val,
                float(test_loss) / test_iter))
            print('#' * 19)
Example #26
# with Timer('Explicit inversion'):
# 	A_inv = np.linalg.inv(A)
# 	x = np.dot(A_inv, -s)  # fun fact: this can be also written as A_inv @ (-s)

# scipy's LU factorization
# with Timer('LU solver'):
# 	LU, piv = sl.lu_factor(A, overwrite_a=False, check_finite=False)
# 	x = sl.lu_solve((LU, piv), -s)

# scipy version of linalg.solve
# with Timer('scipy.linalg.solve'):
# 	x = sl.solve(A, -s, assume_a='sym', overwrite_a=False, overwrite_b=True, check_finite=False)

# low-level LAPACK solver
lapack_gesv = sl.get_lapack_funcs('gesv', (A, -s))
with Timer('Low-level LAPACK solver'):
    _, _, x, _ = lapack_gesv(A, -s)

# LDL decomposition - rather slow and pretty numerically unstable
# with Timer('LDL decomposition'):
# 	lu, d, perm = sl.ldl(A, overwrite_a=True, check_finite=False)
# 	L = lu[perm]
# 	z = sl.solve(L, -s, overwrite_a=True, check_finite=False)
# 	y = sl.solve(d, z, overwrite_a=True, check_finite=False)
# 	x = sl.solve(d @ L.T, y, overwrite_a=True, check_finite=False)

# plot the sites, colored with their value of x
plt.figure(figsize=(8, 8))
sc = plt.scatter(positions[:, 0], positions[:, 1], c=x, cmap='viridis')
plt.gca().set_aspect('equal')
plt.colorbar(sc, fraction=0.046, pad=0.035)
    with open(cfg_file, "r") as f:
        configs = json.load(f)
    start_epoch = configs['op']['start_epoch']
    max_epoch = configs['op']['max_epoch']
    learning_rate = configs['op']['learning_rate']
    decay_rate = configs['op']['decay_rate']
    epoch_steps = configs['op']['epoch_steps']
    snapshot = configs['op']['snapshot']
    batch_size = configs['db']['batch_size']
    loader_threads = configs['db']['loader_threads']
    save_dir = configs['system']['save_dir']

    # init Timer
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    _t = Timer()

    # create log file
    log = Log(args.log_dir, args.cfg)
    log.wr_cfg(configs)

    # load data set
    training_set = load_data_set_from_factory(configs, 'train')
    print('Data set: {} has been loaded'.format(configs['db']['name']))

    # load model
    trainer = load_training_model_from_factory(configs, ngpu=args.ngpu)
    if configs['system']['resume']:
        trainer.load_params(configs['system']['resume_path'])
    print('Model: {} has been loaded'.format(configs['model']['name']))
Example #28
    def fit( self, data,
            iterations=1000,
            batch_size=64,
            k=2,
            out_dir='./out',
            out_iter=5 ):
        #some helpful things
        ones = np.ones( ( batch_size, 1 ) ).astype( 'float32' )
        minus_ones = ones * -1.
        timer = Timer( iterations )
        output_pattern = out_dir + '/{:0' + str( len( str( iterations ) ) ) + 'd}.png' #erghh if it is stupid but it works it is not stupid
        clear = '\r                                                                                                                                                '
        progress = '{} | D( loss:\t{:0.2f}, diff:\t{:0.2f}, norm:\t{:0.2f} ); G( loss:\t{:0.2f} )'

        #get some noise:
        out_samples = self.make_some_noise()
        distance_samples = self.make_some_noise( 10 )

        print( distance( distance_samples ) )

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # print( distance( np.clip( ( self.generate( distance_samples ) + 1 ) * 127.5, 0, 255 ) ) )
            io.imsave( out_dir + '/real_samples.png',
                image_grid( np.clip( ( data[ : 64 ] + 1 ) * 127.5, 0, 255 ) ), # make sure we have valid values
                plugin='pil' )

        for i in range( iterations ):
            timer.start_step()
            #train discriminator
            self.make_trainable( True )
            for j in range( k ):
                real_data = data[ np.random.choice( data.shape[0], batch_size, replace=False ), : ]
                fake_data = self.generate( self.make_some_noise( batch_size ) )
                epsilon = np.random.random( batch_size )
                interpolation = ( real_data.T * epsilon ).T + ( fake_data.T * ( 1 - epsilon ) ).T
                d_loss, d_diff, d_norm = self.dis_trainer.train_on_batch( [ real_data, fake_data, interpolation ], [ ones ] * 2 )

                ##something messed up
                # for l in self.dis_trainer.layers:
                #     weights = l.get_weights()
                #     replace = False
                #     for j, w in enumerate( weights ):
                #         if np.isnan( w ).any():
                #             weights[ j ] = np.nan_to_num( w )
                #             replace = True
                #     if replace:
                #         l.set_weights( weights )
                # if replace:
                #     print('\nfucking NaN man')

            #train generator
            self.make_trainable( False )
            g_loss = self.gan.train_on_batch( self.make_some_noise( batch_size ), minus_ones )

            ##something messed up
            # for l in self.gan.layers:
            #     weights = l.get_weights()
            #     replace = False
            #     for j, w in enumerate( weights ):
            #         if np.isnan( w ).any():
            #             weights[ j ] = np.nan_to_num( w )
            #             replace = True
            #     if replace:
            #         l.set_weights( weights )
            # if replace:
            #     print('\nfucking NaN man')

            if np.isnan( d_loss ):
                for j,l in enumerate( self.gan.layers):
                    for k, w in enumerate(  l.get_weights() ):
                        w = np.nan_to_num( w )
                        print( '{}/{}: {}/{}'.format( j, k, np.min( w ), np.max( w ) )  )


            if i % out_iter == 0:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # print( distance( np.clip( ( self.generate( distance_samples ) + 1 ) * 127.5, 0, 255 ) ) )
                    io.imsave( output_pattern.format( i ),
                        image_grid( np.clip( ( self.generate( out_samples ) + 1 ) * 127.5, 0, 255 ) ), # make sure we have valid values
                        plugin='pil' )




            timer.stop_step()
            #progress reporting
            sys.stdout.write( clear )
            sys.stdout.flush()
            sys.stdout.write( '\r' + progress.format( timer.out_str(), d_loss, d_diff, d_norm, g_loss ) )
            sys.stdout.flush()
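This GAN loop constructs Timer(iterations) and drives it with start_step()/stop_step()/out_str(), i.e. a progress timer with a rough ETA. A sketch of that assumed interface:

import time

class Timer:
    # Sketch of the progress Timer assumed by Example #28.
    def __init__(self, total_steps):
        self.total = total_steps
        self.done = 0
        self.step_time = 0.0

    def start_step(self):
        self._t0 = time.time()

    def stop_step(self):
        self.step_time = time.time() - self._t0
        self.done += 1

    def out_str(self):
        # progress plus a crude ETA based on the last step's duration
        eta = self.step_time * (self.total - self.done)
        return '{}/{} (~{:.0f}s left)'.format(self.done, self.total, eta)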
Example #29
    def _HRZColumnsMap(self):
        return {}

    def _CollectInHomeDeviceID(self):
        return [
            "W634iMCwmSCcjQkltb7d38btv000%02d" % i
            for i in [18, 6, 23, 1, 13, 12, 4, 5, 24, 14]
        ]

    def _unnormalDataPrecess(self, a, b, c, d):
        return a, b, c, d


class BXYMain(MainModel):
    def dataReader(self, startDate, endDate, HRZID, Code):
        return XK(startDate, endDate, HRZID, Code)

    def genePsudoWeather(self, stageData, weatherIndex, pLength):
        return [-1]


if __name__ == '__main__':
    # declare the clock object currently in use
    Clock = Timer('2018-02-26 00:00:00', 15)
    # declare the model object currently in use
    Model = CNNModel
    BXY_1 = BXYMain(True, '2017-12-15 00:00:00', Clock, 30, Model, [
        u"瞬时流量", u'气象站室外温度', u"气象站室外湿度", u"气象站室外风速", u'气象站室外光照', u"回水压力",
        u'供水流量', u"燃气温度"
    ], [u'一次回温度'], [1], False, [1, 2, 3, 4], [0])
    testVariable = BXY_1.main()
Example #30
def discreteness_constraint(x):
	return (x - 0.5)*(x - 0.5) - 0.25
def constr_jac(x):
	return 2 * np.identity(n) * x - 1.
def constr_hess(x, v):
	return 2 * np.identity(n) * v

# linear constraint matrix
C = np.ones((1, n), dtype=np.float32)
# C[1:] = np.identity(n, dtype=np.float32)
# lb = np.zeros(n+1, dtype=np.float32)
# lb[0] = m
# ub = np.ones(n+1, dtype=np.float32)
# ub[0] = m

with Timer('minimizer'):
	res = so.minimize(energy, x0=np.ones_like(s), jac=jac, hess=hess, method='trust-constr',
		              constraints=[so.LinearConstraint(C, m, m),
		                           so.NonlinearConstraint(discreteness_constraint, 0, 1e-8)])
	x = res.x
	# x = np.round(res.x)
print(f"Final energy: {energy(x)}")
print(x)
print(np.sum(x))

# plot the sites, colored with their value of x
plt.figure(figsize=(8, 8))
sc = plt.scatter(positions[:, 0], positions[:, 1], c=x, cmap='viridis')
plt.gca().set_aspect('equal')
plt.colorbar(sc, fraction=0.046, pad=0.035)
plt.show()
Example #31
class Path:
    def __init__(self, kS, kV, trackwidth, trajectory):
        '''
        Creates a controller for following a PathWeaver trajectory.

        __init__(self, kS: Volts, kV: Volts * Seconds / Meters, trackwidth: Meters, trajectory: wpilib.trajectory.Trajectory)

        :param kS: The kS gain determined by characterizing the Robot's drivetrain
        :param kV: The kV gain determined by characterizing the Robot's drivetrain
        :param trackwidth: The horizontal distance between the left and right wheels of the tank drive.
        :param trajectory: The trajectory to follow. This can be generated by PathWeaver, or made by hand.
        '''

        self.kS = kS
        self.kV = kV

        self.trajectory = trajectory

        self.odometry = DifferentialDriveOdometry(
            Rotation2d(radians(0)), self.trajectory.initialPose())

        self.ramsete = RamseteController(2, 0.7)
        self.drive_kinematics = DifferentialDriveKinematics(trackwidth)

        self.are_wheel_speeds_zero = False
        self.timer = Timer()

    def is_done(self):
        '''
        is_done(self) -> bool

        Returns whether or not the path is done.
        '''
        return self.are_wheel_speeds_zero

    def reset(self, chassis, gyro):
        '''
        Re-initializes all of the data in the controller as if the path has not yet been executed.
        This method MUST be called in teleopInit and autonomousInit directly before the controller is used.

        reset(self, chassis: traits.DriveTrain, gyro: traits.Gyro)

        :param chassis: An object that implements the DriveTrain trait. This object's encoders will be reset by this function
        :param gyro: An object that implements the Gyro trait. This object's angle will be reset by this function
        '''

        # Assert the objects implement the proper traits
        assert chassis.implements(DriveTrain)
        assert gyro.implements(Gyro)

        # The encoders and gyro need to be reset so that the Ramsete
        # controller is fed data that looks new.
        # By resetting these sensors, it looks like we've never run the path at all!
        chassis.reset_encoders()
        gyro.reset()
        self.are_wheel_speeds_zero = False

        # The odometry object also needs to be re-initialized
        # so that it forgets the state from the previous run
        self.odometry = DifferentialDriveOdometry(
            Rotation2d(radians(0)), self.trajectory.initialPose())

        self.timer.start()

    def follow(self, chassis, gyro):
        '''
        This updates the Path and drives the chassis to follow the path

        update(self, chassis: traits.DriveTrain, gyro: traits.Gyro)

        :param chassis: An object that implements the DriveTrain trait. When this function is called,
                        the object's motors will be driven to follow the last set trajectory.
        :param gyro: An object that implements the gyro trait.
        '''
        # Assert the objects implement the proper traits
        assert chassis.implements(DriveTrain)
        assert gyro.implements(Gyro)

        if self.is_done():
            return

        # Set the chassis to low gear for more precise movements
        chassis.set_low_gear()

        # If a trajectory has been set, run it
        if self.trajectory is not None:
            # Get the accumulated left and right distance of the chassis
            ld, rd = chassis.get_left_distance(), chassis.get_right_distance()

            # Ramsete requires the counterclockwise angle of the Robot
            angle = gyro.get_counterclockwise_degrees()

            # Get the current position of the robot
            current_pose = self.odometry.update(Rotation2d(radians(angle)), ld,
                                                rd)

            # Calculate the target position using the trajectory, and get the chassis wheel speeds
            target_pose = self.trajectory.sample(self.timer.get())
            chassis_speed = self.ramsete.calculate(current_pose, target_pose)
            wheel_speeds = self.drive_kinematics.toWheelSpeeds(chassis_speed)
            l, r = wheel_speeds.left, wheel_speeds.right

            if abs(l) == 0 and abs(r) == 0:
                self.are_wheel_speeds_zero = True

            # Convert the left and right wheel speeds to volts using the characterized constants,
            # and then convert those to percent values from -1 to 1
            chassis.tank_drive((self.kS + l * self.kV) / 12,
                               (self.kS + r * self.kV) / 12)
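Per Path's own docstrings, reset() must be called in autonomousInit (or teleopInit) directly before follow() is run each loop. A hypothetical usage sketch; the gains, trajectory, chassis and gyro here are placeholders, not values from the original:

class MyRobot(wpilib.TimedRobot):
    def autonomousInit(self):
        # kS/kV/trackwidth come from drivetrain characterization
        self.path = Path(kS=1.0, kV=2.0, trackwidth=0.6,
                         trajectory=self.trajectory)
        self.path.reset(self.chassis, self.gyro)

    def autonomousPeriodic(self):
        if not self.path.is_done():
            self.path.follow(self.chassis, self.gyro)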
Example #32
class Credits(state_machine.State):
    def __init__(self):
        state_machine.State.__init__(self)
        self.land = None
        self.aircraft = None
        self.aircraft_team = None
        self.timer_delay = None
        self.draw_credits = False

        self.timer_display_message = None
        self.display_message = ''
        self.debug_group = components.debug.debug_group
        self.number_plateform_save = 0
        self.gravity_save = 0
        self.credit_index = 0

    def startup(self, now, persistant):
        state_machine.State.startup(self, now, persistant)
        self.number_plateform_save = LandingConfig.numberOfPlateforms
        self.gravity_save = LanderConfig.GRAVITY
        LanderConfig.GRAVITY = 0
        LandingConfig.numberOfPlateforms = 0
        self.draw_credits = False
        self.timer_delay = Timer(CreditConfig.DELAY_BETWEEN_CREDITS)
        self.land = Landing()
        self.aircraft = Lander()
        self.aircraft.fuel = 1000
        self.timer_display_message = Timer(CreditConfig.CREDIT_DURATION)
        self.aircraft_team = pygame.sprite.Group(self.aircraft)
        self.aircraft.orientation = 0
        self.credit_index = 0

    def cleanup(self):
        self.done = False
        LandingConfig.numberOfPlateforms = self.number_plateform_save
        LanderConfig.GRAVITY = self.gravity_save
        return self.persist

    def get_event(self, event):
        pass

    def update(self, keys, now):
        self.now = now
        self.debug_group.update()
        self.aircraft.boost()
        self.aircraft.vy = 0
        self.aircraft.vx = CreditConfig.SPEED_AIRCRAFT
        self.aircraft.y = CreditConfig.AIRCRAFT_Y_POS
        self.land.landingEntities.update()
        self.aircraft_team.update()
        self.aircraft.fuel = 1000
        if self.aircraft.rect.left > GameConfig.WINDOW_W:
            self.land = Landing()
            self.aircraft.x = -15

        if not self.draw_credits:
            self.timer_display_message.timer = now
            if self.timer_delay.check_tick(now):
                self.draw_credits = not self.draw_credits
        else:
            self.timer_delay.timer = now
            if self.timer_display_message.check_tick(now):
                self.draw_credits = not self.draw_credits
                self.credit_index += 1
        if self.credit_index >= len(CreditConfig.CREDITS):
            self.done = True
            self.next = 'GAME'
        else:
            self.display_message = CreditConfig.CREDITS[self.credit_index]

    def draw(self, surface, interpolate):
        surface.fill(GameConfig.BACKGROUND_COLOR)
        self.land.landingEntities.draw(surface)
        self.aircraft_team.draw(surface)
        self.debug_group.draw(surface)
        if self.draw_credits:
            render_text_center(surface, self.display_message,
                               GameConfig.COLOR_CENTER_TEXT,
                               CreditConfig.OFFSET_Y_CREDIT,
                               GameConfig.FONTSIZE_CENTER_TEXT)