Code example #1
def empirical_evaluation(G, interval):
    """Empirical evaluation
    Plot the computation time (y-axis) given the number of nodes (x-axis)
    of subgraphs, for each interval of nodes.
    """
    X = list(range(interval, len(G.nodes) + 1, interval))
    Y_our = []
    Y_nx = []

    for i in X:
        sub_G = G.subgraph(list(G.nodes)[0:i])
        with Timer(f"Our betweenness_centrality ({i} nodes)") as t:
            bc.betweenness_centrality(sub_G)
            Y_our.append(t.get_time())
        with Timer(f"NetworkX betweenness_centrality ({i} nodes)") as t:
            nx.betweenness_centrality(sub_G)
            Y_nx.append(t.get_time())
        print("---")

    plt.figure()
    plt.title("Computing time of the function Betweenness centrality")
    plt.xlabel("Number of nodes")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our, label="Our implementation")
    plt.plot(X, Y_nx, label="NetworkX implementation")
    plt.legend()
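
These benchmark snippets (here and in examples #6 and #12 below) use Timer as a context manager with an optional label and read t.get_time() while still inside the with block. The projects' own Timer classes are not shown on this page, so the following is only a minimal sketch of a class with that assumed interface:

import time

class Timer:
    """Minimal context-manager timer (sketch; the real classes may differ)."""

    def __init__(self, label=""):
        self.label = label

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def get_time(self):
        # seconds elapsed since the block was entered
        return time.perf_counter() - self.start

    def __exit__(self, exc_type, exc_value, traceback):
        if self.label:
            print(f"{self.label}: {self.get_time():.3f} s")
        return False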
Code example #2
    def convert_genotypes(self):

        chunk_size = self.split_size
        if chunk_size is None:
            raise ValueError(
                'CONVERTER_SPLIT_SIZE is not defined in the config file!')
        G = np.array([])
        #self.reader.folder.processed=0
        while True:
            with Timer() as t:
                G = self.reader.folder.get_bed(chunk_size)
                if G is None:
                    break

            print('Time to read {} SNPs is {} s'.format(G.shape[0], t.secs))

            self.write_data('gen')
            atom = tables.Int8Atom()
            self.genotype = self.h5_gen_file.create_carray(
                self.h5_gen_file.root,
                'genotype',
                atom, (G.shape),
                title='Genotype',
                filters=self.pytable_filters)
            with Timer() as t:
                self.genotype[:] = G

            print('Time to write {} SNPs is {} s'.format(G.shape[0], t.secs))

            self.h5_gen_file.close()
            G = None
            gc.collect()
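
Here (and in example #15) the elapsed time is read from t.secs after the with block exits, so this project's Timer presumably stores the duration on exit. A minimal sketch under that assumption:

import time

class Timer:
    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, *exc):
        # store the elapsed time so it can be read as t.secs afterwards
        self.secs = time.time() - self._start
        return False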
Code example #3
 def startup(self, now, persistant):
     state_machine.State.startup(self, now, persistant)
     self.number_plateform_save = LandingConfig.numberOfPlateforms
     self.gravity_save = LanderConfig.GRAVITY
     LanderConfig.GRAVITY = 0
     LandingConfig.numberOfPlateforms = 0
     self.draw_credits = False
     self.timer_delay = Timer(CreditConfig.DELAY_BETWEEN_CREDITS)
     self.land = Landing()
     self.aircraft = Lander()
     self.aircraft.fuel = 1000
     self.timer_display_message = Timer(CreditConfig.CREDIT_DURATION)
     self.aircraft_team = pygame.sprite.Group(self.aircraft)
     self.aircraft.orientation = 0
     self.credit_index = 0
Code example #4
File: path.py Project: med-jed/rc2020-1
    def __init__(self, kS, kV, trackwidth, trajectory):
        '''
        Creates a controller for following a PathWeaver trajectory.

        __init__(self, kS: Volts, kV: Volts * Seconds / Meters, trackwidth: Meters, trajectory: wpilib.trajectory.Trajectory)

        :param kS: The kS gain determined by characterizing the Robot's drivetrain
        :param kV: The kV gain determined by characterizing the Robot's drivetrain
        :param trackwidth: The horizontal distance between the left and right wheels of the tank drive.
        :param trajectory: The trajectory to follow. This can be generated by PathWeaver, or made by hand.
        '''

        self.kS = kS
        self.kV = kV

        self.trajectory = trajectory

        self.odometry = DifferentialDriveOdometry(
            Rotation2d(radians(0)), self.trajectory.initialPose())

        self.ramsete = RamseteController(2, 0.7)
        self.drive_kinematics = DifferentialDriveKinematics(trackwidth)

        self.are_wheel_speeds_zero = False
        self.timer = Timer()
Code example #5
def test_chip(test_set, rebuilder, transform, save_dir):
    _t = Timer()
    cost_time = list()
    for type in test_set.test_dict:
        img_list = test_set.test_dict[type]
        if not os.path.exists(os.path.join(save_dir, type)):
            os.mkdir(os.path.join(save_dir, type))
        for k, path in enumerate(img_list):
            image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            _t.tic()
            ori_img, input_tensor = transform(image)
            out = rebuilder.inference(input_tensor)
            re_img = out[0]
            s_map = ssim_seg(ori_img, re_img, win_size=11, gaussian_weights=True)
            mask = seg_mask(s_map, threshold=32)
            inference_time = _t.toc()
            cat_img = np.concatenate((ori_img, re_img, mask), axis=1)
            cv2.imwrite(os.path.join(save_dir, type, '{:d}.png'.format(k)), cat_img)
            cost_time.append(inference_time)
            if (k+1) % 20 == 0:
                print('{}th image, cost time: {:.1f} ms'.format(k+1, inference_time*1000))
            _t.clear()
    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))
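
The tic()/toc()/clear() calls above suggest a stopwatch-style Timer (similar in spirit to the one shipped with py-faster-rcnn); the same pattern appears in examples #9, #13, #19 and #20. Since the class itself is not shown, this is only a minimal sketch of that assumed interface:

import time

class Timer:
    def __init__(self):
        self.clear()

    def tic(self):
        self.start_time = time.time()

    def toc(self):
        # return seconds elapsed since the last tic()
        self.diff = time.time() - self.start_time
        return self.diff

    def clear(self):
        self.start_time = 0.0
        self.diff = 0.0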
Code example #6
def benchmark_girvan_newman(G):
    print("Starting benchmark_girvan_newman...")
    max_iteration_level = 10

    Y_our = []
    Y_nx = []

    our_it = our_girvan_newman(G)
    nx_it = nx_girvan_newman(G)

    our_it = itertools.islice(our_it, max_iteration_level)
    nx_it = itertools.islice(nx_it, max_iteration_level)

    i = 0
    while True:
        try:
            with Timer(f"Starting iteration {i} on our girvan newman") as t:
                next(our_it)
                Y_our.append(t.get_time())
            with Timer(f"Starting iteration {i} on nx girvan newman") as t:
                next(nx_it)
                Y_nx.append(t.get_time())
        except StopIteration:
            break
        i += 1

    X = list(range(len(Y_our)))
    plt.figure()
    plt.title("Time over iteration for executing girvan newman")
    plt.xlabel("Iteration level []")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our, label="Our implementation")
    plt.plot(X, Y_nx, label="Networkx implementation")
    plt.legend()
    plt.savefig("benchmark_girvan_newman.png")

    Y_our_cumulative = get_cumulative_array(Y_our)
    Y_nx_cumulative = get_cumulative_array(Y_nx)

    plt.figure()
    plt.title("Cumulative time over iteration for executing girvan newman")
    plt.xlabel("Iteration level []")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our_cumulative, label="Our implementation")
    plt.plot(X, Y_nx_cumulative, label="Networkx implementation")
    plt.legend()
    plt.savefig("benchmark_girvan_newman_cumulative.png")
Code example #7
 def __init__(self, *args, **kwargs):
     '''
     Do not overload me!
     '''
     self.testStart(*args, **kwargs)
     self.TEST_TIMER = Timer()
     self.PERIODIC_DONE = False
     self.END_DONE = False
Code example #8
File: magazine.py Project: med-jed/rc2020-1
    def __init__(self):
        '''
        '''
        self.feed_motor = SparkMax(MAGAZINE_FEED_MOTOR)
        self.left_agitator = SparkMax(MAGAZINE_LEFT_MOTOR)
        self.right_agitator = SparkMax(MAGAZINE_RIGHT_MOTOR)

        # Timer used to get motor up to speed
        self.timer = Timer()
Code example #9
def test_chip(test_set, rebuilder, transform, save_dir, configs):
    _t = Timer()
    cost_time = list()
    iou_list={}
    s_map_list=list()
    for type in test_set.test_dict:
        img_list = test_set.test_dict[type]
        if not os.path.exists(os.path.join(save_dir, type)):
            os.mkdir(os.path.join(save_dir, type))
            os.mkdir(os.path.join(save_dir, type, 'ori'))
            os.mkdir(os.path.join(save_dir, type, 'gen'))
            os.mkdir(os.path.join(save_dir, type, 'mask'))
        if not os.path.exists(os.path.join(save_dir, type,'ROC_curve')):
            os.mkdir(os.path.join(save_dir, type, 'ROC_curve'))
        for k, path in enumerate(img_list):
            name= path.split('/')[-1]
            image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            _t.tic()
            ori_img, input_tensor = transform(image)
            out = rebuilder.inference(input_tensor)
            re_img = out[0]
            s_map = ssim_seg(ori_img, re_img, win_size=11, gaussian_weights=True)
            _h, _w = image.shape
            s_map_save = cv2.resize(s_map, (_w, _h))
            s_map_list.append(s_map_save.reshape(-1,1))
            mask = seg_mask(s_map, threshold=128)
            inference_time = _t.toc()
            if configs['db']['resize'] == [832, 832]:
                #cat_img = np.concatenate((ori_img[32:-32,32:-32], re_img[32:-32,32:-32], mask[32:-32,32:-32]), axis=1)
                cv2.imwrite(os.path.join(save_dir, type, 'ori', 'mask{:d}.png'.format(k)), ori_img[32:-32,32:-32])
                cv2.imwrite(os.path.join(save_dir, type, 'gen', 'mask{:d}.png'.format(k)), re_img[32:-32,32:-32])
                cv2.imwrite(os.path.join(save_dir, type, 'mask', 'mask{:d}.png'.format(k)), mask[32:-32,32:-32])
            elif configs['db']['resize'] == [768, 768]:
                cv2.imwrite(os.path.join(save_dir, type, 'ori', 'mask{:d}.png'.format(k)), ori_img)
                cv2.imwrite(os.path.join(save_dir, type, 'gen', 'mask{:d}.png'.format(k)), re_img)
                cv2.imwrite(os.path.join(save_dir, type, 'mask', 'mask{:d}.png'.format(k)), mask)
            elif configs['db']['resize'] == [256, 256]:
                cv2.imwrite(os.path.join(save_dir, type, 'ori', name), ori_img)
                cv2.imwrite(os.path.join(save_dir, type, 'gen', name), re_img)
                cv2.imwrite(os.path.join(save_dir, type, 'mask', name), mask)
            else:
                raise Exception("invaild image size")
            #cv2.imwrite(os.path.join(save_dir, type, '{:d}.png'.format(k)), cat_img)
            cost_time.append(inference_time)
            if (k+1) % 20 == 0:
                print('{}th image, cost time: {:.1f} ms'.format(k+1, inference_time*1000))
            _t.clear()
        torch.save(s_map_list,os.path.join(save_dir) + '/s_map.pth')
    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))
    test_set.eval(save_dir)
Code example #10
 def startup(self, now, persistant):
     self.state_machine.setup_states({
         'GAME': Game(),
         'CREDITS': Credits()
     }, 'CREDITS')
     self.state_machine.state.startup(now, persistant)
     self.should_flip = False
     self.blink = True
     self.timer_delay = Timer(1000)
Code example #11
File: turret.py Project: med-jed/rc2020-1
    def __init__(self):
        self.clockwise_limit_switch = DigitalInput(
            TURRET_CLOCKWISE_LIMIT_SWITCH)
        self.counterclockwise_limit_switch = DigitalInput(
            TURRET_COUNTERCLOCKWISE_LIMIT_SWITCH)

        self.turn_motor = SparkMax(TURRET_TURN_MOTOR)
        self.turn_pid = PIDController(0.4, 0.001, 0.02)

        self.shoot_motor_1 = Falcon(TURRET_SHOOT_MOTORS[0])
        self.shoot_motor_2 = Falcon(TURRET_SHOOT_MOTORS[1])
        self.timer = Timer()

        self.limelight = Limelight()
Code example #12
def benchmark_edge_betweenness_centrality(G):
    print("Starting benchmark_edge_betweenness_centrality...")

    X = list(range(0, len(G.nodes), 5))
    Y_our = []
    Y_nx = []

    for i in X:
        sub_G = G.subgraph(list(G.nodes)[0:i])
        with Timer("Starting our edge_betweenness_centrality") as t:
            our_edge_betweenness_centrality(sub_G)
            Y_our.append(t.get_time())
        with Timer("Starting nx edge_betweenness_centrality") as t:
            nx_edge_betweenness_centrality(sub_G)
            Y_nx.append(t.get_time())

    plt.figure()
    plt.title("Time over graph size for executing edge_betweenness_centrality")
    plt.xlabel("Iteration level []")
    plt.ylabel("Time [s]")
    plt.plot(X, Y_our, label="Our implementation")
    plt.plot(X, Y_nx, label="Networkx implementation")
    plt.legend()
    plt.savefig("benchmark_edge_betweenness_centrality.png")
Code example #13
def _script_for_patch_feature(head,
                              tail,
                              device,
                              dataset='mscoco',
                              data_split='train'):
    data_dir = osp.join(DATA_ROOT, dataset)

    # set the model path here
    prototxt = osp.join(CAFFE_ROOT, 'models', 'ResNet',
                        'ResNet-152-deploy.prototxt')
    caffemodel = osp.join(CAFFE_ROOT, 'models', 'ResNet',
                          'ResNet-152-model.caffemodel')

    # load network
    net, transformer = load_network(prototxt, caffemodel, device)

    # prepare image files
    cap_file = osp.join(data_dir, 'captions_{}.json'.format(data_split))
    image_ids = json.load(open(cap_file, 'r'))['image_ids']
    im_files = [
        'COCO_{}2014_'.format(data_split) + str(i).zfill(12) + '.jpg'
        for i in image_ids
    ]
    im_files = [osp.join(data_dir, data_split + '2014', i) for i in im_files]
    with h5py.File(osp.join(data_dir, 'bbox.h5'), 'r') as f:
        bbox = np.array(f[data_split])

    # initialize h5 file
    save_file = osp.join(data_dir, 'features', 'features_30res.h5')
    with h5py.File(save_file) as f:
        if data_split not in f:
            f.create_dataset(data_split, [len(im_files), 30, 2048], 'float32')

    # computing
    timer = Timer()
    print '\n\n... computing'
    for i in xrange(head, tail):
        timer.tic()
        im = caffe.io.load_image(im_files[i])  # (h, w, c)
        with h5py.File(save_file) as f:
            feat = cnn_patch_feature(net, transformer, im, bbox[i, :, :])
            f[data_split][i, :] = feat
        print '[{:d}]  {}  [{:.3f} sec]'.format(i,
                                                osp.split(im_files[i])[-1],
                                                timer.toc())
Code example #14
File: train.py Project: jingmouren/monogreedy
def train(model, beam_searcher, train_set, valid_set, save_dir, lr,
          display=100, starting=0, endding=20, validation=2000, life=10, logger=None):
    """
    display:    output training information every 'display' mini-batches
    starting:   the starting snapshot index, > 0 when resuming training
    endding:    the minimum number of training snapshots
    validation: evaluate on the validation set every 'validation' mini-batches
    life:       amount added to 'endding' whenever a better model is found
    """
    train_func, _ = adam_optimizer(model, lr=lr)
    print '... training'
    logger = Logger(save_dir) if logger is None else logger
    timer = Timer()
    loss = 0
    imb = starting * validation
    best = -1
    best_snapshot = -1
    timer.tic()
    while imb < endding*validation:
        imb += 1
        x = train_set.iterate_batch()
        loss += train_func(*x)[0] / display
        if imb % display == 0:
            logger.info('snapshot={}, iter={},  loss={:.6f},  time={:.1f} sec'.format(imb/validation, imb, loss, timer.toc()))
            timer.tic()
            loss = 0
        if imb % validation == 0:
            saving_index = imb/validation
            model.save_to_dir(save_dir, saving_index)
            try:
                scores = validate(beam_searcher, valid_set, logger)
                if scores[3] > best:
                    best = scores[3]
                    best_snapshot = saving_index
                    endding = max(saving_index+life, endding)
                logger.info('    ---- this Bleu-4 = [%.3f],   best Bleu-4 = [%.3f], endding -> %d' % \
                            (scores[3], best, endding))
            except OSError:
                print '[Ops!! OS Error]'

    logger.info('Training done, best snapshot is [%d]' % best_snapshot)
    return best_snapshot
Code example #15
def HASE(b4, A_inverse, b_cov, C, N_con, DF):

    with Timer() as t:

        B13 = b_cov
        B4 = b4

        A1_B_constant = np.tensordot(A_inverse[:, :, 0:(N_con)],
                                     B13,
                                     axes=([2], [0]))

        A1_B_nonconstant = np.einsum('ijk,il->ijl',
                                     A_inverse[:, :, N_con:N_con + 1], B4)

        A1_B_full = A1_B_constant + A1_B_nonconstant

        BT_A1B_const = np.einsum('ij,lji->li', B13.T, A1_B_full[:,
                                                                0:(N_con), :])

        BT_A1B_nonconst = np.einsum('ijk,ijk->ijk', B4[:, None, :],
                                    A1_B_full[:, (N_con):N_con + 1, :])

        BT_A1B_full = BT_A1B_const[:, None, :] + BT_A1B_nonconst

        C_BTA1B = BT_A1B_full - C.reshape(1, -1)

        C_BTA1B = np.abs(C_BTA1B)

        a44_C_BTA1B = C_BTA1B * A_inverse[:, (N_con):N_con + 1,
                                          (N_con):N_con + 1]

        a44_C_BTA1B = np.sqrt((a44_C_BTA1B))

        t_stat = np.sqrt(DF) * np.divide(A1_B_full[:, (N_con):N_con + 1, :],
                                         a44_C_BTA1B)

        SE = a44_C_BTA1B / np.sqrt(DF)

    print "time to compute GWAS for {} phenotypes and {} SNPs .... {} sec".format(
        b4.shape[1], A_inverse.shape[0], t.secs)
    return t_stat, SE
Code example #16
File: sampler.py Project: ryanho9453/text_clustering
    def run(self, matrix, maxiter=30):   # matrix shape(#doc, #word)
        """
        Run the Gibbs sampler.
        """
        n_docs, vocab_size = matrix.shape

        self._initialize(matrix)

        for it in range(maxiter):
            timer = Timer()
            timer.start()

            print('--- iter '+str(it))

            for m in range(n_docs):
                # the i-th word in doc m -- w (here w is the word's index in the td_matrix)
                for i, w in enumerate(word_indices(matrix[m, :])):
                    z = self.topics[(m, i)]
                    self.nmz[m, z] -= 1
                    self.nm[m] -= 1
                    self.nzw[z, w] -= 1
                    self.nz[z] -= 1

                    p_z = self._conditional_distribution(m, w)
                    z = sample_index(p_z)

                    self.nmz[m, z] += 1
                    self.nm[m] += 1
                    self.nzw[z, w] += 1
                    self.nz[z] += 1
                    self.topics[(m, i)] = z

            timer.print_time()
            print('--- end iter')

            # FIXME: burn-in and lag!
            yield self.phi_pzw()
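
Example #16 only calls start() and print_time() on its Timer, so any implementation that prints the elapsed wall-clock time would fit; a minimal sketch matching that assumed interface:

import time

class Timer:
    def start(self):
        self._start = time.time()

    def print_time(self):
        # print seconds elapsed since start()
        print('elapsed: {:.2f} s'.format(time.time() - self._start))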
Code example #17
    def _HRZColumnsMap(self):
        return {}

    def _CollectInHomeDeviceID(self):
        return [
            "W634iMCwmSCcjQkltb7d38btv000%02d" % i
            for i in [18, 6, 23, 1, 13, 12, 4, 5, 24, 14]
        ]

    def _unnormalDataPrecess(self, a, b, c, d):
        return a, b, c, d


class BXYMain(MainModel):
    def dataReader(self, startDate, endDate, HRZID, Code):
        return XK(startDate, endDate, HRZID, Code)

    def genePsudoWeather(self, stageData, weatherIndex, pLength):
        return [-1]


if __name__ == '__main__':
    # declare the clock object currently in use
    Clock = Timer('2018-02-26 00:00:00', 15)
    # declare the model object currently in use
    Model = CNNModel
    BXY_1 = BXYMain(True, '2017-12-15 00:00:00', Clock, 30, Model, [
        u"瞬时流量", u'气象站室外温度', u"气象站室外湿度", u"气象站室外风速", u'气象站室外光照', u"回水压力",
        u'供水流量', u"燃气温度"
    ], [u'一次回温度'], [1], False, [1, 2, 3, 4], [0])
    testVariable = BXY_1.main()
Code example #18
def test_mvtec(test_set, rebuilder, transform, save_dir, threshold_seg_dict, configs):
    _t = Timer()
    cost_time = list()
    if not os.path.exists(os.path.join(save_dir, 'ROC_curve')):
        os.mkdir(os.path.join(save_dir, 'ROC_curve'))
    for item in test_set.test_dict:
        s_map_list = list()
        s_map_good_list=list()
        item_dict = test_set.test_dict[item]

        if not os.path.exists(os.path.join(save_dir, item)):
            os.mkdir(os.path.join(save_dir, item))
            os.mkdir(os.path.join(save_dir, item, 'ori'))
            os.mkdir(os.path.join(save_dir, item, 'gen'))
            os.mkdir(os.path.join(save_dir, item, 'mask'))
            #os.mkdir(os.path.join(save_dir, item))
        for type in item_dict:
            if not os.path.exists(os.path.join(save_dir, item, 'ori', type)):
                os.mkdir(os.path.join(save_dir, item, 'ori', type))
            if not os.path.exists(os.path.join(save_dir, item, 'gen', type)):
                os.mkdir(os.path.join(save_dir, item, 'gen', type))
            if not os.path.exists(os.path.join(save_dir, item, 'mask', type)):
                os.mkdir(os.path.join(save_dir, item, 'mask', type))
            _time = list()
            img_list = item_dict[type]
            for path in img_list:
                image = cv2.imread(path, cv2.IMREAD_COLOR)
                ori_h, ori_w, _ = image.shape
                _t.tic()
                ori_img, input_tensor = transform(image)
                out = rebuilder.inference(input_tensor)
                re_img = out.transpose((1, 2, 0))
                s_map = ssim_seg_mvtec(ori_img, re_img, configs, win_size=3, gaussian_weights=True)
                if threshold_seg_dict: # dict is not empty
                    mask = seg_mask_mvtec(s_map, threshold_seg_dict[item],configs)
                else:
                    mask = seg_mask_mvtec(s_map, 64, configs)
                inference_time = _t.toc()
                img_id = path.split('.')[0][-3:]
                cv2.imwrite(os.path.join(save_dir, item, 'ori', type, '{}.png'.format(img_id)), ori_img)
                cv2.imwrite(os.path.join(save_dir, item, 'gen', type, '{}.png'.format(img_id)), re_img)
                cv2.imwrite(os.path.join(save_dir, item, 'mask', type, '{}.png'.format(img_id)), mask)
                _time.append(inference_time)
                if type != 'good':
                    s_map_bad=s_map.reshape(-1,1)
                    s_map_list.append(s_map_bad)
                else:
                    s_map_good = s_map.reshape(-1, 1)
                    s_map_good_list.append(s_map_good)
            cost_time += _time
            mean_time = np.array(_time).mean()
            print('Evaluate: Item:{}; Type:{}; Mean time:{:.1f}ms'.format(item, type, mean_time*1000))
            _t.clear()
        torch.save(s_map_list, os.path.join(save_dir, item) + '/s_map.pth')
        torch.save(s_map_good_list, os.path.join(save_dir, item) + '/s_map_good.pth')

    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))

    # evaluate results
    print('Evaluating...')
    test_set.eval(save_dir)
Code example #19
def train():
    args = get_arguments()
    print '*' * 10 + ' args ' + '*' * 10
    print args
    print '*' * 26
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    if args.data_provider == "coco":
        #pass
        coco_provider_train = COCO_detection_train(args.root_dir,
                                                   args.json_file_train)
        coco_provider_val = COCO_detection_train(args.root_dir,
                                                 args.json_file_val)

        data_provider_train = MultiDataProvider([coco_provider_train],
                                                crop_size=args.crop_size,
                                                min_ratio=min_ratio,
                                                max_ratio=max_ratio,
                                                hor_flip=hor_flip)

        data_provider_val = MultiDataProvider([coco_provider_val],
                                              crop_size=args.crop_size,
                                              min_ratio=1,
                                              max_ratio=1.1,
                                              hor_flip=False)

    else:
        raise RuntimeError, 'unknown data provider type'

    solver_param = SolverParameter()
    solver_param.type = args.type
    solver_param.base_lr = args.learning_rate
    solver_param.lr_policy = args.lr_policy
    solver_param.gamma = args.gamma
    solver_param.stepsize = args.step_size
    solver_param.gpu_list = args.gpus_list
    solver_param.exclude_scope = args.exclude_scope

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    solver = Solver(solver_param, sess)

    total_size = args.batch_size * solver.num_gpus
    print('total size for single forward: ', total_size)

    with tf.name_scope('input'):
        inputs = {
            'images':
            tf.placeholder(tf.float32,
                           shape=(total_size, args.crop_size, args.crop_size,
                                  3),
                           name='input_images')
        }
        label_gt = {
            'labels':
            tf.placeholder(dtype=tf.int32,
                           shape=(total_size, int(args.crop_size * scale),
                                  int(args.crop_size * scale)),
                           name='input_label')
        }

    global_step = tf.train.get_or_create_global_step()
    nas_refine_net = nas_refine_model(num_cls=args.num_classes,
                                      is_training=True,
                                      ohem=args.ohem,
                                      mining_ratio=args.mining_ratio)

    train_op, total_loss, metric = solver.deploy(
        model_fn=nas_refine_net.infer,
        loss_fn=nas_refine_net.loss_fn,
        eval_fn=nas_refine_net.eval_metric,
        inputs=inputs,
        targets=label_gt,
        total_size=total_size,
        regularize=True)

    miou = metric['miou']
    tf.summary.scalar('miou', miou)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('lr', solver.learning_rate)

    ### init
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    ### restore
    flag = STATUS.NO_INIT
    variables_to_restore = None
    if args.init_weights != None:
        flag = STATUS.FINTTUNE
        # if not os.path.exists(args.init_weights):
        #     raise RuntimeError, '{} does not exist for finetuning'.format(args.init_weights)
        init_weights = args.init_weights
        variables_to_restore = solver.get_variables_finetune()

    if args.solver_state != None:
        flag = STATUS.CONTINUE
        if os.path.isdir(args.solver_state):
            solver_state = tf.train.latest_checkpoint(args.solver_state)
        else:
            # if not os.path.exists(args.solver_state):
            #     raise RuntimeError, '{} does not exist for continue training'.format(args.solver_state, flag)
            solver_state = args.solver_state
        variables_to_restore = solver.get_variables_continue_training()

    if flag == STATUS.FINTTUNE:
        loader = tf.train.Saver(var_list=variables_to_restore)
        loader.restore(sess, init_weights)
        print('{} loaded'.format(init_weights))
    elif flag == STATUS.CONTINUE:
        loader = tf.train.Saver(var_list=variables_to_restore)
        loader.restore(sess, solver_state)
        print('{} loaded'.format(solver_state))

    all_summaries = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(args.save_dir)
    saver = tf.train.Saver()

    label_sz = int(args.crop_size * scale)
    #timer = Timer()
    for step in range(1, args.num_steps):
        #print 'step: ', step
        #data_blob = np.zeros((total_size, args.crop_size, args.crop_size, 3), dtype=np.float32)
        #label_blob = np.zeros((total_size, label_sz, label_sz), dtype=np.int32)
        data_blob = np.zeros((0, args.crop_size, args.crop_size, 3),
                             dtype=np.float32)
        label_blob = np.zeros((0, label_sz, label_sz), dtype=np.int32)
        timer = Timer()
        timer.tic()
        for cur_id in range(solver.num_gpus):
            #start = int(cur_id * args.batch_size)
            #end = int((cur_id + 1) * args.batch_size)
            while True:
                images, labels = data_provider_train.get_batch(args.batch_size)
                #data_blob[start:end] = images
                labels_resize = np.zeros((labels.shape[0], label_sz, label_sz),
                                         dtype=np.int32)
                for n in range(labels.shape[0]):
                    labels_resize[n] = cv2.resize(
                        labels[n], (label_sz, label_sz),
                        interpolation=cv2.INTER_NEAREST)

                #label_blob[start:end] = labels_resize

                if np.any(labels_resize >= 0):
                    data_blob = np.concatenate((data_blob, images), axis=0)
                    label_blob = np.concatenate((label_blob, labels_resize),
                                                axis=0)
                    break

        assert label_blob.shape[0] == total_size
        assert data_blob.shape[0] == total_size

        ### run training op
        _, losses_value, _, summary, global_step_val = sess.run(
            [train_op, total_loss, metric['op'], all_summaries, global_step],
            feed_dict={
                inputs['images']: data_blob,
                label_gt['labels']: label_blob
            })

        summary_writer.add_summary(summary, global_step=global_step_val)

        ### show
        if step % args.show_steps == 0:
            t1 = timer.toc()
            print(
                'step: {}, lr: {}, loss_value: {}, miou: {}, time: {} / per iter'
                .format(global_step_val, sess.run(solver.learning_rate),
                        losses_value, miou.eval(session=sess), t1))

            #time = timer.toc()

            ## save
        if step % args.save_step == 0:
            save(saver, sess, args.save_dir, step=global_step_val)

    ### test
        if step % args.val_steps == 0:
            test_loss = 0
            print('#' * 5 + ' testing ' + '#' * 5)
            for kk in range(test_iter):
                for cur_id in range(solver.num_gpus):
                    start = int(cur_id * args.batch_size)
                    end = int((cur_id + 1) * args.batch_size)
                    while True:
                        images, labels = data_provider_val.get_batch(
                            args.batch_size)

                        data_blob[start:end] = images
                        labels_resize = np.zeros(
                            (labels.shape[0], label_sz, label_sz),
                            dtype=np.int32)
                        for n in range(labels.shape[0]):
                            labels_resize[n] = cv2.resize(
                                labels[n], (label_sz, label_sz),
                                interpolation=cv2.INTER_NEAREST)
                        label_blob[start:end] = labels_resize
                        if np.any(labels_resize >= 0):
                            break

                losses_value = sess.run([total_loss],
                                        feed_dict={
                                            inputs['images']: data_blob,
                                            label_gt['labels']: label_blob
                                        })
                test_loss += losses_value[0]

            print('global_step_val: {}, test loss: {}'.format(
                global_step_val,
                float(test_loss) / test_iter))
            print('#' * 19)
Code example #20
def test_mvtec(test_set, rebuilder, transform, save_dir, threshold_seg_dict, val_index):
    _t = Timer()
    cost_time = list()
    threshold_dict = dict()
    if not os.path.exists(os.path.join(save_dir, 'ROC_curve')):
        os.mkdir(os.path.join(save_dir, 'ROC_curve'))
    for item in test_set.test_dict:
        threshold_list = list()
        item_dict = test_set.test_dict[item]

        if not os.path.exists(os.path.join(save_dir, item)):
            os.mkdir(os.path.join(save_dir, item))
            os.mkdir(os.path.join(save_dir, item, 'ori'))
            os.mkdir(os.path.join(save_dir, item, 'gen'))
            os.mkdir(os.path.join(save_dir, item, 'mask'))
        for type in item_dict:
            if not os.path.exists(os.path.join(save_dir, item, 'ori', type)):
                os.mkdir(os.path.join(save_dir, item, 'ori', type))
            if not os.path.exists(os.path.join(save_dir, item, 'gen', type)):
                os.mkdir(os.path.join(save_dir, item, 'gen', type))
            if not os.path.exists(os.path.join(save_dir, item, 'mask', type)):
                os.mkdir(os.path.join(save_dir, item, 'mask', type))
            _time = list()
            img_list = item_dict[type]
            for path in img_list:
                image = cv2.imread(path, cv2.IMREAD_COLOR)
                ori_h, ori_w, _ = image.shape
                _t.tic()
                ori_img, input_tensor = transform(image)
                out = rebuilder.inference(input_tensor)
                re_img = out.transpose((1, 2, 0))
                s_map = ssim_seg(ori_img, re_img, win_size=11, gaussian_weights=True)
                s_map = cv2.resize(s_map, (ori_w, ori_h))
                if val_index == 1:
                    mask = seg_mask(s_map, threshold=threshold_seg_dict[item])
                elif val_index == 0:
                    mask = seg_mask(s_map, threshold=threshold_seg_dict)
                else:
                    raise Exception("Invalid val_index")

                inference_time = _t.toc()
                img_id = path.split('.')[0][-3:]
                cv2.imwrite(os.path.join(save_dir, item, 'ori', type, '{}.png'.format(img_id)), ori_img)
                cv2.imwrite(os.path.join(save_dir, item, 'gen', type, '{}.png'.format(img_id)), re_img)
                cv2.imwrite(os.path.join(save_dir, item, 'mask', type, '{}.png'.format(img_id)), mask)
                _time.append(inference_time)

                if type != 'good':
                    threshold_list.append(s_map)
                else:
                    pass

            cost_time += _time
            mean_time = np.array(_time).mean()
            print('Evaluate: Item:{}; Type:{}; Mean time:{:.1f}ms'.format(item, type, mean_time*1000))
            _t.clear()
        threshold_dict[item] = threshold_list
    # calculate mean time
    cost_time = np.array(cost_time)
    cost_time = np.sort(cost_time)
    num = cost_time.shape[0]
    num90 = int(num*0.9)
    cost_time = cost_time[0:num90]
    mean_time = np.mean(cost_time)
    print('Mean_time: {:.1f}ms'.format(mean_time*1000))

    # evaluate results
    print('Evaluating...')
    test_set.eval(save_dir,threshold_dict)
Code example #21
def run(cfg: config.EvolutionConfig) -> None:
    toolbox = create_toolbox(cfg)

    population = toolbox.population(n=cfg.population_size)
    fitnesses = [toolbox.evaluate(el) for el in population]
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit

    CXPB, MXPB = 0.3, 0.3
    child_per_parent = 2

    i = 0
    with Timer() as timer, SolutionTracer(
            filename=
            f"Evolutionary_CXPB_{CXPB}_MXPB_TS_{TOURNAMENT_SIZE}_{MXPB}_CPP_{child_per_parent}_I_{cfg.max_iterations}_PS_{cfg.population_size}",
            max_repetitions=cfg.max_iterations,
            id=cfg.id,
            clues=cfg.clues) as solution_tracer:
        while cfg.max_iterations > i:
            i += 1

            offspring = population
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, child_per_parent * offspring))

            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    # if random.random() < 0.5:
                    toolbox.mate_score(child1, child2)
                    # else:
                    #     toolbox.mate_r(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                if random.random() < MXPB:
                    if random.random() < 0.8:
                        toolbox.mutate_swap_many(mutant)
                    else:
                        toolbox.mutate_swap_one(mutant)
                    del mutant.fitness.values

            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            best_offspring = []
            if child_per_parent > 1:
                for begin, end in zip(
                        range(0,
                              len(offspring) - child_per_parent + 1,
                              child_per_parent),
                        range(child_per_parent,
                              len(offspring) + 1, child_per_parent)):
                    best_offspring.append(
                        sorted(offspring[begin:end],
                               key=lambda x: x.fitness.values[0])[0])
            else:
                best_offspring = offspring

            population = choose_unique(best_offspring)
            population = toolbox.select_t(population, cfg.population_size)

            # saving and checking stats
            best = tools.selBest(population, 1)[0]
            solution_tracer.update(best, timer.elapsed)
            if best.fitness.values[0] == 0:
                break
            if i % 100 == 0:
                print(i, best.fitness)
                print(best.sudoku)

    best = tools.selBest(population, 1)[0]
    print(best.fitness)
    print(best.sudoku)
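
Example #21 reads timer.elapsed repeatedly while the with block is still open, so its Timer presumably exposes the running time as a property. A minimal sketch under that assumption:

import time

class Timer:
    def __enter__(self):
        self._start = time.perf_counter()
        return self

    @property
    def elapsed(self):
        # seconds since the block was entered
        return time.perf_counter() - self._start

    def __exit__(self, *exc):
        return False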
Code example #22
# with Timer('Explicit inversion'):
# 	A_inv = np.linalg.inv(A)
# 	x = np.dot(A_inv, -s)  # fun fact: this can be also written as A_inv @ (-s)

# scipy's LU factorization
# with Timer('LU solver'):
# 	LU, piv = sl.lu_factor(A, overwrite_a=False, check_finite=False)
# 	x = sl.lu_solve((LU, piv), -s)

# scipy version of linalg.solve
# with Timer('scipy.linalg.solve'):
# 	x = sl.solve(A, -s, assume_a='sym', overwrite_a=False, overwrite_b=True, check_finite=False)

# low-level LAPACK solver
lapack_gesv = sl.get_lapack_funcs('gesv', (A, -s))
with Timer('Low-level LAPACK solver'):
    _, _, x, _ = lapack_gesv(A, -s)

# LDL decomposition - rather slow and pretty numerically unstable
# with Timer('LDL decomposition'):
# 	lu, d, perm = sl.ldl(A, overwrite_a=True, check_finite=False)
# 	L = lu[perm]
# 	z = sl.solve(L, -s, overwrite_a=True, check_finite=False)
# 	y = sl.solve(d, z, overwrite_a=True, check_finite=False)
# 	x = sl.solve(d @ L.T, y, overwrite_a=True, check_finite=False)

# plot the sites, colored with their value of x
plt.figure(figsize=(8, 8))
sc = plt.scatter(positions[:, 0], positions[:, 1], c=x, cmap='viridis')
plt.gca().set_aspect('equal')
plt.colorbar(sc, fraction=0.046, pad=0.035)
Code example #23
def discreteness_constraint(x):
	return (x - 0.5)*(x - 0.5) - 0.25
def constr_jac(x):
	return 2 * np.identity(n) * x - 1.
def constr_hess(x, v):
	return 2 * np.identity(n) * v

# linear constraint matrix
C = np.ones((1, n), dtype=np.float32)
# C[1:] = np.identity(n, dtype=np.float32)
# lb = np.zeros(n+1, dtype=np.float32)
# lb[0] = m
# ub = np.ones(n+1, dtype=np.float32)
# ub[0] = m

with Timer('minimizer'):
	res = so.minimize(energy, x0=np.ones_like(s), jac=jac, hess=hess, method='trust-constr',
		              constraints=[so.LinearConstraint(C, m, m),
		                           so.NonlinearConstraint(discreteness_constraint, 0, 1e-8)])
	x = res.x
	# x = np.round(res.x)
print(f"Final energy: {energy(x)}")
print(x)
print(np.sum(x))

# plot the sites, colored with their value of x
plt.figure(figsize=(8, 8))
sc = plt.scatter(positions[:, 0], positions[:, 1], c=x, cmap='viridis')
plt.gca().set_aspect('equal')
plt.colorbar(sc, fraction=0.046, pad=0.035)
plt.show()
Code example #24
    with open(cfg_file, "r") as f:
        configs = json.load(f)
    start_epoch = configs['op']['start_epoch']
    max_epoch = configs['op']['max_epoch']
    learning_rate = configs['op']['learning_rate']
    decay_rate = configs['op']['decay_rate']
    epoch_steps = configs['op']['epoch_steps']
    snapshot = configs['op']['snapshot']
    batch_size = configs['db']['batch_size']
    loader_threads = configs['db']['loader_threads']
    save_dir = configs['system']['save_dir']

    # init Timer
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    _t = Timer()

    # create log file
    log = Log(args.log_dir, args.cfg)
    log.wr_cfg(configs)

    # load data set
    training_set = load_data_set_from_factory(configs, 'train')
    print('Data set: {} has been loaded'.format(configs['db']['name']))

    # load model
    trainer = load_training_model_from_factory(configs, ngpu=args.ngpu)
    if configs['system']['resume']:
        trainer.load_params(configs['system']['resume_path'])
    print('Model: {} has been loaded'.format(configs['model']['name']))
Code example #25
    def fit( self, data,
            iterations=1000,
            batch_size=64,
            k=2,
            out_dir='./out',
            out_iter=5 ):
        #some helpful things
        ones = np.ones( ( batch_size, 1 ) ).astype( 'float32' )
        minus_ones = ones * -1.
        timer = Timer( iterations )
        output_pattern = out_dir + '/{:0' + str( len( str( iterations ) ) ) + 'd}.png' #erghh if it is stupid but it works it is not stupid
        clear = '\r                                                                                                                                                '
        progress = '{} | D( loss:\t{:0.2f}, diff:\t{:0.2f}, norm:\t{:0.2f}, ; G( loss:\t{:0.2f}  )'

        #get some noise:
        out_samples = self.make_some_noise()
        distance_samples = self.make_some_noise( 10 )

        print( distance( distance_samples ) )

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # print( distance( np.clip( ( self.generate( distance_samples ) + 1 ) * 127.5, 0, 255 ) ) )
            io.imsave( out_dir + '/real_samples.png',
                image_grid( np.clip( ( data[ : 64 ] + 1 ) * 127.5, 0, 255 ) ), # make sure we have valid values
                plugin='pil' )

        for i in range( iterations ):
            timer.start_step()
            #train discriminator
            self.make_trainable( True )
            for j in range( k ):
                real_data = data[ np.random.choice( data.shape[0], batch_size, replace=False ), : ]
                fake_data = self.generate( self.make_some_noise( batch_size ) )
                epsilon = np.random.random( batch_size )
                interpolation = ( real_data.T * epsilon ).T + ( fake_data.T * ( 1 - epsilon ) ).T
                d_loss, d_diff, d_norm = self.dis_trainer.train_on_batch( [ real_data, fake_data, interpolation ], [ ones ] * 2 )

                ##something messed up
                # for l in self.dis_trainer.layers:
                #     weights = l.get_weights()
                #     replace = False
                #     for j, w in enumerate( weights ):
                #         if np.isnan( w ).any():
                #             weights[ j ] = np.nan_to_num( w )
                #             replace = True
                #     if replace:
                #         l.set_weights( weights )
                # if replace:
                #     print('\nfucking NaN man')

            #train generator
            self.make_trainable( False )
            g_loss = self.gan.train_on_batch( self.make_some_noise( batch_size ), minus_ones )

            ##something messed up
            # for l in self.gan.layers:
            #     weights = l.get_weights()
            #     replace = False
            #     for j, w in enumerate( weights ):
            #         if np.isnan( w ).any():
            #             weights[ j ] = np.nan_to_num( w )
            #             replace = True
            #     if replace:
            #         l.set_weights( weights )
            # if replace:
            #     print('\nfucking NaN man')

            if np.isnan( d_loss ):
                for j,l in enumerate( self.gan.layers):
                    for k, w in enumerate(  l.get_weights() ):
                        w = np.nan_to_num( w )
                        print( '{}/{}: {}/{}'.format( j, k, np.min( w ), np.max( w ) )  )


            if i % out_iter == 0:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # print( distance( np.clip( ( self.generate( distance_samples ) + 1 ) * 127.5, 0, 255 ) ) )
                    io.imsave( output_pattern.format( i ),
                        image_grid( np.clip( ( self.generate( out_samples ) + 1 ) * 127.5, 0, 255 ) ), # make sure we have valid values
                        plugin='pil' )




            timer.stop_step()
            #progress reporting
            sys.stdout.write( clear )
            sys.stdout.flush()
            sys.stdout.write( '\r' + progress.format( timer.out_str(), d_loss, d_diff, d_norm, g_loss ) )
            sys.stdout.flush()
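
Example #25 constructs Timer(iterations) and wraps each training step in start_step()/stop_step(), then formats a progress string with out_str(). The class is not shown here; the following is only a rough sketch of an ETA-style helper with those (assumed) method names:

import time

class Timer:
    def __init__(self, total_steps):
        self.total_steps = total_steps
        self.done = 0
        self.total_time = 0.0

    def start_step(self):
        self._t0 = time.time()

    def stop_step(self):
        self.total_time += time.time() - self._t0
        self.done += 1

    def out_str(self):
        # average step time and a rough estimate of the remaining time
        avg = self.total_time / max(self.done, 1)
        eta = avg * (self.total_steps - self.done)
        return '{}/{} steps, {:.2f} s/step, ETA {:.0f} s'.format(
            self.done, self.total_steps, avg, eta)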
Code example #26
 def start(self):
     self.timer = Timer.Timer(self.config.flight_fix_scan_interval, self.do)
     self.timer.start()
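
In example #26, Timer.Timer(interval, callback) followed by start() has the same shape as the standard library's threading.Timer, which runs a callback once after the given delay. If the project's Timer module is a thin wrapper around it, equivalent standard-library usage would look like this (the 5-second interval and the do() body are made up for illustration):

import threading

def do():
    print("periodic scan")

# schedule do() to run once, 5 seconds from now
t = threading.Timer(5.0, do)
t.start()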