Example #1
def run(features, p=p, g=eqn_graph, k=30):
    global sl
    sl = Slipnet(g, p)
    pr(as_dict(p))
    print()
    print('Input:')
    pr(features)
    print()
    t0 = process_time()
    out_d = sl.dquery(features=features)
    t1 = process_time()
    pts(sl.topna(out_d, k=k))
    print()
    pts(sl.topna(out_d, pred=Equation, k=k))
    print()
    pts(sl.topna(out_d, pred=int, k=k))
    print()
    pts(sl.topna(out_d, pred=Before, k=20))
    print()
    pts(sl.topna(out_d, pred=After, k=20))
    print()
    pts(sl.topna(out_d, pred=isclass, k=20))
    print()
    pts(sl.topna(out_d, pred=Operator, k=20))
    print()
    #print(out_d[NumOperands(2)])
    #print(out_d[PrefixedNode(1, After(10))])
    #print(out_d[PrefixedNode(2, Before(10))])
    print(f'{t1 - t0:1.3f} sec')
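
# A possible invocation of run() above (not in the original source): query the
# slipnet with Before/After feature nodes like those used in Example #15. The
# particular feature list and k are illustrative assumptions.
#run([Before(4), Before(5), Before(6), After(15)], k=10)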
Example #2
def exp1():
    global rmem
    rmem = RMem.run(operands=range(1, 4),
                    startc=(None, '+', 1, None, 3),
                    ndups=2,
                    niters=1000)
    pr(rmem.lsteps)
Example #3
def run(bricks: Sequence[int],
        target: int,
        seed: int = 1,
        num_slipnet_iterations: Optional[int] = None,
        paint_threshold: float = 0.1) -> None:
    global fm, ca, cr0, cr1, cr2, cr3, wa
    lenable(Agent, Codelet, Fizzle)  #, log_pulse)
    fm = FARGModel(slipnet=Slipnet(Graph.augment(eqn_graph, desnaggers_graph)),
                   seed=seed,
                   num_slipnet_iterations=num_slipnet_iterations,
                   paint_threshold=paint_threshold)
    ca = fm.build(StepCanvas([Step(tuple(bricks))]))
    cr0 = CellRef(ca, 0)
    cr1 = CellRef(ca, 1)
    cr2 = CellRef(ca, 2)
    cr3 = CellRef(ca, 3)
    wa = fm.build(
        Want(startcell=cr0,
             target=target,
             on_success=RaiseException(SolvedPuzzle)))

    #    try:
    fm.do_timestep(num=40)  # 20
    #    except Exception as exc:
    #        print(exc)
    pr(fm, extra=True)
Example #4
def xpgfid2() -> None:
    rmemcc = RMemCC.make_instance()
    common_kwargs = dict(
        #operands=[1, 2],
        operands=[1, 2, 3, 4, 5, 6],
        #operators=['+'],
        operators=['+', '-', 'x', '/'],
        npartial=4,  #3
        n_per_eqn=3,  #100,
        niters=100,  #100
        n_eqns=5,  #20
        show=True)
    basic_kwargs: Dict = dict()
    cc_kwargs = dict(
        rm=RMemCC.make_instance(funcs_to_count=(
            RMemFuncs.same,
            RMemFuncs.add_n(1),
            RMemFuncs.mul_by(2),
            RMemFuncs.add_n(2),
            RMemFuncs.sub_n(1),  # putting this one in lengthens the cycles by a lot
        )), )
    salt_kwargs = dict(rm=RMemSalt.make_instance(nsalt=5))
    for kwargs in [basic_kwargs, cc_kwargs, salt_kwargs]:
        #for kwargs in [salt_kwargs]:
        print()
        pr(kwargs)
        start_time = perf_counter()
        result = eqn_test(
            #show=True,
            **(common_kwargs | kwargs)  # type: ignore[arg-type]
        )
        test_duration = perf_counter() - start_time
        pr(result)
        lo(f'{sum(result.values())}      {test_duration:8.3f} sec')
Example #5
    def load_dataset(self, dataset_dirs, is_train=True):
        for i, n in enumerate(C.class_names[1:]):
            self.add_class("dataset", i, n)

        if isinstance(dataset_dirs, str):
            images_dirs = [dataset_dirs]
        elif isinstance(dataset_dirs, list):
            images_dirs = dataset_dirs
        else:
            error("'dataset_dirs' must be a list or a string")

        for images_dir in images_dirs:
            filenames = [
                f for f in os.listdir(images_dir)
                if re.match(r'sim-201[78]-[0-9].*\.png', f)
            ]
            if len(filenames) == 0:
                pr('*** No files found! ***')
                exit(-1)  # was 'exit - 1', which only raised a TypeError; terminate when no files are found
            pr("*** Directory: ", images_dir, "Number of images seen: ",
               len(filenames), "***")
            for filename in filenames:
                imid = filename[:-4]
                self.add_image(
                    'dataset',
                    image_id=imid,  # skip .png suffix
                    path=os.path.join(images_dir, filename),
                    annotation=os.path.join(images_dir, imid + '.txt'))
Example #6
 def run1(self,
          cue: Optional[CanvasAble] = None,
          rmem: Optional[RMem] = None,
          target_canvas: Optional[CanvasAble] = None,
          vv: int = 4) -> CanvasAble:
     '''Runs a single test, with a specified cue and a specified RMem
     object.'''
     cue_maker: Any = None
     if rmem is None or cue is None:
         rm, _, initial_canvases, cue_maker = self.make_setup()
         if cue is None:
             if target_canvas is None:
                 target_canvas = choice(initial_canvases)
                 '''
                 raise AttributeError(
                     'must specify target_canvas from which to generate cue.'
                 )
                 '''
             cue_maker = self.make_cue_maker()
             cue = cue_maker(target_canvas)
         if rmem is None:
             rmem = rm
     got = rmem.regenerate(canvas=cue).as_tuple()
     if vv >= 4:
         pr(rmem.lsteps)
     return got
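
# A possible call of run1() above (not in the original source): regenerate from
# a partial cue like those in Example #8. 'tester' (an instance of the
# enclosing test class) and the vv level are assumptions.
#got = tester.run1(cue=(2, '+', None, None, None), vv=0)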
Example #7
def xpgfid() -> None:
    '''Fidelity test: Does adding global count-columns enable the regeneration
    process to restore the original equation more often?'''
    # New absorb_canvas():
    #   For each pset in the limit cycle:
    #      raw_absorb it
    # Measure fidelity on grade-school table without count-columns.
    # Measure fidelity on grade-school table with count-columns.
    cls = type('RM', (WithCountColumns, RMem), {})
    rmem = cls()
    #eqn = (2, '+', 1, '=', 3)
    eqns = list(make_eqns(operands=range(1, 4), operators=['+']))
    for eqn in eqns:
        rmem.absorb_canvas(eqn)
    for eqn in eqns:
        startc = (None, None) + partial_eqn(eqn, k=4)
        got = rmem.run_gset(startc, niters=1000)
        lo(eqn)
        lo(startc)
        lo(got)
        lo()
    pr(rmem.lsteps)
    print()
    lo(startc)
    lo(eqn)
Example #8
def xp1() -> None:
    global rmem
    rmem = RMemAbs().absorb_canvases(pad_tup(e) for e in make_eqns([1], ['+']))
    new_eqn = (2, '+', 1, '=', 3)
    cues = [  # Parts of 2 + 1 = 3
        (2, '+', None, None, None), (None, None, 1, '=', 3),
        (2, '+', None, None, 3)
    ]
    relateds: Set[Tuple] = set()
    '''
    for cue in cues:
        print(rmem.run_gset(cue))
    '''
    while len(relateds) < 2:
        rel = tuple(x if random() < 0.3 else None for x in new_eqn)
        if all(x is None for x in rel):
            continue
        print(rel)
        got = rmem.regenerate(pad_tup(rel))
        print(got)
        #relateds.add(as_tuple(got.contents)[-5:])
        relateds.add(as_tuple(got)[-5:])
    print()
    pr(relateds)

    new_canvas = reduce(operator.add, relateds) + new_eqn
    rmem.absorb_canvases([new_canvas])

    new_cue = (None, ) * 10 + (None, '+', 1, None, 3)
    for _ in range(1):
        print()
        rmem.regenerate(new_cue, niters=100)
        pr(rmem.lsteps)
Example #9
def xp_single():
    global rmem
    rmem = RMem.run(operands=range(1, 2),
                    operators=('+',),  # ('+') alone is just the string '+'
                    startc=(None, '+', None, None, None),
                    ndups=1,
                    niters=1000)
    pr(rmem.lsteps)
Example #10
def r4_5_6__15(*args, **kwargs):
    global fm, ca, wa
    fm = Numbo(*args, **kwargs)
    ca = fm.build(SeqCanvas([SeqState((4, 5, 6), None)]))
    wa = fm.build(Want(15, CellRef(ca, 0)))
    fm.do_timestep(num=19)
    pr(fm, edges=True)
    print()
    #fm.pr_flows()
    print(f'seed={fm.seed}')
Example #11
 def log(self, f: Indenting, **kwargs) -> None:
     print(self.name, file=f)
     with indent(f):
         for k, v in kwargs.items():
             if isinstance(v, dict):
                 print(f'{short(k)}=', file=f)
                 with indent(f):
                     pr(v, key=short, file=f)
             else:
                 print(f'{short(k)}={short(v)}', file=f)
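
# A possible call of log() above (not in the original source): 'node', the
# stream handed to Indenting, and the kwargs are all assumptions; only the
# Indenting/indent pairing follows the code above.
#node.log(Indenting(sys.stdout), weight=0.5, neighbors={'a': 1.0, 'b': 2.0})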
Example #12
    def run(self, vv: Optional[int] = None) -> FidelityTestResult:
        '''Run a full test: all canvases and cues.'''
        vv: int = self.vv if vv is None else vv
        seed = reseed(self.seed)
        num_tests = 0  # number of tests actually run
        results: Dict[Tuple[BaseValue, ...], int] = defaultdict(int)

        if vv >= 1 and self.tspec:
            print(self.tspec)

        # Run the tests
        start_time = perf_counter()
        while num_tests <= self.nsamples:
            canvas = choice(self.initial_canvases)
            for _ in range(self.n_per_cue):
                num_tests += 1
                cue = self.canvas_to_cue(
                    canvas)  # type: ignore[misc, operator]
                if vv >= 3:
                    lo('  CUE', cue)
                got = as_tuple(self.rmem.regenerate(canvas=cue))
                if vv >= 3:
                    lo('  GOT', got)
                if vv >= 4:
                    pr(self.rmem.lsteps)
                yes = self.is_success(canvas,
                                      got)  # type: ignore[misc, operator]
                if yes:
                    results[as_tuple(canvas)] += 1
                if vv == 1:
                    print('+' if yes else '.', end='', flush=True)

        duration = perf_counter() - start_time
        if vv == 1:
            print(flush=True)

        return FidelityTestResult(
            tspec=self.tspec,
            rmem=self.rmem,
            cue_maker=self.canvas_to_cue,  # type: ignore[misc, arg-type]
            results=results,  # type: ignore[arg-type]
            duration=duration,
            num_tests=num_tests,
            seed=seed)
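
# A possible way to summarize the FidelityTestResult returned above (not in the
# original source): 'tester' is an assumed instance of the enclosing class, and
# the field accesses mirror the constructor arguments passed above.
#r = tester.run(vv=1)
#print(sum(r.results.values()), 'successes in', r.num_tests, 'tests')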
Example #13
 def evaluate_params(self):
     eval_results = {}
     for param in self.params:
         hogInfer = hog_predictor(
             util.params_to_filename([self.sign] + list(param)),
             use_cuda=self.use_cuda)
         results = self.evaluate_model(hogInfer)
         _, _, overall_auc = util.pr(results['labels'],
                                     results['scores'],
                                     misses=results['misses'],
                                     plot=False)
         overall_time = np.array(results['times']).mean()
         f = (str(param[0]) + '_' + str(param[1][0]) + '_' +
              str(param[2][0]) + '_' + str(param[3]))
         eval_results[f] = (overall_auc, overall_time)
         print(
             f'{f} has a time of {overall_time} and AUC of {overall_auc}')
     return eval_results
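
# A possible follow-up to evaluate_params() above (not in the original source):
# pick the parameter string with the highest AUC from the returned dict.
# 'evaluator' is an assumed instance of the enclosing class.
#eval_results = evaluator.evaluate_params()
#best_params, (best_auc, best_time) = max(eval_results.items(),
#                                         key=lambda kv: kv[1][0])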
Example #14
    def evaluate_model(self, hogInfer):
        "Evaluate the model by looping over imgs loaded from the folder"
        # Loop over all images in the dataset
        all_labels = []
        all_scores = []
        all_times = []
        negs = []
        misses = 0

        for t, image in enumerate(self.imgs):
            # Pick all the gt boxes in the selected image
            gt_boxes = np.array(self.pos_rects[t])
            start = timer()
            # Run the detector
            boxes, scores = hogInfer.predict(image, self.n,
                                             self.overlap_thresh)
            end = timer()
            thistime = end - start
            # Evaluate the detector and plot the results
            results = self.eval_detections(gt_boxes, boxes)
            all_labels.append(results['labels'])
            all_scores.append(scores)
            all_times.append(thistime)
            misses += results['misses']

            # Compute the per-image AP
            _, _, ap = util.pr(results['labels'],
                               scores,
                               misses=results['misses'],
                               plot=False)
            print(
                f"Evaluating on image {t+1:3d} of {len(self.imgs):3d}: AP: {ap*100:6.1f}%"
            )

        return {
            'labels': np.concatenate(all_labels, axis=0),
            'scores': np.concatenate(all_scores, axis=0),
            'misses': misses,
            'negatives': negs,
            'times': all_times
        }
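
# A possible follow-up to evaluate_model() above (not in the original source):
# the returned dict feeds util.pr() the same way Example #13 does, giving an
# overall AUC. 'evaluator' and 'hogInfer' are assumed to exist.
#results = evaluator.evaluate_model(hogInfer)
#_, _, overall_auc = util.pr(results['labels'], results['scores'],
#                            misses=results['misses'], plot=False)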
Example #15

@dataclass(frozen=True)
class Blank(FeatureWrapper):
    index: Optional[int] = None  # which Blank

    def __str__(self):
        return f'Blank({self.feature}, {self.index})'


augnodes = [  # nodes with which to augment the slipnet
    Blank(Before(n), i) for n in [4, 5, 6] for i in range(3)
]
slipnet.add_layer2_nodes(augnodes)
sm = set(
    frozenset([a, b]) for a in augnodes for b in augnodes
    if a != b and a.feature == b.feature)
mut_inh(sm)

#pr(slipnet)
args456 = [Before(4), Before(5), Before(6), After(15)]
pr(q(args456))
##pr(q(args456, type=Blank))

args = [Before(4), Before(5), After(15)]
#pts(q(args, type=Blank))
#pr(augnodes)
#print()
#pr(sm)
#pr(slipnet.qnodes(Blank))
Example #16
                result[node] = ts
            ts.add(subt, a)
    return list(result.values())

def tsplot(*tss: NodeTimeseries) -> None:
    '''Put a time-series plot on the screen.'''
    plt.ion()
    plt.clf()
    plt.xlabel('t')
    plt.ylabel('a')
    for ts in tss:
        ts.plot()
    max_t = max((ts.max_t() for ts in tss), default=0)
    max_a = max((ts.max_a() for ts in tss), default=0.0)
    plt.axis([0, max_t, 0, max_a])
    plt.legend()
    

if __name__ == '__main__':
    import sys

    try:
        filename = sys.argv[1]
    except IndexError:
        filename = 'Want.csv'
    tss = readcsv(filename)
    z = nlargest(20, tss, lambda ts: ts.max_a())
    #tsplot(*tss)
    tsplot(*z)
    pr(z)
Example #17
            masks[:, :, i] = asarray(m.split()[0]) / 255  # alpha channel?  Nooo!

        return masks, asarray(class_ids, dtype='int32')

    # load an image reference
    def image_reference(self, image_id):
        info = self.image_info[image_id]
        return info['path']


# prepare train set
train_set = DeepVisionDataset()
train_set.load_dataset(C.train_dirs, is_train=True)
train_set.prepare()
pr('Train: %d' % len(train_set.image_ids))

# prepare test/val set
test_set = DeepVisionDataset()
test_set.load_dataset(C.validation_dirs, is_train=False)
test_set.prepare()

pr('Test: %d' % len(test_set.image_ids))
# prepare config
config = DeepVisionConfig()
config.display()

from tensorflow.keras.callbacks import CSVLogger
logger = CSVLogger("train.log", append=True, separator='\t')

# define the model, load weights and run training
Example #18
        agent = list(sample_without_replacement(
            [nas.node for nas in q],
            k=1,
            weights=[nas.a for nas in q]
        ))
        print(agent)
        '''

    if False:
        fm = Numbo()
        c = Consume(operands=(5, 4), operator=plus)
        ca = fm.build(SeqCanvas([SeqState((4, 5, 6), None)]))
        wa = fm.build(Want(15, canvas=ca, addr=0))
        #wa.go(fm)
        fm.do_timestep(num=20)
        pr(fm, edges=True)

        print()
        fm.pr(fm.search_ws(Consume, max_n=5))
        print()
        w = first(fm.elems(Want))
        cs = list(fm.elems(Consume))
        pts(cs)

    if False:
        fm = Numbo()
        ca = fm.build(SeqCanvas([SeqState((4, 5, 6), None)]))
        wa = fm.build(Want(15, canvas=ca, addr=0))
        cr0 = CellRef(ca, 0)
        cr1 = CellRef(ca, 1)
        c1 = fm.build(Consume(operands=(5, 4), operator=plus, source=cr0))
Example #19
 def pr_flows(self):
     pr(self.propagator.flows)
Example #20
if __name__ == '__main__':
    #    rmem = RMem.run(
    #        operands=range(1, 8),   # 4
    #        startc=(None, '+', 1, None, 3),
    #        prep=ndups(3),
    #        niters=1000
    #    )
    #rmtype = type('RMemAdjacent', (WithAdjacentRelativePainters, RMem), {})
    #rmem = rmtype(niters=30)
    rmem = RMemAbs.make_instance(
        (SkewedClarityWeight, WithAdjacentRelativePainters), niters=30)

    #p = rmem.painter_from_to(1, 2, 1, '+')
    #print(p)

    #p: Painter = (Match(1), Right(1), '+')
    #c = Canvas1D.make_from((1, None, None, None, None))
    #rmem.run_generator(c, p)
    #print(c)

    eqn = (1, '+', 1, '=', 2)
    pps = rmem.canvas_to_painters(eqn)
    #pr(pps)
    pset = rmem.painters_to_pset(pps)
    pr(pset)

    startc = (1, None, None, None, None)
    c = rmem.regenerate(startc, pset, vv=4)
    print(c)
Example #21
        A Cell or CellRef should store the threshold to paint on it. Possibly
        a minimum activation level for the painter, and/or 'the painter clearly
        beats its competition.'

        ImLitPainter: represents a move that we can't do yet because we lack
        avails; not as promising as a LitPainter that can really go

        build a sub-Want
        'slip' a Want?

        Glomming
        NumberLine
        match/similarity measure

        """


if __name__ == '__main__':
    fm = TestFM(seed=1)
    ca = fm.build(SeqCanvas([SeqState((4, 5, 6), None)]))
    cr0 = CellRef(ca, 0)  # type: ignore[arg-type]
    wa = fm.build(Want(target=15,
                       startcell=cr0,
                       sk=RaiseException(TestFoundIt)),
                  min_a=4.0)
    fm.do_timestep(until=40)
    #pr(fm, edges=True, seed=True, extra=True)
    pr(fm, (LitPainter, Want), edges=True, extra=True)
    pr(fm, SeqCanvas)
Example #22
 def pr(self, *args, **kwargs) -> None:
     for ts in self.tsd.values():
         pr(ts)
Example #23
 def pr_flows(self) -> None:
     pr(self.propagator.flows)
Example #24
from config import DeepVisionConfig, class_names
import config as C

# Directory of images to run detection on

conf = DeepVisionConfig()
conf.BATCH_SIZE = 1
conf.IMAGES_PER_GPU = 1
conf.display()

# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir='./', config=conf)
weights, last_epoch = find_last(model)
out_dir = os.path.join(os.path.dirname(weights), 'test_output_' + last_epoch)

pr('    Using weights from: ', weights)
pr('    Test images from:', C.test_dirs)
pr('    Writing output to: ', out_dir)

os.mkdir(out_dir)

# Load weights trained on MS-COCO
model.load_weights(weights, by_name=True)

for d in C.test_dirs:
    for root, dirs, files in os.walk(d):
        for f in files:
            try:
                image = skimage.io.imread(os.path.join(root, f))
            except Exception:  # don't swallow KeyboardInterrupt/SystemExit
                pr('    Ignoring file: ' + root + ' ' + f)