Example #1
def model_network(param_dict):
    """
    This model network consists of a spike source and a neuron (IF_curr_alpha). 
    The spike rate of the source and the weight can be specified in the 
    param_dict. Returns the number of spikes fired during 1000 ms of simulation.
    
    Parameters:
    param_dict - dictionary with keys
                 rate - the rate of the spike source (spikes/second)
                 weight - weight of the connection source -> neuron
                 
    Returns:
    dictionary with keys:
        source_rate - the rate of the spike source
        weight - weight of the connection source -> neuron
        neuron_rate - spike rate of the neuron
    """
    #set up the network
    from retina import Retina
    retina = Retina(param_dict['N'])
    params = retina.params
    params.update(param_dict)  # updates what changed in the dictionary
    # simulate the experiment and get its data
    data = retina.run(params)  #,verbose=False)
    neuron_rate = data['out_ON_DATA'].mean_rate()
    print neuron_rate
    # return everything, including the input parameters
    return {
        'snr': param_dict['snr'],
        'kernelseed': param_dict['kernelseed'],
        'neuron_rate': neuron_rate
    }
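A minimal usage sketch (not part of the original example); the values for 'N', 'snr', and 'kernelseed' below are made up purely for illustration:

if __name__ == '__main__':
    # Hypothetical parameter values, chosen only to illustrate the call.
    result = model_network({'N': 40, 'snr': 2.0, 'kernelseed': 12345})
    print(result['neuron_rate'])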
Example #2
    def __init__(self, content, retina=False):
        self.CAMERA_INITIAL_ANGLE_V = deg2rad(10.0)
        # TODO: allow other environments to be specified as well
        self.content = content
        self.env = Environment(self.content)
        self.egocentric_images = None
        self.allocentric_images = None
        self.retina = Retina() if retina else None
Example #3
def test_view_buffer():
    r = Retina()
    code = """for i in [1, 2, 3, 4]:
    print "The count is", i
    print "Done counting"""

    buf = file_to_text_buffer(StringIO(code))
    s = r.view_buffer(buf, x=7, y=1)
    assert_equal(s, "** ^The count **^")
Example #4
    def __init__(self, weights, classes=['building']):
        self.net = Retina(classes).eval().cuda()
        chkpnt = torch.load(weights)
        self.net.load_state_dict(chkpnt['state_dict'])
        self.transform = transforms.Compose([
            transforms.Resize((300, 300)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
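For context, a standalone sketch (not from the original example) of how the torchvision pipeline above could be applied to a single image before inference; 'example.jpg' is a hypothetical input path:

from PIL import Image
from torchvision import transforms

# Same preprocessing steps as the transform defined above.
preprocess = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

img = Image.open('example.jpg').convert('RGB')  # hypothetical input file
batch = preprocess(img).unsqueeze(0)            # tensor of shape (1, 3, 300, 300)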
Example #5
    def __init__(self,
                 text_buffer,
                 retina=None,
                 pos=(0, 0),
                 char_vis=(0.226, 0.404)):
        self.text_buffer = text_buffer
        self.retina = retina
        self.pos = pos
        self.char_vis = char_vis

        if self.retina is None:
            self.retina = Retina()
Example #6
def test_view_string():
    r = Retina()

    # Empty string (whitespace for all retina slots)
    s = r.view_string("")
    assert_equal(s, "".join([Retina.LOW_WHITESPACE] * len(r.slots)))

    # Letters
    s = r.view_string(" Hello World")
    assert_equal(s, " ***lo World     ")

    # Numbers
    s = r.view_string("12 This is a test")
    assert_equal(s, "## *his is a ****")
Example #7
    def __init__(self, weights, classes=['building'], cuda=True):
        chkpnt = torch.load(weights)
        self.config = chkpnt['args']
        self.net = Retina(self.config).eval()
        self.net.load_state_dict(chkpnt['state_dict'])
        self.transform = transforms.Compose([
            transforms.Resize((self.config.model_input_size,
                               self.config.model_input_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.net = self.net.cuda()
        self.net.anchors.anchors = self.net.anchors.anchors.cuda()
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        self.cuda = cuda
Example #8
        net = Perceptrons(perceptron_pop, feature_pop)
        net.matrix_learning_pot[:] = 0
        net.upload_config()
        #test
        #syn = feature_pop.synapses['programmable'][::16]
        #stim = syn.spiketrains_poisson(10)
        #nsetup.stimulate(stim,send_reset_event=False)

        #set up filters and connect retina
        inputpop = pyNCS.Population('', '')
        inputpop.populate_by_id(nsetup, 'mn256r1', 'excitatory',
                                np.linspace(0, 255, 256))
        #reset multiplexer
        chip.configurator._set_multiplexer(0)
        ret = Retina(inputpop)
        ret._init_fpga_mapper()
        pre_teach, post_teach, pre_address, post_address = ret.map_retina_to_mn256r1_randomproj(
        )
        nsetup.chips['mn256r1'].load_parameters(
            'biases/biases_wijlearning_ret_perceptrons_1.biases')

        #two different biases for teacher and inputs
        #matrix_w = np.zeros([256,256])
        #matrix_w[:,0:128]  = 2
        #matrix_w[:,128:256]  = 1
        #nsetup.mapper._program_onchip_programmable_connections(matrix_w)

        #retina, pre_address, post_address  = ret.map_retina_to_mn256r1_macro_pixels(syntype='learning')
        #on off retina nsetup.mapper._program_detail_mapping(2**6) on -> 7
        is_configured = True
Example #9
                        default=300,
                        type=int,
                        help='Input dimensions for SSD')
    args = parser.parse_args()

    if 'VOC' in args.train_data:
        dataset = VOC(args.train_data, transform=Transform(args.ssd_size))
    else:
        dataset = SpaceNet(args.train_data, transform=Transform(args.ssd_size))

    args.checkpoint_dir = os.path.join(args.save_folder,
                                       'ssd_%s' % datetime.now().isoformat())
    args.means = (104, 117, 123)  # only support voc now
    args.num_classes = len(dataset.classes) + 1
    args.stepvalues = (20, 50, 70)
    args.start_iter = 0
    args.writer = SummaryWriter()

    os.makedirs(args.save_folder, exist_ok=True)

    default_type = 'torch.cuda.FloatTensor' if args.cuda else 'torch.FloatTensor'
    torch.set_default_tensor_type(default_type)

    net = Retina(dataset.classes, args.ssd_size)

    if args.cuda:
        net = net.cuda()

    load_checkpoint(net, args)
    train(net, dataset, args)
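The fragment above relies on a load_checkpoint helper that is not shown. Below is a hypothetical sketch of a compatible implementation, assuming the same {'state_dict': ...} checkpoint layout used in the other examples; it is not the repository's actual code:

import os
import torch

def load_checkpoint(net, args):
    # Hypothetical helper: resume weights and the iteration counter if
    # args.resume points to an existing checkpoint file.
    resume_path = getattr(args, 'resume', None)
    if resume_path and os.path.isfile(resume_path):
        chkpnt = torch.load(resume_path, map_location='cpu')
        net.load_state_dict(chkpnt['state_dict'])
        args.start_iter = chkpnt.get('iteration', 0)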
Example #10
        color = label_color(labels[i])
        draw_box(img, box, color=color)

        caption = "{} {:.3f}".format(labels_to_names[labels[i]], scores[i])
        draw_caption(img, box, caption)

    return img


if __name__ == '__main__':
    labels_path = './models/coco.names'
    labels_to_names = open(labels_path).read().strip().split("\n")

    robot = Robot()
    yolo_detector = Yolo()
    retina_detector = Retina()

    robot.start()
    while True:
        frame = robot.getFrame()

        frame = draw(yolo_detector, retina_detector, frame)
        cv2.imshow('ai2thor', frame)

        key = chr(cv2.waitKey(0) & 0xFF)  # mask to the low byte so chr() gets a valid key code
        if key == 'q':
            break
        robot.apply(key)

    robot.stop()
    cv2.destroyAllWindows()
Example #11
def test_view_line():
    r = Retina()
    s = r.view_line("x = [2, 8, 7, 9, -5, 0, 2]", 2)
    assert_equal(s, "- (#, 8, 7, 9. -#")
Example #12
                        help='Path to training data')
    parser.add_argument('--data_dir',
                        default=None,
                        help='Directory of training data')
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    args.cuda = args.gpu is not None

    default_type = 'torch.cuda.FloatTensor' if args.cuda else 'torch.FloatTensor'
    torch.set_default_tensor_type(default_type)

    DS_Class = VOC if 'VOC' in args.train_data else SpaceNet

    net = Retina(args)
    dataset = DS_Class(args.train_data,
                       Transform(args, net.anchors),
                       args,
                       root_dir=args.data_dir)

    args.checkpoint_dir = os.path.join(args.save_folder,
                                       'ssd_%s' % datetime.now().isoformat())
    args.start_iter = 0

    if args.resume:
        args.checkpoint_dir = os.path.dirname(args.resume)

    os.makedirs(args.save_folder, exist_ok=True)

    if args.cuda: