class Controller(Thread):
    """PPM output controller powered by pigpio

    **Start the pigpio daemon before running: sudo pigpiod**

    Arguments:
    input_queue -- Queue to transfer channel signal lists to the PPM output
    gpio        -- Number of output pin, equivalent to GPIO.BCM (GPIOX)
    channels    -- Number of PPM channels (8 default)
    frame_ms    -- Time interval between frames in milliseconds (5 minimum, 20 default)

    Source: https://www.raspberrypi.org/forums/viewtopic.php?t=219531
    """

    def __init__(self, input_queue, gpio, channels=8, frame_ms=20, gpio_sonic=19):
        Thread.__init__(self)
        self._input_queue = input_queue
        self._gpio = gpio
        self._channels = channels
        self._pi = get_only(pigpio.pi)
        if not self._pi.connected:
            print('Error: pigpio is not initialized')
            exit(1)
        self._ppm = PPM(self._pi, self._gpio, channels=channels,
                        frame_ms=frame_ms, gpio_sonic=gpio_sonic)
        # Default output signal for stabilizing
        self._ppm.update_channels([1500, 1500, 1100, 1500, 1500, 1500, 1500, 1500])
        self.daemon = True
        self.start()

    def run(self):
        while True:
            signals = self._input_queue.get()
            self._ppm.update_assign(signals)
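# Minimal usage sketch (not part of the original code): it shows how the Controller above
# is typically driven -- push lists of channel pulse widths (in microseconds) into the
# queue and the daemon thread forwards them to the PPM output. It assumes a running
# pigpio daemon (sudo pigpiod); GPIO 18 and the sleep are arbitrary example values.
import time
from queue import Queue

signal_queue = Queue(maxsize=1)
controller = Controller(signal_queue, gpio=18)  # the thread starts itself in __init__
signal_queue.put([1500, 1500, 1100, 1500, 1500, 1500, 1500, 1500])  # neutral sticks, low throttle
time.sleep(1)  # keep the main thread alive briefly so the daemon thread can emit frames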
    def __init__(self, debug=0):
        # just one buffer because we only need the last value
        self.output_count = 0
        self.output_queue = Queue(1)
        self.debug = debug
        # self.cap = cv2.VideoCapture(0)
        if not debug:
            try:
                self._pi = get_only(pigpio.pi)
                self._pi.wave_tx_stop()
                self.sonic = Sonic()
                self.lidar = TFMiniLidar(TF_PORT, debug=0)
            except Exception as exc:
                raise IOError('failed to initialize peripherals') from exc
            self.hight = 130
            PPM(self.output_queue, 13)

            # sanity check
            sleep(0.1)
            print('Sanity check -- Sonar value:', self.sonic.value)
            if self.sonic.value == 0:
                print('Error: Sonar is not working')
                raise IOError('sonar returned no reading')
        # -------------------------
        self.yaw_pid = PID(kp=1.2)
        self.pitch_pid = PID(kp=1, windup_guard=50)
        self.roll_pid = PID(kp=0, ki=0.40, kd=0, debug=True)
    def __init__(self):
        # just one buffer because we only need the last value
        print('init plane')
        self.output_count = 0
        self.output_queue = Queue(1)
        # self.cap = cv2.VideoCapture(0)
        try:
            self._pi = ins.get_only(pigpio.pi)
            self._pi.wave_tx_stop()
            self.sonic = Sonic()
            self.lidar = TFMiniLidar(TF_PORT, debug=DEBUG)
        except Exception as exc:
            raise IOError('failed to initialize peripherals') from exc
        self.hight = 130
        PPM(self.output_queue, 13)

        # sanity check
        sleep(0.1)
        print('Sanity check -- Sonar value:', self.sonic.value)
        if self.sonic.value == 0:
            print('Error: Sonar is not working')
            raise IOError('sonar returned no reading')
        print('all peripherals initialized')
        # -------------------------
        self.yaw_pid = PID(kp=0.7)
        self.pitch_pid = PID(kp=0.35, ki=0.3, kd=0.35)
        self.roll_pid = PID(kp=0.55, ki=0.3, kd=0.25)
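# The PID class used above is not shown in these snippets. Below is a minimal sketch of what
# it might look like, assuming a conventional discrete PID with integral windup clamping.
# The constructor arguments (kp, ki, kd, windup_guard, debug) mirror the calls above; the
# update() method and its dt handling are illustrative assumptions, not the project's code.
import time

class PID:
    def __init__(self, kp=0.0, ki=0.0, kd=0.0, windup_guard=None, debug=False):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.windup_guard = windup_guard
        self.debug = debug
        self._integral = 0.0
        self._last_error = 0.0
        self._last_time = time.time()

    def update(self, error):
        now = time.time()
        dt = max(now - self._last_time, 1e-6)
        self._integral += error * dt
        if self.windup_guard is not None:
            # clamp the integral term to limit windup
            self._integral = max(-self.windup_guard, min(self.windup_guard, self._integral))
        derivative = (error - self._last_error) / dt
        output = self.kp * error + self.ki * self._integral + self.kd * derivative
        if self.debug:
            print('PID error=%.3f output=%.3f' % (error, output))
        self._last_error = error
        self._last_time = now
        return output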
def main(): """Main program logic.""" args = parse_args() log_level = args['log'] test_mode = args['test'] use_price_token = args['use_price_token'] setup_logger(log_level, __file__) logger.info('Started app') if test_mode: logger.info('~~TEST MODE~~') global ppm ppm = PPM(use_price_token=use_price_token) # Run REST API app.run(debug=test_mode, threaded=False)
def main():
    # Create Grid object
    grid = Grid(1000, 1000)

    # Random noise on every other pixel (checkerboard pattern)
    for i in range(grid.width):
        for j in range(grid.height):
            if (i + j) % 2:
                grid.grid[i][j].set_red_num(randint(0, 255))
                grid.grid[i][j].set_green_num(randint(0, 255))
                grid.grid[i][j].set_blue_num(randint(0, 255))

    # Create PPM
    ppm = PPM(grid)

    # Open file and write PPM
    with open("pic.ppm", "w") as f:
        f.write(str(ppm))
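# The PPM class above is not shown. For reference, a plain-text PPM ("P3") file is just a
# header (magic number, width, height, max value) followed by one R G B triple per pixel.
# A minimal sketch of what str(ppm) might produce, assuming Grid exposes get_*_num
# accessors mirroring the set_*_num calls used above (those accessor names are assumptions):
def grid_to_p3(grid):
    lines = ['P3', '%d %d' % (grid.width, grid.height), '255']
    for j in range(grid.height):
        row = []
        for i in range(grid.width):
            pixel = grid.grid[i][j]
            row.append('%d %d %d' % (pixel.get_red_num(),
                                     pixel.get_green_num(),
                                     pixel.get_blue_num()))
        lines.append(' '.join(row))
    return '\n'.join(lines)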
    def __init__(self, block, layers, num_classes=19, bins=(1, 2, 3, 6),
                 norm_layer=nn.BatchNorm2d, bn_eps=1e-5, bn_momentum=0.1,
                 deep_stem=False, stem_width=32, inplace=True, alpha=1):
        stem_width = int(np.rint(stem_width * alpha))
        self.inplanes = stem_width * 2 if deep_stem else int(np.rint(64 * alpha))
        super(ResNet, self).__init__()
        self.num_classes = num_classes
        if deep_stem:
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(stem_width, eps=bn_eps, momentum=bn_momentum),
                nn.ReLU(inplace=inplace),
                nn.Conv2d(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False),
                norm_layer(stem_width, eps=bn_eps, momentum=bn_momentum),
                nn.ReLU(inplace=inplace),
                nn.Conv2d(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(3, int(np.rint(64 * alpha)), kernel_size=7, stride=2,
                                   padding=3, bias=False)
        self.bn1 = norm_layer(stem_width * 2 if deep_stem else int(np.rint(64 * alpha)),
                              eps=bn_eps, momentum=bn_momentum)
        self.relu = nn.ReLU(inplace=inplace)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, norm_layer, int(np.rint(64 * alpha)), layers[0],
                                       inplace, bn_eps=bn_eps, bn_momentum=bn_momentum)
        self.layer2 = self._make_layer(block, norm_layer, int(np.rint(128 * alpha)), layers[1],
                                       inplace, stride=2, bn_eps=bn_eps, bn_momentum=bn_momentum)
        self.layer3 = self._make_layer(block, norm_layer, int(np.rint(256 * alpha)), layers[2],
                                       inplace, stride=2, bn_eps=bn_eps, bn_momentum=bn_momentum)
        self.layer4 = self._make_layer(block, norm_layer, int(np.rint(512 * alpha)), layers[3],
                                       inplace, stride=2, bn_eps=bn_eps, bn_momentum=bn_momentum)

        # self.aspp = build_aspp(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), 8, norm_layer)
        # self.ppm2 = PPM(int(np.rint(128 * alpha)), int(int(np.rint(128 * alpha)) / len(bins)), bins, norm_layer)
        # self.ppm3 = PPM(int(np.rint(256 * alpha)), int(int(np.rint(256 * alpha)) / len(bins)), bins, norm_layer)
        # self.ppm4 = PPM(int(np.rint(512 * alpha)), int(int(np.rint(512 * alpha)) / len(bins)), bins, norm_layer)
        self.ppm = PPM(int(np.rint(512 * alpha)), int(int(np.rint(512 * alpha)) / len(bins)),
                       bins, norm_layer)

        self.fuse2_1 = InterFeatureFusion(int(np.rint(128 * alpha)), int(np.rint(64 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse3_1 = InterFeatureFusion(int(np.rint(256 * alpha)), int(np.rint(128 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse3_2 = InterFeatureFusion(int(np.rint(128 * alpha)), int(np.rint(64 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse4_1 = InterFeatureFusion(int(np.rint(512 * alpha)), int(np.rint(256 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse4_2 = InterFeatureFusion(int(np.rint(256 * alpha)), int(np.rint(128 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse4_3 = InterFeatureFusion(int(np.rint(128 * alpha)), int(np.rint(64 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)

        self.down2 = InterFeatureDownsample(int(np.rint(64 * alpha)), int(np.rint(128 * alpha)), scale=1 / 2,
                                            bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.down3 = InterFeatureDownsample(int(np.rint(64 * alpha)), int(np.rint(256 * alpha)), scale=1 / 4,
                                            bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)

        self.conv_low = ConvBnRelu(int(np.rint(64 * alpha)), int(np.rint(512 * alpha)), 4, 4, 0,
                                   has_bn=True, norm_layer=norm_layer, has_relu=True,
                                   inplace=inplace, has_bias=False)
        self.conv_high = ConvBnRelu(int(np.rint(64 * alpha)), int(np.rint(512 * alpha)), 4, 4, 0,
                                    has_bn=True, norm_layer=norm_layer, has_relu=True,
                                    inplace=inplace, has_bias=False)

        # low_in_channels, high_in_channels, out_channels, key_channels, value_channels, dropout
        # self.fusion = AFNB(int(np.rint(1024 * alpha)), 2048, 2048, int(np.rint(256 * alpha)),
        #                    int(np.rint(256 * alpha)), dropout=0.05, sizes=([1]), norm_layer=norm_layer)
        self.fusion = AFNB(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), int(np.rint(512 * alpha)),
                           int(np.rint(128 * alpha)), int(np.rint(128 * alpha)),
                           dropout=0.05, sizes=([1]), norm_layer=norm_layer)

        # extra added layers
        self.context = nn.Sequential(
            # nn.Conv2d(2048, int(np.rint(512 * alpha)), kernel_size=3, stride=1, padding=1),
            # ModuleHelper.BNReLU(int(np.rint(512 * alpha)), norm_type=self.configer.get('network', 'norm_type')),
            ConvBnRelu(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), 3, 1, 1,
                       has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False),
            # APNB(in_channels=int(np.rint(512 * alpha)), out_channels=int(np.rint(512 * alpha)),
            #      key_channels=int(np.rint(256 * alpha)), value_channels=int(np.rint(256 * alpha)),
            #      dropout=0.05, sizes=([1]), norm_type=self.configer.get('network', 'norm_type'))
            APNB(in_channels=int(np.rint(512 * alpha)), out_channels=int(np.rint(512 * alpha)),
                 key_channels=int(np.rint(128 * alpha)), value_channels=int(np.rint(128 * alpha)),
                 dropout=0.05, sizes=([1]), norm_layer=norm_layer))
        self.head = nn.Sequential(
            ConvBnRelu(int(np.rint(512 * alpha)) + int(np.rint(64 * alpha)), int(np.rint(64 * alpha)), 3, 1, 1,
                       has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False),
            nn.Conv2d(int(np.rint(64 * alpha)), int(np.rint(64 * alpha)), kernel_size=1, stride=1,
                      padding=0, bias=True),
            nn.Dropout2d(0.1)  # added to prevent overfitting
        )
        self.depthwise_conv = nn.Conv2d(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), kernel_size=3,
                                        stride=1, padding=1, groups=int(np.rint(512 * alpha)), bias=True)
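# The PPM used above is a Pyramid Pooling Module in the PSPNet style; its definition is not
# shown in these snippets. A minimal sketch consistent with the call signature
# PPM(in_dim, reduction_dim, bins, norm_layer): each bin adaptively pools the input to a
# b x b grid, reduces channels with a 1x1 conv, upsamples back to the input size, and
# concatenates with the input. Treat this as an illustration, not the project's exact code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class PPMSketch(nn.Module):
    def __init__(self, in_dim, reduction_dim, bins, norm_layer=nn.BatchNorm2d):
        super(PPMSketch, self).__init__()
        self.features = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(b),  # pool to a b x b grid
                nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
                norm_layer(reduction_dim),
                nn.ReLU(inplace=True),
            ) for b in bins
        ])

    def forward(self, x):
        size = x.shape[2:]
        out = [x]
        for branch in self.features:
            # upsample each pooled branch back to the input resolution
            out.append(F.interpolate(branch(x), size, mode='bilinear', align_corners=True))
        # output channels: in_dim + len(bins) * reduction_dim
        return torch.cat(out, dim=1)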
from graphics import *
from ppm import PPM
from raytracer_part3 import raytracer

# Build the Spheres that will be in our world
S1 = Sphere(Point3D(300, 200, 200), 100, ColorRGB(1.0, 0.2, 0.4))
S2 = Sphere(Point3D(-200, -100, 50), 35, ColorRGB(0.3, 0.8, 0.2))
S3 = Sphere(Point3D(50, 20, 100), 25, ColorRGB(0.4, 0.1, 0.4))
S4 = Sphere(Point3D(300, -200, 600), 250, ColorRGB(0.6, 0.6, 0.4))
S5 = Sphere(Point3D(400, 400, 900), 400, ColorRGB(0.0, 0.2, 1.0))

# Build the Planes that will be in our world
P1 = Plane(Point3D(50, 50, 999), Normal(0, 0, 1), ColorRGB(0.8, 0.8, 0.8))
P2 = Plane(Point3D(50, 50, 900), Normal(1, 1, 1), ColorRGB(1.0, 1.0, 1.0))

vp = ViewPlane(Point3D(50, 50, -50), Normal(-0.2, 0, 1), 200, 100, 1.0)

objects = [S1, S2, S3, S4, S5, P1, P2]

rt = raytracer(vp, objects)
rt.color_plane()
camera = rt.get_viewplane()
PPM(camera, 'part4_5.ppm')
    def __init__(self, block, layers, num_classes=19, bins=(1, 2, 3, 6),
                 norm_layer=nn.BatchNorm2d, bn_eps=1e-5, bn_momentum=0.1,
                 deep_stem=False, stem_width=32, inplace=True, alpha=1):
        stem_width = int(np.rint(stem_width * alpha))
        self.inplanes = stem_width * 2 if deep_stem else int(np.rint(64 * alpha))
        super(ResNet, self).__init__()
        self.num_classes = num_classes
        if deep_stem:
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, stem_width, kernel_size=3, stride=2, padding=4, bias=False),
                norm_layer(stem_width, eps=bn_eps, momentum=bn_momentum),
                nn.ReLU(inplace=inplace),
                nn.Conv2d(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False),
                norm_layer(stem_width, eps=bn_eps, momentum=bn_momentum),
                nn.ReLU(inplace=inplace),
                nn.Conv2d(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(3, int(np.rint(64 * alpha)), kernel_size=3, stride=2,
                                   padding=4, bias=False)
        self.bn1 = norm_layer(stem_width * 2 if deep_stem else int(np.rint(64 * alpha)),
                              eps=bn_eps, momentum=bn_momentum)
        self.relu = nn.ReLU(inplace=inplace)
        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, norm_layer, int(np.rint(64 * alpha)), layers[0],
                                       inplace, bn_eps=bn_eps, bn_momentum=bn_momentum)
        self.layer2 = self._make_layer(block, norm_layer, int(np.rint(128 * alpha)), layers[1],
                                       inplace, stride=2, bn_eps=bn_eps, bn_momentum=bn_momentum)
        self.layer3 = self._make_layer(block, norm_layer, int(np.rint(256 * alpha)), layers[2],
                                       inplace, stride=2, bn_eps=bn_eps, bn_momentum=bn_momentum)
        self.layer4 = self._make_layer(block, norm_layer, int(np.rint(512 * alpha)), layers[3],
                                       inplace, stride=2, bn_eps=bn_eps, bn_momentum=bn_momentum)

        self.ppm = PPM(int(np.rint(512 * alpha)), int(np.rint(512 * alpha) // len(bins)),
                       bins, norm_layer)

        self.fuse2_1 = InterFeatureFusion(int(np.rint(128 * alpha)), int(np.rint(64 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse3_1 = InterFeatureFusion(int(np.rint(256 * alpha)), int(np.rint(128 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse3_2 = InterFeatureFusion(int(np.rint(128 * alpha)), int(np.rint(64 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse4_1 = InterFeatureFusion(int(np.rint(512 * alpha)), int(np.rint(256 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse4_2 = InterFeatureFusion(int(np.rint(256 * alpha)), int(np.rint(128 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.fuse4_3 = InterFeatureFusion(int(np.rint(128 * alpha)), int(np.rint(64 * alpha)),
                                          bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)

        self.down2 = InterFeatureDownsample(int(np.rint(64 * alpha)), int(np.rint(128 * alpha)), scale=0.5,
                                            bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)
        self.down3 = InterFeatureDownsample(int(np.rint(64 * alpha)), int(np.rint(256 * alpha)), scale=0.25,
                                            bn_eps=bn_eps, bn_momentum=bn_momentum, norm_layer=norm_layer)

        self.conv_low = ConvBnRelu_DW(int(np.rint(64 * alpha)), int(np.rint(512 * alpha)), 4, 4, 1,
                                      norm_layer=norm_layer)
        self.conv_high = ConvBnRelu_DW(int(np.rint(64 * alpha)), int(np.rint(512 * alpha)), 4, 4, 1,
                                       norm_layer=norm_layer)

        self.fusion = AFNB(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), int(np.rint(512 * alpha)),
                           int(np.rint(128 * alpha)), int(np.rint(128 * alpha)),
                           dropout=0.05, sizes=([1]), norm_layer=norm_layer)

        self.context = nn.Sequential(
            ConvBnRelu_DW(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), 3, 1, 1,
                          has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False),
            APNB(in_channels=int(np.rint(512 * alpha)), out_channels=int(np.rint(512 * alpha)),
                 key_channels=int(np.rint(128 * alpha)), value_channels=int(np.rint(128 * alpha)),
                 dropout=0.05, sizes=([1]), norm_layer=norm_layer))
        self.head = nn.Sequential(
            ConvBnRelu_DW(int(np.rint((512 + 64) * alpha)), int(np.rint(64 * alpha)), 3, 1, 1,
                          has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False),
            nn.Conv2d(int(np.rint(64 * alpha)), int(np.rint(64 * alpha)), kernel_size=1, stride=1,
                      padding=0, bias=True),
            nn.Dropout2d(0.25)  # added to prevent overfitting, 0.1->0.2
        )
        self.depthwise_conv = nn.Conv2d(int(np.rint(512 * alpha)), int(np.rint(512 * alpha)), kernel_size=3,
                                        stride=1, padding=1, groups=int(np.rint(512 * alpha)), bias=True)
import graphics as g
from ppm import PPM

myViewPlane = g.ViewPlane(g.Point3D(0, 0, 0), g.Normal(0, 0, 1), 3, 4, 1)

myViewPlane.set_color(0, 0, g.ColorRGB(1, 1, 1))
myViewPlane.set_color(0, 2, g.ColorRGB(1, 0, 0))
myViewPlane.set_color(1, 1, g.ColorRGB(1, 0, 1))
myViewPlane.set_color(1, 2, g.ColorRGB(1, 1, 0))
myViewPlane.set_color(3, 0, g.ColorRGB(0, 1, 0))
myViewPlane.set_color(3, 2, g.ColorRGB(0, 0, 1))

PPM(myViewPlane, 'part2-testing.ppm')
from graphics import *   # Sphere, Plane, Point3D, Normal, ColorRGB, ViewPlane, Ray, as in the companion scripts
from ppm import PPM
from raytracer_part3 import raytracer

# Build the Spheres that will be in our world
S1 = Sphere(Point3D(300, 200, 200), 100, ColorRGB(1.0, 0.2, 0.4))
S2 = Sphere(Point3D(-200, -100, 50), 35, ColorRGB(0.3, 0.8, 0.2))
S3 = Sphere(Point3D(50, 20, 100), 25, ColorRGB(0.4, 0.1, 0.4))
S4 = Sphere(Point3D(300, -200, 600), 250, ColorRGB(0.6, 0.6, 0.4))
S5 = Sphere(Point3D(400, 400, 900), 400, ColorRGB(0.0, 0.2, 1.0))

# Build the Planes that will be in our world
P1 = Plane(Point3D(50, 50, 999), Normal(0, 0, 1), ColorRGB(0.8, 0.8, 0.8))
P2 = Plane(Point3D(50, 50, 900), Normal(1, 1, 1), ColorRGB(1.0, 1.0, 1.0))

vp = ViewPlane(Point3D(0, 0, 0), Normal(0, 0, 1), 640, 480, 1.0)
Camera1 = Ray(Point3D(0, 0, -100), Normal(0, 0, 1))

objects = [S1, S2, S3, S4, S5, P1, P2]

rt = raytracer(vp, objects)
rt.color_plane_challenge(Camera1)
camera = rt.get_viewplane()
PPM(camera, 'part5_1.ppm')
# Our orthographic viewing plane
# world = ViewPlane(Point3D(0, 0, 0), Normal(0, 0, 1), 200, 100, 1.0)
# world = ViewPlane(Point3D(50, 50, -50), Normal(0, 0, 1), 200, 100, 1.0)
# world = ViewPlane(Point3D(50, 50, -50), Normal(1, 1, 1), 200, 100, 1.0)
# world = ViewPlane(Point3D(0, 0, 0), Normal(0, 0, 1), 640, 480, 1.0)
world = ViewPlane(Point3D(50, 50, -50), Normal(-0.2, 0, 1), 200, 100, 1.0)

mins = []
cols, rows = world.get_resolution()
for row in range(rows):
    mins.append([])
    for col in range(cols):
        # j and i may be backwards
        # (row, col)
        ray = world.orthographic_ray(row, col)
        mins[row].append(0)
        # go through all objects we have created and keep the smallest positive t (nearest hit)
        for k in range(len(world_objects)):
            if_hit, t, where, color = world_objects[k].hit(ray, 1.0)
            if if_hit is True:
                if mins[row][col] == 0 and t > 0:
                    mins[row][col] = t
                    world.set_color(row, col, color)
                elif t < mins[row][col]:
                    mins[row][col] = t
                    world.set_color(row, col, color)

PPM(world, 'ex5.ppm')