Example #1
    def __init__(self, master, init_size=(WINDOW_WIDTH, WINDOW_HEIGHT)):
        '''
        Arguments:
            master: a tk.Tk object that manages the window
            init_size: a tuple of two non-negative integers representing the
            initial size of the game window (width, height) in pixels

        Creates and populates all of the GUI elements of the game.
        '''
        self.master = master
        self.width, self.height = init_size
        self.master.geometry(f'{self.width}x{self.height}')

        self.create_widgets()
        self.create_menu_buttons()
        self.animator = Animator(self)
        self.master.bind('<Key>', self.key_handler)

        # Whether a game square has been clicked
        self.square_clicked = None

        # A GameState object
        self.game_state = gs.GameState()
        # A stack of game states, for the purpose of undoing moves
        self.state_stack = []
        # Number of moves made; undoing does not reset this count
        self.moves = 0

        self.send_message('Hello! To start playing, please load a puzzle file.'+
            ' Many pre-built examples are included.' +
            ' For more info, such as keybinds, see readme.txt.')
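A minimal launch sketch for a constructor like this, assuming the class is named GameGUI (the real class name is not shown above):

    import tkinter as tk

    root = tk.Tk()
    app = GameGUI(root)  # hypothetical name for the class whose __init__ is shown above
    root.mainloop()      # hand control to Tk's event loop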
Example #2
    def load_log_file(self, file_name):
        self.scene = Graphics_scene(self)
        self.visualizer_view.setScene(self.scene)

        self.session = Session()
        self.road_network = Network()
        self.animator = Animator(self.scene)
        self.city = City(self.animator)

        if file_name.endswith(".gz"):
            input_file = GzipFile(file_name)
        elif file_name.endswith(".bz2"):
            input_file = BZ2File(file_name)
        else:
            input_file = open(file_name, 'r')
        for line in input_file:
            if self.session.parse(line):
                continue
            if self.road_network.parse(line):
                continue
            if self.city.parse(line):
                continue
        input_file.close()

        self.road_network.resolve()
        self.scene.draw_road_network(self.road_network)

        self.set_ready_to_run(file_name)
Example #3
    def getAnimatorViews(self):
        """
        Return a list of the animator views defined by the layout.
        If there is more than one animator view in the current
        application layout, each one can be accessed via its own element
        in the list. For example, if there are 3 animator views in the
        GUI, the following sequence of commands can be used to display a
        different channel in each one: 

            a = v.getAnimatorViews()
            a[0].setChannel(1)
            a[1].setChannel(110)
            a[2].setChannel(5)

        Returns
        -------
        list
            A list of Animator objects.
        """
        commandStr = "getAnimatorViews"
        animatorViewsList = self.con.cmdTagList(commandStr)
        animatorViews = []
        if (animatorViewsList[0] != ""):
            for av in animatorViewsList:
                animatorView = Animator(av, self.con)
                animatorViews.append(animatorView)
        return animatorViews
Example #4
 def __init__(self):
     self.animator = Animator(looking_at=(0, 0, 0),
                              looking_from=(0, -50, -50),
                              up_vector=(0, 0, 1),
                              latitude=45.0,
                              longitude=45.0,
                              distance=(50 * sqrt(2.0)))
     self.spherical = False
     self.prepare_args()
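As a sanity check (assuming distance is the camera-to-target range), the looking_from point (0, -50, -50) lies exactly 50 * sqrt(2.0) from the looking_at origin, which matches the distance passed above:

    from math import isclose, sqrt

    looking_at = (0, 0, 0)
    looking_from = (0, -50, -50)
    camera_range = sqrt(sum((a - b) ** 2 for a, b in zip(looking_from, looking_at)))
    assert isclose(camera_range, 50 * sqrt(2.0))  # matches the distance argument above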
Example #5
 def __init__(self, ent, anim):
     # ent can be None if all of the entity-manipulating methods are
     # overridden.
     self.ent = ent
     self.stats = StatSet()
     self.stats.hp = 1
     self._animator = Animator()
     self._anim = anim
     self.direction = dir.DOWN  # arbitrary
     self.interruptable = True  # if false, no state changes will occur
     self.invincible = False
     self._state = self.defaultState().next
Example #6
    def __init__(self, ent, anim):
        'ent can be None if all of the entity manipulating methods (below) are overridden.'
        self.ent = ent
        self.stats = StatSet()
        self.stats.hp = 1

        self._animator = Animator()
        self._anim = anim
        self.direction = dir.DOWN  # as good as any
        self.interruptable = True  # if false, no state changes will occur
        self.invincible = False
        self.state = self.defaultState()
Example #7
 def __init__(self, source):
     """ image should be a spritesheet of square sprites """
     pygame.sprite.Sprite.__init__(self)
     self.source = source
     self.image = pygame.image.load(self.source.death_image).convert_alpha()
     #self.rect = Rect(coords[0], coords[1], self.image.get_height(), self.image.get_height())
     self.rect = source.rect
     self.screen = self.source.screen
     self.death_animator = Animator(self.screen, self.image, self.rect)
     self.countdown = self.death_animator.frame_count
     self.dir = (0,0)
     self.alive = True
     self.facing_right = True
Example #8
 def __init__(self, source, image, coords):
     """ image should be a spritesheet of square sprites """
     pygame.sprite.Sprite.__init__(self)
     self.source = source
     self.image = pygame.image.load(image).convert_alpha()
     self.rect = Rect(coords[0], coords[1], self.image.get_height(),
                      self.image.get_height())
     self.screen = self.source.screen
     self.move_animator = Animator(self.screen, self.image, self.rect)
     self.m_image = self.image.subsurface(self.move_animator.crop_init)
     self.mask = pygame.mask.from_surface(self.m_image)
     self.dir = (0, 0)
     self.alive = True
     self.speed = 1
Example #9
    def getLinkedAnimators(self):
        """
        Get the animators that are linked to this image view.

        Returns
        -------
        list
            A list of Animator objects.
        """
        resultList = self.con.cmdTagList("getLinkedAnimators",
                                         imageView=self.getId())
        linkedAnimatorViews = []
        if (resultList[0] != ""):
            for animator in resultList:
                linkedAnimatorView = Animator(animator, self.con)
                linkedAnimatorViews.append(linkedAnimatorView)
        return linkedAnimatorViews
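A hedged usage sketch combining this with the setChannel call documented in Example #3, assuming img is an image view object from the same API:

    for index, animator in enumerate(img.getLinkedAnimators()):
        animator.setChannel(index)  # show a different channel in each linked animator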
Example #10
    def add_random_ghost_animation(self, room_id, path_id, start_position):

        ghost_path = "Well Escape tiles/ghostTiles/"
        ghost_sprite_sheet = random.choice(
            loadSave.load_files_form_directory(ghost_path, ".png"))
        # Animation_data -> [animator, room_id, path_id, start_cell_id, end_cell_id, forwards, (current position)]
        animation_data = [
            # Todo turn magic numbers in animations into constants
            Animator(ghost_sprite_sheet, library.scaleNum, 3, 7, 0.85),
            room_id,
            path_id,
            0,
            1,
            True,
            start_position
        ]

        self.ghost_sprite_animations.append(animation_data)
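The TODO above asks for the magic numbers to be named. A hedged sketch, assuming the positional arguments carry the same meaning (sprite spacing, sprites per sheet, update length) as the Animator calls in Example #21 below; the constant names are hypothetical:

    GHOST_SPRITE_SPACING = 3     # px (assumed meaning)
    GHOST_SPRITES_COUNT = 7      # per sheet (assumed meaning)
    GHOST_UPDATE_LENGTH = 0.85   # sec (assumed meaning)

    ghost_animator = Animator(ghost_sprite_sheet, library.scaleNum,
                              GHOST_SPRITE_SPACING, GHOST_SPRITES_COUNT,
                              GHOST_UPDATE_LENGTH)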
Example #11
    def start_animation(self, track, scene_num, *args):

        # print " "
        # print "STARTING ANIMATION"
        # print " "

        my_animator = Animator(*track)
        my_animator.loop = True
        self.animations[scene_num] = my_animator

        try:
            self.layers[scene_num].opacity = 0  # or 0 to start off
            my_animator.do(self.layers[scene_num])
            if platform == 'ios' or platform == 'android':
                self.app.appix_base.solid_layer.add_widget(self.layers[scene_num])
            else:
                self.app.main_stage.ids["content_preview"].add_widget(self.layers[scene_num])
        except AttributeError:
            "this was an attribute error"
            pass
Example #12
    def build(self):
        #initializing the elements from the .kv file
        self.designElements = DesignElements()
        self.main_carousel = self.designElements.ids['crsMain']
        self.btn_set_file_path = self.designElements.ids['btnSetFilepath']
        self.lbl_file_path = self.designElements.ids['lblFilePath']
        self.btn_refresh_members = self.designElements.ids['btnRefreshMembers']
        self.lbl_members_count = self.designElements.ids['lblMembersCount']
        self.im_group_pic = self.designElements.ids['imGroup']
        self.btn_next_group_pic = self.designElements.ids['btnNextGroup']
        self.lbl_news = self.designElements.ids['lblNews']
        self.lay_rules = self.designElements.ids['layRules']

        #the json store where permanent data is stored
        self.app_data_name = 'AppData.json'

        #creating the members carousel, to access it later in members carousel
        self.members_carousel = Carousel(direction='bottom', loop='True')

        #binding buttons with their callbacks
        self.btn_set_file_path.bind(on_press=self.showSetFilepathPopup)
        self.btn_refresh_members.bind(on_press=self.refreshCallback)
        self.btn_next_group_pic.bind(on_press=partial(self.changeGroupPic))

        #loading the currently stored data (if there is any)
        self.loadDataPath()

        #initialising the animator
        self.animator = Animator()

        #setting up the members by adding them into an array and then filling
        # the array in the method
        self.members = []
        self.refresh()

        #initialising the errors class // Not Functional at the moment
        self.error = Error()

        #kivy thing
        return self.designElements
Example #13
 def __init__(self, path, position, animator_options=None):
     super().__init__()
     self.animator = Animator(path, animator_options)  # create the animator
     self.position = position  # position
     self.rect = self.animator.next_()[0].get_rect(
         topleft=position)  # work out the bounding rectangle
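A hedged per-frame sketch of how such a sprite might be drawn, assuming next_() keeps yielding (surface, ...) tuples as in the constructor above, that sprite is an instance of this class, and that screen is a pygame display Surface:

    frame = sprite.animator.next_()[0]  # current animation frame (assumed to be a pygame Surface)
    screen.blit(frame, sprite.rect)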
Example #14
    def visualize_dbs(self,
                      n_epochs,
                      prop,
                      property_perc,
                      alternate,
                      animation_name='anim'):

        X_train_noisy_sc = StandardScaler().fit_transform(self.X_train_noisy)

        print('Creating animations...', end='')
        animator = Animator(X_train_noisy_sc,
                            self.y_train,
                            prop,
                            property_perc,
                            alternate,
                            n_epochs,
                            interval=100)

        criterion, weights_boosting = init_exponential_loss(X_train_noisy_sc)

        dbs_rand, train_losses_rand, train_accs_rand, \
        test_accs_rand, subsets_rand = self.toy_run_recompute(
            self.model_clean, self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=property_perc,
            most=True, random=True, alternate=False)
        dbs_top, train_losses_top, train_accs_top, \
        test_accs_top, subsets_top = self.toy_run_recompute(
            self.model_clean, self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=property_perc,
            most=True, random=False, alternate=alternate)
        dbs_bottom, train_losses_bottom, train_accs_bottom, \
        test_accs_bottom, subsets_bottom = self.toy_run_recompute(
            self.model_clean, self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=property_perc,
            most=False, random=False, alternate=alternate)
        dbs_all, train_losses_all, train_accs_all, \
        test_accs_all, subsets_all = self.toy_run_recompute(
            self.model_clean,
            self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=1.0,
            most=False, random=True, alternate=False)

        dbs = [dbs_rand, dbs_top, dbs_bottom, dbs_all]
        train_accs = [
            train_accs_rand, train_accs_top, train_accs_bottom, train_accs_all
        ]
        test_accs = [
            test_accs_rand, test_accs_top, test_accs_bottom, test_accs_all
        ]

        self.y_train = convert_labels(self.y_train, [-1, 1], [0, 1])
        self.y_valid = convert_labels(self.y_valid, [-1, 1], [0, 1])
        self.y_test = convert_labels(self.y_test, [-1, 1], [0, 1])
        criterion = nn.BCELoss()
        self.model_clean = FFSimpleNet(input_dim=self.n_features,
                                       output_dim=1,
                                       activation='sigmoid')
        optimizer = torch.optim.SGD(self.model_clean.parameters(), lr=0.01)
        self.train_accs_clean, self.val_accs_clean, self.test_accs_clean, epoch, model = early_stopping(
            self.model_clean,
            self.train_dl,
            self.valid_dl,
            self.test_dl,
            criterion=criterion,
            optimizer=optimizer,
            device=device,
            verbose=False)

        dbs_rand, train_losses_rand, train_accs_rand, \
        test_accs_rand, subsets_rand = self.toy_run_recompute(
            self.model_clean, self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=property_perc,
            most=True, random=True, alternate=False)
        dbs_top, train_losses_top, train_accs_top, \
        test_accs_top, subsets_top = self.toy_run_recompute(
            self.model_clean, self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=property_perc,
            most=True, random=False, alternate=alternate)
        dbs_bottom, train_losses_bottom, train_accs_bottom, \
        test_accs_bottom, subsets_bottom = self.toy_run_recompute(
            self.model_clean, self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=property_perc,
            most=False, random=False, alternate=alternate)
        dbs_all, train_losses_all, train_accs_all, \
        test_accs_all, subsets_all = self.toy_run_recompute(
            self.model_clean,
            self.X_train_noisy, self.y_train, self.X_test_noisy, self.y_test, n_epochs,
            criterion=criterion, prop=prop, property_perc=1.0,
            most=False, random=True, alternate=False)

        dbs = dbs + [dbs_rand, dbs_top, dbs_bottom, dbs_all]
        train_accs = train_accs + [
            train_accs_rand, train_accs_top, train_accs_bottom, train_accs_all
        ]
        test_accs = test_accs + [
            test_accs_rand, test_accs_top, test_accs_bottom, test_accs_all
        ]
        labels = [
            'rand_exp', 'top_exp', 'bottom_exp', 'all_exp', 'rand_bce',
            'top_bce', 'bottom_bce', 'all_bce'
        ]
        colors = [
            'orange', 'green', 'red', 'blue', 'orange', 'green', 'red', 'blue'
        ]
        markers = ['--', '--', '--', '--', '.-', '.-', '.-', '.-']
        animation = animator.run(dbs, train_accs, test_accs, labels, colors,
                                 markers)

        print('done!')
        print('Saving animation...', end='')
        animation.save('animations/{}={}_{}={}.gif'.format(
            prop, property_perc, 'alternate', alternate),
                       dpi=100)
        print('done!')
Example #15
    def __init__(self):

        # Initialize node, animator and state
        rospy.init_node('waypoint_planner', anonymous=True)
        self.animator = Animator(self)
        self.state = states.SteadyState(self)

        # Publisher/Subscriber variables
        self.car_speed = 0
        self.lane_heading = FloatMsgSmoother(10, 0)
        self.lane_width = 3  # meters
        self.lane_center_offset = FloatMsgSmoother(3, 0)
        self.lane_curvature = []
        self.lane_change = "none"
        self.left_lane_type = 0
        self.right_lane_type = 0
        self.obstacles = [0, 0, 0, 0, 0]

        self.obstacle_flags = [0, 0, 0, 0, 0]
        self.obstacle_detect_time = [0, 0, 0, 0, 0]
        self.saved_obstacle_pos = [0, 0, 0, 0, 0]
        self.obst_removal_time = [0, 0, 0, 0, 0]
        self.time_set = [0, 0, 0, 0, 0]
        self.obstacle_dist = [0, 0, 0, 0, 0]

        self.objects_detected = [False, False, False]

        self.odom_msg_received = False
        self.car_position = [0, 0]
        self.car_position_temp = [0, 0]

        self.car_heading = 0

        self.critical_waypoints = []
        self.curvature_waypoints = []
        self.position_history = []
        self.position_history_counter = 0
        self.stop_sign_distance = 25000.0
        self.horiz_line_distance = -1

        # Waypoint generation class members
        self.DISTANCE_BETWEEN_POINTS = 3  # meters

        # Animator class constants
        self.POSITION_HIST_DIST = 3  # meters
        self.POSITION_HIST_LENGTH = 100

        # Logging
        self.logger = logging.getLogger("planner")
        self.logger.setLevel(logging.DEBUG)

        handler = logging.StreamHandler(sys.stdout)
        FORMAT = '%(message)s'
        handler.setFormatter(logging.Formatter(FORMAT))
        self.logger.addHandler(handler)

        self.logger.info("Waypoint planner initialized.\n")

        # Subscribers
        self.car_speed_sub = rospy.Subscriber('/autodrive_sim/output/speed',
                                              Float32, self.car_speed_callback)

        self.lane_heading_sub = rospy.Subscriber(
            '/lane_detection/lane_heading', Float32,
            self.lane_heading_callback)
        self.lane_width_sub = rospy.Subscriber('/lane_detection/lane_width',
                                               Float32,
                                               self.lane_width_callback)
        self.lane_center_offset_sub = rospy.Subscriber(
            '/lane_detection/lane_center_offset', Float32,
            self.lane_center_offset_callback)
        self.lane_curvature_sub = rospy.Subscriber(
            '/lane_detection/lane_curvature', Float32MultiArray,
            self.lane_curvature_callback)
        self.lane_change_sub = rospy.Subscriber(
            '/lane_detection/lane_change/TEST', String,
            self.lane_change_callback)
        self.left_lane_type_sub = rospy.Subscriber(
            '/lane_detection/left_lane_type', Float32,
            self.left_lane_type_callback)
        self.right_lane_type_sub = rospy.Subscriber(
            '/lane_detection/right_lane_type', Float32,
            self.right_lane_type_callback)
        self.obstacle_sub = rospy.Subscriber('/lane_occupation',
                                             Int8MultiArray,
                                             self.obstacle_callback)
        self.obstacle_dist_sub = rospy.Subscriber('/obstacle_distance',
                                                  Float32MultiArray,
                                                  self.obstacle_dist_callback)

        self.car_heading_sub = rospy.Subscriber(
            '/autodrive_sim/output/heading', Float32,
            self.car_heading_callback)
        # self.self_objects_vector_sub = rospy.Subscriber('/lane_occupation', ByteMultiArray, self.detected_objects_callback)
        self.odom_sub = rospy.Subscriber("/odom", Odometry, self.odom_callback)
        self.position_sub = rospy.Subscriber('/autodrive_sim/output/position',
                                             Float32MultiArray,
                                             self.position_callback)
        self.stop_sign_dist_sub = rospy.Subscriber(
            '/sign_detection/stop_sign_distance', Float32,
            self.stop_sign_dist_callback)
        self.horiz_line_dist_sub = rospy.Subscriber(
            '/lane_detection/stop_line_distance', Float32,
            self.horiz_line_dist_callback)

        # Publisher
        self.waypoints_pub = rospy.Publisher('/autodrive_sim/output/waypoints',
                                             Float32MultiArray,
                                             queue_size=1)
        self.stop_dist_pub = rospy.Publisher('/waypoint_planner/stop_distance',
                                             Float32,
                                             queue_size=1)
        self.speed_factor = rospy.Publisher('/waypoint_planner/speed_factor',
                                            Float32MultiArray,
                                            queue_size=1)
        self.horizontal_line_activate_pub = rospy.Publisher(
            '/waypoint_planner/horizontal_line_detection', Bool, queue_size=1)
        self.lane_angle_pub = rospy.Publisher('/waypoint_planner/lane_angle',
                                              Float32,
                                              queue_size=1)
Example #16
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D

from animator import Animator
from cardioid import Cardioid
from circlegenerator import CircleGenerator
from doublefolium import DoubleFolium
from figureeigth import FigureEight
from lissajous import Lissajous
import matplotlib.pyplot as plt
from nephroid import Nephroid
from polar import Polar
from projection import RectangleProjection
from rhodonea import Rhodonea
from sextic import Sextic
from tricuspoid import Tricuspoid
from trifolium import Trifolium

numpoints = 100
Animator(DoubleFolium(), numpoints)('doublefolium.mp4')
Animator(Sextic(), numpoints)('sextic.mp4')
Animator(Lissajous(3, 2), numpoints)('lissajous.mp4')
Animator(Trifolium(), numpoints)('trifolium.mp4')
Animator(Tricuspoid(), numpoints)('tricuspoid.mp4')
Animator(Cardioid(), numpoints)('cardioid.mp4')
Animator(Nephroid(), numpoints)('nephroid.mp4')
Animator(Rhodonea(5), numpoints)('rhodonea5.mp4')
Animator(Rhodonea(3), numpoints)('rhodonea3.mp4')
Animator(Rhodonea(6), numpoints)('rhodonea6.mp4')
Animator(FigureEight(), numpoints)('figure8.mp4')
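The one-off calls above can also be written as a data-driven loop, which keeps each curve next to its output filename; a hedged refactor sketch assuming the imports at the top of this example (the rhodonea6.mp4 entry assumes Rhodonea(6) was intended):

    renders = [
        (DoubleFolium(), 'doublefolium.mp4'),
        (Sextic(), 'sextic.mp4'),
        (Lissajous(3, 2), 'lissajous.mp4'),
        (Trifolium(), 'trifolium.mp4'),
        (Tricuspoid(), 'tricuspoid.mp4'),
        (Cardioid(), 'cardioid.mp4'),
        (Nephroid(), 'nephroid.mp4'),
        (Rhodonea(5), 'rhodonea5.mp4'),
        (Rhodonea(3), 'rhodonea3.mp4'),
        (Rhodonea(6), 'rhodonea6.mp4'),
        (FigureEight(), 'figure8.mp4'),
    ]
    for curve, filename in renders:
        Animator(curve, numpoints)(filename)  # Animator instances are callable with an output path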
Example #17
def train_gcn(seed, epochs, num_splits):
    # Imports are deferred intentionally (lazy loading) so that
    # argument-parsing errors can be reported faster.
    from typing import Generator
    import random
    import os
    import sys
    import datetime as dt
    from copy import copy, deepcopy

    import numpy as np
    import pandas as pd
    import networkx as nx
    from matplotlib import pyplot
    import matplotlib
    import seaborn as sns

    import torch
    import torch_geometric as tg

    from sklearn.decomposition import PCA
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.naive_bayes import GaussianNB

    from mpl_proc import MplProc, ProxyObject

    from gf_dataset import GasFlowGraphs
    from locations import Coordinates
    from models import MyNet3, MyNet2, MyNet, cycle_loss, cycle_dst2
    from models import cycle_loss
    from report import FigRecord, StringRecord, Reporter

    from seed_all import seed_all

    from animator import Animator

    class LineDrawer:
        def __init__(self, *, ax: matplotlib.axes.Axes, kw_reg, kw_min,
                     kw_train, kw_test):
            self.min_diff = float('inf')
            self.ax = ax
            self.kw_reg = kw_reg
            self.kw_min = kw_min
            self.kw_train = kw_train

            class FakeHline:
                def set(self, *args, **kwargs):
                    pass

            self.kw_test = kw_test
            self.min_train_hline = FakeHline()
            self.min_test_hline = FakeHline()

        def append(self, *, train_loss: float, test_loss: float):
            crt_diff = abs(test_loss - train_loss)
            if crt_diff < self.min_diff:
                self.min_diff = crt_diff
                self.min_train_hline.set(**self.kw_reg)
                self.min_test_hline.set(**self.kw_reg)
                self.min_train_hline = self.ax.hlines(**self.kw_train,
                                                      **self.kw_min,
                                                      y=train_loss)
                self.min_test_hline = self.ax.hlines(**self.kw_test,
                                                     **self.kw_min,
                                                     y=test_loss)
            else:
                self.ax.hlines(**self.kw_reg, **self.kw_train, y=train_loss)
                self.ax.hlines(**self.kw_reg, **self.kw_test, y=test_loss)

    print("[ Using Seed : ", seed, " ]")
    seed_all(seed)

    mpl_proc = MplProc()

    animator = Animator(mpl_proc)
    graph_dataset = GasFlowGraphs()
    lines = LineDrawer(ax=mpl_proc.proxy_ax,
                       kw_min=dict(),
                       kw_reg=dict(linewidth=0.3, color='gray'),
                       kw_train=dict(linestyle=':', xmin=300, xmax=400),
                       kw_test=dict(xmin=400, xmax=500))

    for seed in range(num_splits):
        # torch.manual_seed(seed)
        train_graphs, test_graphs = torch.utils.data.random_split(
            graph_dataset, (len(graph_dataset) - 20, 20))

        decision_tree = DecisionTreeClassifier(min_samples_leaf=6,
                                               max_depth=4,
                                               max_leaf_nodes=12)
        X = np.concatenate([g.edge_attr.T for g in train_graphs])
        y = np.concatenate([g.y for g in train_graphs])[:, 1]
        decision_tree.fit(X, y)
        predicted = decision_tree.predict(
            np.concatenate([g.edge_attr.T for g in test_graphs]))
        target = np.array([g.y[0, 1].item() for g in test_graphs])

        test_loss = cycle_loss(target, predicted)
        train_loss = cycle_loss(y, decision_tree.predict(X))

        if abs(test_loss - train_loss) < lines.min_diff:
            train_loader = tg.data.DataLoader(train_graphs,
                                              batch_size=len(train_graphs))
            test_loader = tg.data.DataLoader(test_graphs,
                                             batch_size=len(test_graphs))

        lines.append(test_loss=test_loss, train_loss=train_loss)

    lines = LineDrawer(ax=mpl_proc.proxy_ax,
                       kw_min=dict(),
                       kw_reg=dict(linewidth=0.3, color='gray'),
                       kw_train=dict(linestyle=':', xmin=100, xmax=200),
                       kw_test=dict(xmin=200, xmax=300))

    for seed in range(num_splits):
        train_graphs, test_graphs = torch.utils.data.random_split(
            graph_dataset, (len(graph_dataset) - 20, 20))
        gnb = GaussianNB()
        X = np.concatenate([g.edge_attr.T for g in train_graphs])
        y = np.concatenate([g.y for g in train_graphs])[:, 1]
        gnb.fit(X, y)
        predicted = gnb.predict(
            np.concatenate([g.edge_attr.T for g in test_graphs]))
        target = np.array([g.y[0, 1].item() for g in test_graphs])

        lines.append(test_loss=cycle_loss(target, predicted),
                     train_loss=cycle_loss(y, gnb.predict(X)))

    mynet = MyNet3()

    # seed_all(seed)
    train_graphs, test_graphs = torch.utils.data.random_split(
        graph_dataset, (len(graph_dataset) - 20, 20))
    train_loader = tg.data.DataLoader(train_graphs,
                                      batch_size=len(train_graphs))
    test_loader = tg.data.DataLoader(test_graphs, batch_size=len(test_graphs))

    optimizer = torch.optim.Adam(mynet.parameters(), lr=0.001)

    # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    # torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)

    def train_epochs():
        for epoch in range(epochs):
            train_loss = 0
            for batch in train_loader:
                # criterion = torch.nn.MSELoss()
                predicted = mynet(batch)

                loss = cycle_loss(predicted.flatten(), batch.y[:, 1].float())
                # loss = criterion(predicted, batch.y.float())
                train_loss += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # lr_scheduler.step()
            train_loss /= len(train_loader)
            yield train_loss

    class IntersectionFinder:
        def __init__(self):
            self.old = (None, None)

        def intersects(self, a: float, b: float) -> bool:
            old_a, old_b = self.old
            self.old = a, b
            if old_a is None:
                return False
            if a == b:
                return True
            return (old_a > old_b) != (a > b)

    intersections = IntersectionFinder()

    min_test_loss = float('inf')
    min_test_epoch = -1
    for epoch_no, train_loss in enumerate(train_epochs()):
        with torch.no_grad():
            test_loss = 0.0
            for batch in test_loader:
                predicted = mynet(batch)

                loss = cycle_loss(predicted.flatten(), batch.y[:, 1].float())
                test_loss += loss.item()
            test_loss /= len(test_loader)
            if test_loss < min_test_loss:
                min_test_loss = test_loss
                best = deepcopy(mynet)
                min_test_epoch = epoch_no
            if intersections.intersects(train_loss, test_loss):
                mpl_proc.proxy_ax.scatter(epoch_no,
                                          train_loss,
                                          s=100,
                                          marker='x',
                                          color='#3d89be')

        animator.add(train_loss, test_loss)

    fig: matplotlib.figure.Figure
    ax1: matplotlib.axes.Axes
    ax2: matplotlib.axes.Axes
    ax3: matplotlib.axes.Axes
    ax4: matplotlib.axes.Axes
    fig, ((ax1, ax2), (ax3, ax4)) = pyplot.subplots(ncols=2,
                                                    nrows=2,
                                                    sharey=True)

    def draw_tables(ax: matplotlib.axes.Axes, net: torch.nn.Module,
                    data: tg.data.DataLoader):
        table = np.full((13, 12), np.nan)
        for batch in data:
            predicted = net(batch)
            Y = batch.y[:, 0] - 2008
            M = batch.y[:, 1]
            table[Y, M] = cycle_dst2(M.float(),
                                     predicted.flatten().detach().numpy())**.5

        mshow = ax.matshow(table, vmin=0, vmax=6)
        ax.set(yticks=range(13), yticklabels=range(2008, 2021))
        return mshow

    mshow = draw_tables(ax1, mynet, train_loader)
    draw_tables(ax2, mynet, test_loader)
    draw_tables(ax3, best, train_loader)
    draw_tables(ax4, best, test_loader)

    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    fig.colorbar(mshow, cax=cbar_ax)

    ax1.title.set_text('last')
    ax3.title.set_text(f'best {min_test_epoch}')

    def nxt_num() -> int:
        return sum(1 for n in os.listdir('experiments')
                   if n.startswith('exp-1')) + 1

    N = nxt_num()

    reporter = Reporter('report4.md')
    reporter.append(StringRecord(f'# {N}'))
    reporter.append(StringRecord(f'''
    ```
    {mynet}
    ```
    '''))
    reporter.append(FigRecord(fig, 'exp-2', f'experiments/exp-2-{N}.png'))
    reporter.append(
        FigRecord(mpl_proc.proxy_fig, 'exp-1', f'experiments/exp-1-{N}.png'))

    reporter.write()

    pyplot.show()
Example #18
from nonlinear import NonLinear
from animator import Animator
import numpy as np

RG = NonLinear()

RG.dt = 0.1
RG.times = np.arange(-10, 10, RG.dt)

RG.dz = 0.02
RG.z = np.arange(0, 50, RG.dz)

RG.T0 = 2
RG.a0 = np.exp(-RG.times**2 / RG.T0**2)

RG.integrate('c')

anim = Animator(RG)
anim.start_animation()
Example #19
def oscillate_light(lightnum, param, vfrom, vto, ms):
    """Set up an oscillating lighting parameter"""
    light = GL_LIGHT0 + lightnum
    if light not in conditions:
        conditions[light] = Animator()
    conditions[light].oscillate(param, vfrom, vto, ms)
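A hedged call sketch; GL_DIFFUSE is only an illustrative choice of lighting parameter here, and the tuple values assume oscillate accepts RGBA colours:

    from OpenGL.GL import GL_DIFFUSE

    # Pulse light 0's diffuse colour between dim grey and full white every 2000 ms.
    oscillate_light(0, GL_DIFFUSE, (0.2, 0.2, 0.2, 1.0), (1.0, 1.0, 1.0, 1.0), 2000)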
Example #20
    velocities = np.random.uniform(-0.25, 0.25,
                                   (num_balls, 2)).astype(np.float32)

    # Initialize grid indices:
    #
    # Each square in the grid stores the index of the object in that square, or
    # -1 if no object.  We don't worry about overlapping objects, and just
    # store one of them.
    grid_spacing = radius / np.sqrt(2.0)
    grid_size = int((1.0 / grid_spacing) + 1)
    grid = -np.ones((grid_size, grid_size), dtype=np.uint32)
    grid[(positions[:, 0] / grid_spacing).astype(int),
         (positions[:, 1] / grid_spacing).astype(int)] = np.arange(num_balls)

    # A matplotlib-based animator object
    animator = Animator(positions, radius * 2)

    # simulation/animation time variables
    physics_step = 1.0 / 100  # estimate of real-time performance of simulation
    anim_step = 1.0 / 30  # FPS
    total_time = 0

    frame_count = 0

    # SUBPROBLEM 4: uncomment the code below.
    # preallocate locks for objects
    locks_ptr = preallocate_locks(num_balls)

    while True:
        with Timer() as t:
            update(positions, velocities, grid, radius, grid_size, locks_ptr,
Example #21
class Player(pyBehaviour.Transform):

    previous_position = (0, 0)

    # Player Setup
    move_speed = 0
    idle = True
    current_direction = library.BACKWARDS

    blocked_move_direct = {
        "forwards": False,
        "right": False,
        "backwards": False,
        "left": False
    }

    # Game Components
    time_manager = None
    get_world_position_funct = None

    # Animation setup
    SPRITE_SPACING = 3          # px
    SPRITES_COUNT = 7           # Per Sheet
    UPDATE_LENGTH = 0.75        # sec
    UPDATE_LENGTH_IDLE = 1.5    # sec

    # Move animations
    animation = ["", "", "", ""]
    animation[library.LEFT] = Animator(
        "Characters/girl_sideLeft_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH
    )

    animation[library.RIGHT] = Animator(
        "Characters/girl_sideRight_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH
    )

    animation[library.FORWARDS] = Animator(
        "Characters/girl_back_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH
    )

    animation[library.BACKWARDS] = Animator(
        "Characters/girl_front_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH
    )

    # idle animations
    idle_animation = ["", "", "", ""]
    idle_animation[library.LEFT] = Animator(
        "Characters/girl_sideLeftIdle_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH_IDLE
    )

    idle_animation[library.RIGHT] = Animator(
        "Characters/girl_sideRightIdle_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH_IDLE
    )

    idle_animation[library.FORWARDS] = Animator(
        "Characters/girl_backIdle_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH_IDLE
    )

    idle_animation[library.BACKWARDS] = Animator(
        "Characters/girl_frontIdle_spriteSheet.png",
        library.scaleNum,
        SPRITE_SPACING,
        SPRITES_COUNT,
        UPDATE_LENGTH_IDLE
    )

    def __init__(self, move_speed, scale, time_manager):

        self.move_speed = move_speed
        self.scale = scale
        self.time_manager = time_manager
        self.get_world_position_funct = None

    def block_move_direction(self, forwards, right, backwards, left):
        """"""

        self.blocked_move_direct["forwards"] = forwards
        self.blocked_move_direct["right"] = right
        self.blocked_move_direct["backwards"] = backwards
        self.blocked_move_direct["left"] = left

    def change_direction(self, last_dir, current_dir):
        """
        Reset the players animator if the direction changes.

        :param last_dir:        player's direction from last frame
        :param current_dir:     player's direction this frame
        :return:                current direction
        """
        if last_dir != current_dir:
            self.animation[last_dir].reset()
        return current_dir

    def animation_direction(self, last_direction, inputs):
        """
        Get the next animation direction.

        This prevents the animation from resetting when two keys are pressed
        at the same time.
        :param last_direction:  player's last direction
        :param inputs:          user inputs
        :return:                (direction, idle)
        """
        # find if no keys are pressed and set it to idle
        idle = not inputs["left"] and not \
            inputs["right"] and not \
            inputs["forwards"] and not \
            inputs["backwards"]

        # if no keys are pressed, return early as there is nothing to test
        if idle:
            return last_direction, idle

        # default to the last direction
        # in case opposite keys are being pressed
        direction = last_direction

        # set to idle if both left and right keys are pressed
        if inputs["left"] and inputs["right"]:
            idle = True
        elif inputs["left"]:  # set left direction
            direction = library.LEFT
        elif inputs["right"]:  # set right direction
            direction = library.RIGHT

        # handle forwards and backwards in a separate if,
        # as their animation trumps left and right
        # set to idle if both forwards and backwards keys are pressed
        if inputs["forwards"] and inputs["backwards"]:
            # set to idle if neither left nor right is pressed
            idle = not inputs["left"] and not \
                inputs["right"]
        elif inputs["forwards"]:
            direction = library.FORWARDS  # set forwards direction
            idle = False
        elif inputs["backwards"]:
            direction = library.BACKWARDS  # set backwards direction
            idle = False

        return direction, idle

    def update(self, inputs):

        self.previous_position = self.position

        next_animation_direction, self.idle = self.animation_direction(
            self.current_direction,
            inputs
        )

        self.current_direction = self.change_direction(
            self.current_direction,
            next_animation_direction
        )

        pyBehaviour.Transform.update(self, inputs)

    '''
            if self.current_direction == library.FORWARDS:
                self.forwards()
            elif self.current_direction == library.RIGHT:
                self.right()
            elif self.current_direction == library.BACKWARDS:
                self.backwards()
            elif self.current_direction == library.LEFT:
                self.left()
    '''
    # Key Actions (up, down, left, right)

    def forwards(self):
        """Up key action"""

        if self.blocked_move_direct["forwards"]:
            return

        self.position[1] -= self.time_manager.delta_time * self.move_speed

    def right(self):
        """Right key action"""

        if self.blocked_move_direct["right"]:
            return

        self.position[0] += self.time_manager.delta_time * self.move_speed

    def backwards(self):
        """Down key action"""

        if self.blocked_move_direct["backwards"]:
            return

        self.position[1] += self.time_manager.delta_time * self.move_speed

    def left(self):
        """Left key action"""

        if self.blocked_move_direct["left"]:
            return

        self.position[0] -= self.time_manager.delta_time * self.move_speed

    def draw(self, tile_size, surface):

        if self.idle:
            current_animation = self.idle_animation[self.current_direction]
        else:
            current_animation = self.animation[self.current_direction]

        current_animation.update_time(self.time_manager.delta_time)

        current_sprite = current_animation.get_current_sprite()
        # resize the object by scale
        current_sprite = pygame.transform.scale(
            current_sprite,
            (int(tile_size * self.scale[0]), int(tile_size * self.scale[1]))
        )

        pos_x, pos_y = self.get_world_position_funct(
            self.position[0],
            self.position[1]
        )

        surface.blit(current_sprite, (pos_x, pos_y))
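A hedged per-frame driver sketch for the class above, assuming hypothetical time_manager, inputs, tile_size, surface and world_pos objects compatible with it:

    player = Player(move_speed=2, scale=(1, 1), time_manager=time_manager)
    player.get_world_position_funct = world_pos  # assumed helper mapping grid coords to pixels

    # once per frame:
    player.update(inputs)            # inputs: {"left": bool, "right": bool, "forwards": bool, "backwards": bool}
    player.draw(tile_size, surface)  # surface: the pygame display Surface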
Example #22
def run(self):
    X, y = make_classification(n_samples=self.n_samples,
                               n_features=self.n_features,
                               n_informative=2,
                               n_redundant=0,
                               n_classes=self.n_classes,
                               n_clusters_per_class=1)

    plt.cool()
    plt.figure(figsize=(8, 8))
    plt.scatter(X[:, 0], X[:, 1], marker='o', c=y, s=40, edgecolor='k')

    # Training, Validation and Test split
    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=TEST_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, test_idxs = next(sss.split(X, y))

    X_train, X_test = X[training_idxs], X[test_idxs]
    y_train, y_test = y[training_idxs], y[test_idxs]

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=VALIDATION_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, validation_idxs = next(sss.split(X_train, y_train))

    X_train, X_valid = X_train[training_idxs], X_train[validation_idxs]
    y_train, y_valid = y_train[training_idxs], y_train[validation_idxs]

    # Scaling of X
    ss = StandardScaler()
    X_train_scaled = ss.fit_transform(X_train)
    X_valid_scaled = ss.transform(X_valid)
    X_test_scaled = ss.transform(X_test)

    train_dataloader = get_data_loader(X_train_scaled, y_train)
    val_dataloader = get_data_loader(X_valid_scaled, y_valid)
    test_dataloader = get_data_loader(X_test_scaled, y_test)

    print('Training clean model...')
    model = FFSimpleNet(input_dim=self.n_features, output_dim=1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    train_accuracies, val_accuracies, test_accuracies, epoch, model = early_stopping(
        model, train_dataloader, val_dataloader, test_dataloader, optimizer,
        device)

    plt.figure(figsize=(8, 8))
    plt.plot(range(epoch), train_accuracies, label='Train')
    plt.plot(range(epoch), val_accuracies, label='Validation')
    plt.plot(range(epoch), test_accuracies, label='Test')
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Accuracy', fontsize=16)
    plt.legend(loc='upper left')

    X_s = StandardScaler().fit_transform(X)
    X_noisy = rotate(X_s, theta=120.0)

    plt.figure(figsize=(16, 8))
    plt.subplot(1, 2, 1)
    plt.title('Original scaled data scatter plot')
    plt.scatter(X_s[:, 0], X_s[:, 1], marker='o', c=y, s=40, edgecolor='k')

    plt.subplot(1, 2, 2)
    plt.title('Noisy data scatter plot')
    plt.scatter(X_noisy[:, 0],
                X_noisy[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k')

    # Training, Validation and Test split for noisy samples
    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=TEST_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, test_idxs = next(sss.split(X_noisy, y))

    X_noisy_train, X_noisy_test = X_noisy[training_idxs], X_noisy[test_idxs]
    y_train, y_test = y[training_idxs], y[test_idxs]

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=VALIDATION_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, validation_idxs = next(sss.split(X_noisy_train, y_train))

    X_noisy_train, X_noisy_valid = X_noisy_train[training_idxs], X_noisy_train[
        validation_idxs]
    y_train, y_valid = y_train[training_idxs], y_train[validation_idxs]

    # Selection of top, random, bottom samples per property
    X_noisy_top, y_top = get_samples_by_property(model,
                                                 X_noisy_train,
                                                 y_train,
                                                 self.property_perc,
                                                 most=True,
                                                 prop=self.property)
    X_noisy_random, y_random = get_random_subset(X_noisy_train, y_train,
                                                 self.property_perc)
    X_noisy_bottom, y_bottom = get_samples_by_property(model,
                                                       X_noisy_train,
                                                       y_train,
                                                       self.property_perc,
                                                       most=False,
                                                       prop=self.property)

    X_noisy_viz = StandardScaler().fit_transform(X_noisy)

    ss = StandardScaler()
    ss.fit(X_noisy_train)

    X_noisy_top_viz = ss.transform(X_noisy_top)
    X_noisy_random_viz = ss.transform(X_noisy_random)
    X_noisy_bottom_viz = ss.transform(X_noisy_bottom)

    weight1, weight2, bias = get_params(model)

    plt.figure(figsize=(20, 4))
    plt.subplot(1, 5, 1)
    plt.title('Original scaled data scatter plot')
    plt.scatter(X_s[:, 0], X_s[:, 1], marker='o', c=y, s=40, edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    plt.subplot(1, 5, 2)
    plt.title('Noisy scaled data scatter plot')
    plt.scatter(X_noisy_viz[:, 0],
                X_noisy_viz[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    plt.subplot(1, 5, 3)
    plt.title('Top {} scaled - {}'.format(self.property_perc, self.property))
    plt.scatter(X_noisy_top_viz[:, 0],
                X_noisy_top_viz[:, 1],
                marker='o',
                c=y_top,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    plt.subplot(1, 5, 4)
    plt.title('Random {} scaled - {}'.format(self.property_perc,
                                             self.property))
    plt.scatter(X_noisy_random_viz[:, 0],
                X_noisy_random_viz[:, 1],
                marker='o',
                c=y_random,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    plt.subplot(1, 5, 5)
    plt.title('Bottom {} scaled - {}'.format(self.property_perc,
                                             self.property))
    plt.scatter(X_noisy_bottom_viz[:, 0],
                X_noisy_bottom_viz[:, 1],
                marker='o',
                c=y_bottom,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    # RESCALE TO TRAIN
    X_noisy_top_s = StandardScaler().fit_transform(X_noisy_top)
    X_noisy_random_s = StandardScaler().fit_transform(X_noisy_random)
    X_noisy_bottom_s = StandardScaler().fit_transform(X_noisy_bottom)
    X_noisy_train_s = StandardScaler().fit_transform(X_noisy_train)
    X_noisy_s = StandardScaler().fit_transform(X_noisy)
    X_s = StandardScaler().fit_transform(X)

    noisy_top_dataloader = get_data_loader(X_noisy_top_s, y_top)
    noisy_random_dataloader = get_data_loader(X_noisy_random_s, y_random)
    noisy_bottom_dataloader = get_data_loader(X_noisy_bottom_s, y_bottom)

    # Top Train and Validation split
    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=VALIDATION_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, validation_idxs = next(sss.split(X_noisy_top, y_top))

    X_noisy_top_train, X_noisy_top_valid = X_noisy_top[
        training_idxs], X_noisy_top[validation_idxs]
    y_top_train, y_top_valid = y_top[training_idxs], y_top[validation_idxs]

    # Random Train and Validation split
    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=VALIDATION_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, validation_idxs = next(sss.split(X_noisy_random, y_random))

    X_noisy_random_train, X_noisy_random_valid = X_noisy_random[
        training_idxs], X_noisy_random[validation_idxs]
    y_random_train, y_random_valid = y_random[training_idxs], y_random[
        validation_idxs]

    # Bottom Train and Validation split
    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=VALIDATION_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, validation_idxs = next(sss.split(X_noisy_bottom, y_bottom))

    X_noisy_bottom_train, X_noisy_bottom_valid = X_noisy_bottom[
        training_idxs], X_noisy_bottom[validation_idxs]
    y_bottom_train, y_bottom_valid = y_bottom[training_idxs], y_bottom[
        validation_idxs]

    # Scaling of X_noisy
    ss_top = StandardScaler()
    X_noisy_top_train_scaled = ss_top.fit_transform(X_noisy_top_train)
    X_noisy_top_valid_scaled = ss_top.transform(X_noisy_top_valid)

    ss_random = StandardScaler()
    X_noisy_random_train_scaled = ss_random.fit_transform(X_noisy_random_train)
    X_noisy_random_valid_scaled = ss_random.transform(X_noisy_random_valid)

    ss_bottom = StandardScaler()
    X_noisy_bottom_train_scaled = ss_bottom.fit_transform(X_noisy_bottom_train)
    X_noisy_bottom_valid_scaled = ss_bottom.transform(X_noisy_bottom_valid)

    X_noisy_test_top_scaled = ss_top.transform(X_noisy_test)
    X_noisy_test_random_scaled = ss_random.transform(X_noisy_test)
    X_noisy_test_bottom_scaled = ss_bottom.transform(X_noisy_test)

    train_noisy_top_dataloader = get_data_loader(X_noisy_top_train_scaled,
                                                 y_top_train)
    valid_noisy_top_dataloader = get_data_loader(X_noisy_top_valid_scaled,
                                                 y_top_valid)

    train_noisy_random_dataloader = get_data_loader(
        X_noisy_random_train_scaled, y_random_train)
    valid_noisy_random_dataloader = get_data_loader(
        X_noisy_random_valid_scaled, y_random_valid)

    train_noisy_bottom_dataloader = get_data_loader(
        X_noisy_bottom_train_scaled, y_bottom_train)
    valid_noisy_bottom_dataloader = get_data_loader(
        X_noisy_bottom_valid_scaled, y_bottom_valid)

    test_noisy_top_dataloader = get_data_loader(X_noisy_test_top_scaled,
                                                y_test)
    test_noisy_random_dataloader = get_data_loader(X_noisy_test_random_scaled,
                                                   y_test)
    test_noisy_bottom_dataloader = get_data_loader(X_noisy_test_bottom_scaled,
                                                   y_test)

    print('Finetuning on top samples...')
    model_top = copy.deepcopy(model)
    optimizer = torch.optim.SGD(model_top.parameters(), lr=0.01)

    train_top_accuracies, val_top_accuracies, test_top_accuracies, epoch_top, model_top = early_stopping(
        model_top, train_noisy_top_dataloader, valid_noisy_top_dataloader,
        test_noisy_top_dataloader, optimizer, device)
    print('Finetuning on random samples...')
    # Finetuning the model on random samples
    model_random = copy.deepcopy(model)
    optimizer = torch.optim.SGD(model_random.parameters(), lr=0.01)

    train_random_accuracies, val_random_accuracies, test_random_accuracies, epoch_random, model_random = early_stopping(
        model_random, train_noisy_random_dataloader,
        valid_noisy_random_dataloader, test_noisy_random_dataloader, optimizer,
        device)

    print('Finetuning on bottom samples...')
    # Finetuning the model on bottom samples
    model_bottom = copy.deepcopy(model)
    optimizer = torch.optim.SGD(model_bottom.parameters(), lr=0.01)

    train_bottom_accuracies, val_bottom_accuracies, test_bottom_accuracies, epoch_bottom, model_bottom = early_stopping(
        model_bottom, train_noisy_bottom_dataloader,
        valid_noisy_bottom_dataloader, test_noisy_bottom_dataloader, optimizer,
        device)

    weight1_top, weight2_top, bias_top = get_params(model_top)
    weight1_random, weight2_random, bias_random = get_params(model_random)
    weight1_bottom, weight2_bottom, bias_bottom = get_params(model_bottom)

    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    plt.title('Original data scatter plot')
    plt.scatter(X[:, 0],
                X[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    plt.subplot(1, 2, 2)
    plt.title('Original rescaled data scatter plot')
    plt.scatter(X_s[:, 0],
                X_s[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')

    # Training, Validation and Test split
    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=TEST_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, test_idxs = next(sss.split(X_noisy, y))

    X_train_best, X_test_best = X_noisy[training_idxs], X_noisy[test_idxs]
    y_train_best, y_test_best = y[training_idxs], y[test_idxs]

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=VALIDATION_PERCENTAGE,
                                 random_state=RANDOM_SEED)
    training_idxs, validation_idxs = next(sss.split(X_train_best,
                                                    y_train_best))

    X_train_best, X_valid_best = X_train_best[training_idxs], X_train_best[
        validation_idxs]
    y_train_best, y_valid_best = y_train_best[training_idxs], y_train_best[
        validation_idxs]

    ss = StandardScaler()
    # Scaling of X
    X_train_best_scaled = ss.fit_transform(X_train_best)
    X_valid_best_scaled = ss.transform(X_valid_best)
    X_test_best_scaled = ss.transform(X_test_best)

    train_best_dataloader = get_data_loader(X_train_best_scaled, y_train_best)
    val_best_dataloader = get_data_loader(X_valid_best_scaled, y_valid_best)
    test_best_dataloader = get_data_loader(X_test_best_scaled, y_test_best)

    model_best = FFSimpleNet(input_dim=self.n_features, output_dim=1)
    optimizer = torch.optim.SGD(model_best.parameters(), lr=0.01)
    train_accuracies, val_accuracies, test_accuracies, epoch, model_best = early_stopping(
        model_best, train_best_dataloader, val_best_dataloader,
        test_best_dataloader, optimizer, device)

    weight1_noisy, weight2_noisy, bias_noisy = get_params(model_best)

    plt.figure(figsize=(15, 5))
    plt.subplot(1, 3, 1)
    plt.title('Original rescaled data scatter plot')
    plt.scatter(X_s[:, 0],
                X_s[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -4, 4])

    plt.subplot(1, 3, 2)
    plt.title('Noisy rescaled data scatter plot')
    plt.scatter(X_noisy_s[:, 0],
                X_noisy_s[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_noisy, weight2_noisy, bias_noisy, label='Best DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -4, 4])

    plt.subplot(1, 3, 3)
    plt.title('Noisy training rescaled data scatter plot')
    plt.scatter(X_noisy_train_s[:, 0],
                X_noisy_train_s[:, 1],
                marker='o',
                c=y_train,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_noisy, weight2_noisy, bias_noisy, label='Best DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -4, 4])

    plt.figure(figsize=(15, 10))
    plt.subplot(2, 3, 1)
    plt.title('Top {} plot - {}'.format(self.property_perc, self.property))
    plt.scatter(X_noisy_top_viz[:, 0],
                X_noisy_top_viz[:, 1],
                marker='o',
                c=y_top,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_top, weight2_top, bias_top, label='Top DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -4, 4])

    plt.subplot(2, 3, 2)
    plt.title('Random {} plot - {}'.format(self.property_perc, self.property))
    plt.scatter(X_noisy_random_viz[:, 0],
                X_noisy_random_viz[:, 1],
                marker='o',
                c=y_random,
                s=40,
                edgecolor='k',
                alpha=0.3)
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_random, weight2_random, bias_random, label='Random DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -4, 4])

    plt.subplot(2, 3, 3)
    plt.title('Bottom {} plot - {}'.format(self.property_perc, self.property))
    plt.scatter(X_noisy_bottom_viz[:, 0],
                X_noisy_bottom_viz[:, 1],
                marker='o',
                c=y_bottom,
                alpha=0.3,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_bottom, weight2_bottom, bias_bottom, label='Bottom DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -4, 4])

    plt.figure(figsize=(15, 5))
    plt.subplot(1, 3, 1)
    plt.title('Noisy scaled data scatter plot - Top DB')
    plt.scatter(X_noisy_s[:, 0],
                X_noisy_s[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_top, weight2_top, bias_top, label='Top DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -6, 6])

    plt.subplot(1, 3, 2)
    plt.title('Noisy scaled data scatter plot - Random DB')
    plt.scatter(X_noisy_s[:, 0],
                X_noisy_s[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_random, weight2_random, bias_random, label='Random DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -6, 6])

    plt.subplot(1, 3, 3)
    plt.title('Noisy scaled data scatter plot - Bottom DB')
    plt.scatter(X_noisy_s[:, 0],
                X_noisy_s[:, 1],
                marker='o',
                c=y,
                s=40,
                edgecolor='k')
    abline(weight1, weight2, bias, label='Original DB')
    abline(weight1_bottom, weight2_bottom, bias_bottom, label='Bottom DB')
    plt.legend(loc='best')
    plt.axis([-6, 6, -6, 6])

    plt.figure(figsize=(18, 5))

    plt.subplot(1, 3, 1)
    plt.plot(range(epoch_top), train_top_accuracies, label='Train')
    plt.plot(range(epoch_top), val_top_accuracies, label='Validation')
    plt.plot(range(epoch_top), test_top_accuracies, label='Test')
    plt.title('Top {} - {}'.format(self.property_perc, self.property),
              fontsize=16)
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Accuracy', fontsize=16)
    plt.legend(loc='upper left')

    plt.subplot(1, 3, 2)
    plt.plot(range(epoch_random), train_random_accuracies, label='Train')
    plt.plot(range(epoch_random), val_random_accuracies, label='Validation')
    plt.plot(range(epoch_random), test_random_accuracies, label='Test')
    plt.title('Random {} - {}'.format(self.property_perc, self.property),
              fontsize=16)
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Accuracy', fontsize=16)
    plt.legend(loc='upper left')

    plt.subplot(1, 3, 3)
    plt.plot(range(epoch_bottom), train_bottom_accuracies, label='Train')
    plt.plot(range(epoch_bottom), val_bottom_accuracies, label='Validation')
    plt.plot(range(epoch_bottom), test_bottom_accuracies, label='Test')
    plt.title('Bottom {} - {}'.format(self.property_perc, self.property),
              fontsize=16)
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Accuracy', fontsize=16)
    plt.legend(loc='upper left')

    plt.show()

    ######################## ANIMATIONS ########################
    from animator import Animator

    property_perc = 0.1
    n_epochs = 100
    prop = 'entropy'
    alternate = True

    print('Creating animations...')
    animator = Animator(X_noisy_train_s,
                        y_train,
                        prop,
                        property_perc,
                        alternate,
                        n_epochs,
                        interval=100)

    decision_boundaries_rand, train_losses_rand, train_accuracies_rand, test_accuracies_rand, subsets_rand = self.toy_run_recompute(
        model,
        X_noisy_train,
        y_train,
        X_noisy_test,
        y_test,
        n_epochs,
        prop=prop,
        property_perc=property_perc,
        most=True,
        random=True,
        alternate=False)
    decision_boundaries_top, train_losses_top, train_accuracies_top, test_accuracies_top, subsets_top = self.toy_run_recompute(
        model,
        X_noisy_train,
        y_train,
        X_noisy_test,
        y_test,
        n_epochs,
        prop=prop,
        property_perc=property_perc,
        most=True,
        random=False,
        alternate=alternate)
    decision_boundaries_bottom, train_losses_bottom, train_accuracies_bottom, test_accuracies_bottom, subsets_bottom = self.toy_run_recompute(
        model,
        X_noisy_train,
        y_train,
        X_noisy_test,
        y_test,
        n_epochs,
        prop=prop,
        property_perc=property_perc,
        most=False,
        random=False,
        alternate=alternate)
    decision_boundaries_all, train_losses_all, train_accuracies_all, test_accuracies_all, subsets_all = self.toy_run_recompute(
        FFSimpleNet(input_dim=self.n_features, output_dim=1),
        X_noisy_train,
        y_train,
        X_noisy_test,
        y_test,
        n_epochs,
        prop=prop,
        property_perc=1.0,
        most=False,
        random=True,
        alternate=False)

    animation = animator.run(decision_boundaries_rand, train_accuracies_rand,
                             test_accuracies_rand, decision_boundaries_top,
                             train_accuracies_top, test_accuracies_top,
                             decision_boundaries_bottom,
                             train_accuracies_bottom, test_accuracies_bottom,
                             decision_boundaries_all, train_accuracies_all,
                             test_accuracies_all)
    animation.save('animations/{}={}_{}={}.gif'.format(prop, property_perc,
                                                       'alternate', alternate),
                   dpi=100)

    def toy_run_recompute(self,
                          model,
                          X_noisy_train,
                          y_train,
                          X_noisy_test,
                          y_test,
                          total_epochs,
                          prop,
                          property_perc,
                          most,
                          random=False,
                          alternate=False):
        decision_boundaries = []
        train_losses = []
        train_accuracies = []
        test_accuracies = []
        subsets = []

        # Scaling the test set
        ss = StandardScaler()
        ss.fit(X_noisy_train)
        X_noisy_test_scaled = ss.transform(X_noisy_test)
        noisy_test_dataloader = get_data_loader(X_noisy_test_scaled,
                                                y_test,
                                                shuffle=False)

        # Getting the starting point of the decision boundary
        params = model.state_dict()
        weight1 = params['f1.weight'][0][0].numpy()
        weight2 = params['f1.weight'][0][1].numpy()
        bias = params['f1.bias'][0].numpy()

        initial_DB = [weight1, weight2, bias]
        decision_boundaries.append(initial_DB)

        new_model = copy.deepcopy(model)
        criterion = WeightedExponentialLoss()
        optimizer = torch.optim.SGD(new_model.parameters(), lr=0.01)

        if random:
            X_noisy_subset, y_subset = get_random_subset(
                X_noisy_train, y_train, property_perc)

        for epoch in range(total_epochs):
            if alternate:
                if epoch % 2 == 0:
                    X_noisy_subset, y_subset = get_samples_by_property(
                        new_model,
                        X_noisy_train,
                        y_train,
                        property_perc,
                        most=most,
                        prop=prop)
                else:
                    X_noisy_subset, y_subset = get_random_subset(
                        X_noisy_train, y_train, property_perc)
            else:
                if not random:
                    X_noisy_subset, y_subset = get_samples_by_property(
                        new_model,
                        X_noisy_train,
                        y_train,
                        property_perc,
                        most=most,
                        prop=prop)
            # Rescale the input
            X_noisy_subset_scaled = StandardScaler().fit_transform(
                X_noisy_subset)
            subsets.append([X_noisy_subset_scaled, y_subset])

            # Data Loader Generation with weighted sampler
            dataset = ToyDataset(X=X_noisy_subset_scaled, y=y_subset)
            weighted_sampler = WeightedSampler(dataset=dataset)
            weighted_sampler.update_weights(model=model,
                                            dataset=dataset,
                                            learning_rate=1e-3)
            noisy_subset_dataloader = get_data_loader(X_noisy_subset_scaled,
                                                      y_subset,
                                                      sampler=weighted_sampler,
                                                      shuffle=False)

            # One epoch train
            train_epoch_loss, train_epoch_acc = train(new_model,
                                                      noisy_subset_dataloader,
                                                      optimizer, criterion,
                                                      device)

            # Evaluation
            test_epoch_acc = evaluate(new_model, noisy_test_dataloader, device)

            # Getting the new decision boundary
            new_params = new_model.state_dict()
            new_weight1 = new_params['f1.weight'][0][0].numpy().copy()
            new_weight2 = new_params['f1.weight'][0][1].numpy().copy()
            new_bias = new_params['f1.bias'][0].numpy().copy()

            new_DB = [new_weight1, new_weight2, new_bias]

            decision_boundaries.append(new_DB)
            train_losses.append(train_epoch_loss)
            train_accuracies.append(train_epoch_acc)
            test_accuracies.append(test_epoch_acc)

        return decision_boundaries, train_losses, train_accuracies, test_accuracies, subsets
def sequence_light(lightnum, param, vlist, steplist):
    """Set up a sequence of lighting changes"""
    light = GL_LIGHT0 + lightnum
    if light not in conditions:
        conditions[light] = Animator()
    conditions[light].sequence(param, vlist, steplist)
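A hypothetical call, shown only as a sketch, illustrates how sequence_light could drive the Animator; GL_DIFFUSE and the colour/step values below are illustrative and do not come from the original script.

# Hypothetical usage: cycle light 0's diffuse colour red -> green -> blue,
# spending 500 ms on each step (parameter and values are illustrative only).
sequence_light(0, GL_DIFFUSE,
               [(1.0, 0.0, 0.0, 1.0),
                (0.0, 1.0, 0.0, 1.0),
                (0.0, 0.0, 1.0, 1.0)],
               [500, 500, 500])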
def main():

    #user inputs
    markers = 3
    given = 0  #set to one if data is given

    #instantiate objects
    rob = Rob2Wh()
    animate = Animator()
    mc = Monte_Carlo()
    plot = Plotter()

    #create lists
    t = []
    vc = []
    wc = []
    xt = []
    yt = []
    tht = []
    zt = []
    mu = []
    Sig = []
    xe = []
    ye = []
    the = []
    Xk = []

    #collect initial parameters
    states_new = np.array([[rob.x0], [rob.y0], [rob.th0]])
    elements = int(rob.tf / rob.dt)
    M = rob.particles

    #collect given info
    if given != 0:
        t_given = rob.ttr
        x_given = rob.xtr
        y_given = rob.ytr
        th_given = rob.thtr
        v_given = rob.vtr
        w_given = rob.wtr
        z_given = np.squeeze(np.array([[rob.z_rtr], [rob.z_btr]]))

    #initialize particles
    Xkt = mc.uniform_point_cloud(rob.xgrid, rob.ygrid, M)
    # Xkt = np.array([[rob.x0]*M, [rob.y0]*M, [rob.th0]*M])
    ##loop through each time step
    for i in range(0, elements + 1):

        ##extract truth for time step
        if given == 0:  #generate truth
            t.append(i * rob.dt)
            vc_new, wc_new = rob.generate_command(t[i])
            ut = np.array([[vc_new], [wc_new]])

            #propagate truth
            # if i != 0:
            states_new = rob.vel_motion_model(ut, states_new)
            z_new = rob.simulate_sensor(states_new)

        else:  #get truth from given data
            print('truth given')
            t.append(t_given[0][i])
            vc_new, wc_new = rob.generate_command(t[i])
            ut = np.array([[vc_new], [wc_new]])
            states_new = np.array(
                [x_given[0][i], y_given[0][i], th_given[0][i]])
            if markers == 1:
                z_new = np.array([[z_given[0, i], 0, 0, z_given[1, i], 0,
                                   0]]).T
            else:
                # May need to change depending on the data given
                z_new = rob.simulate_sensor(states_new)

        Xkt = mc.monte_carlo(Xkt, ut, z_new, M, rob)
        x_new = np.mean(Xkt[0, :])
        y_new = np.mean(Xkt[1, :])
        th_new = np.mean(Xkt[2, :])
        mu_new = np.array([x_new, y_new, th_new])
        Sig_new = np.cov(Xkt)

        #append values to lists
        mu.append(mu_new)
        Sig.append(Sig_new)
        xt.append(states_new[0])
        yt.append(states_new[1])
        tht.append(states_new[2])
        xe.append(mu_new[0] - xt[i])
        ye.append(mu_new[1] - yt[i])
        the.append(mu_new[2] - tht[i])
        Xk.append(Xkt)

    #prep variables for plotting and animation
    size = len(mu)
    x_hat = []
    y_hat = []
    th_hat = []
    sig_x = []
    sig_y = []
    sig_th = []
    for i in range(size):
        x_hat.append(mu[i][0])  #hats are the estimates
        y_hat.append(mu[i][1])
        th_hat.append(mu[i][2])
        sig_x.append(Sig[i][0][0])
        sig_y.append(Sig[i][1][1])
        sig_th.append(Sig[i][2][2])

    animate.animator(xt, yt, tht, x_hat, y_hat, th_hat, elements, rob, Xk)

    plot.plotting(x_hat, xt, y_hat, yt, th_hat, tht,\
        t, xe, ye, the, sig_x, sig_y, sig_th)

    return (xt, yt, tht, zt, mu, Sig)
Example #25
0
    def __init__(self, path, position, name):
        self.position = position  # position
        self.name = name  # object name
        self.animator = Animator('Sprites/' + path)  # animator
Example #26
0
x_train = torch.t(
    Variable(
        torch.Tensor([
            mglass[t_train - 20], mglass[t_train - 15], mglass[t_train - 10],
            mglass[t_train - 5], mglass[t_train]
        ])))
x_valid = torch.t(
    Variable(
        torch.Tensor([
            mglass[t_valid - 20], mglass[t_valid - 15], mglass[t_valid - 10],
            mglass[t_valid - 5], mglass[t_valid]
        ])))

target_train = Variable(torch.Tensor(mglass[t_train + 5]))
target_valid = Variable(torch.Tensor(mglass[t_valid + 5]))
validation_target_noiseless = Variable(
    torch.Tensor(mglass_noiseless[t_valid + 5]))

animator = Animator(t_train, pp.revert(mglass[t_train + 5]), t_valid,
                    pp.revert(mglass[t_valid + 5]))

learning_rate = 0.5
alpha = 0.9
alphainv = 1 - alpha
regularization = 0.1
hidden_size = 3
hidden_size_2 = 3

neural_net = nn.Sequential(nn.Linear(5, hidden_size), nn.Sigmoid(),
                           nn.Linear(hidden_size, hidden_size_2), nn.Sigmoid(),
                           nn.Linear(hidden_size_2, 1))

loss_over_epoch = []
valid_over_epoch = []
dma_over_epoch = []
Example #27
0
    def animator(self):
        return Animator(self)
import os
import scipy
import scipy.io as sio
from importlib import reload, import_module
import math
import numpy as np
from matplotlib import pyplot as plt
ocp_grid_map = reload(import_module("ocp_grid_map"))
from ocp_grid_map import OcpGridMap
animator = reload(import_module("animator"))
from animator import Animator
utils = reload(import_module("utils"))

##########################

animate = Animator()
mapping = OcpGridMap()
params = utils.read_param('map_params.yaml')

# fig1, ax = plt.subplots()
#load given data on robot position
given = sio.loadmat('state_meas_data.mat')
X = given['X']  #x,y,th of the robot at each time step
z = np.nan_to_num(given['z'], nan=np.inf)  #range and bearing at each time step
thk = given['thk']  #11 angles for the laser range finders
#set up log ratios l = log(p/(1-p))
p0 = params['map0'] * np.ones((params['xlim'], params['ylim']))
l0 = np.log(p0 / (1 - p0))
lprev_i = np.log(p0 / (1 - p0))

steps = len(X[1, :])
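Since the map is kept in log-odds form (l = log(p / (1 - p))), a small helper, sketched here and not part of the original script, can convert the grid back to occupancy probabilities for display:

def logodds_to_prob(l):
    """Invert l = log(p / (1 - p)) to recover the occupancy probability p."""
    return 1.0 - 1.0 / (1.0 + np.exp(l))

# Hypothetical usage before handing the grid to a plotter:
# p_map = logodds_to_prob(lprev_i)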
def main():

    markers = 3
    given = 1
    rob = Rob2Wh()
    animate = Animator()

    t = []
    vc = []
    wc = []
    x = []
    y = []
    th = []
    v = []
    w = []
    z = []
    mu = []
    Sig = []
    K = []
    xe = []
    ye = []
    the = []
    ve = []
    we = []
    x_new = rob.x0
    y_new = rob.y0
    th_new = rob.th0
    mu_prev = np.array([[x_new], [y_new], [th_new]])
    Sig_prev = np.array([[0.1, 0.0, 0.0], [0.0, 0.1, 0.0], [0.0, 0.0, 0.1]])
    elements = int(rob.tf / rob.dt)

    t_given = rob.ttr
    x_given = rob.xtr
    y_given = rob.ytr
    th_given = rob.thtr
    v_given = rob.vtr
    w_given = rob.wtr
    # z_given = np.squeeze(np.array([[rob.z_rtr],[rob.z_btr]]))

    for i in range(0, elements + 1):
        if given == 0:
            t.append(i * rob.dt)
            vc_new, wc_new = generate_command(t[i])
            (x_new, y_new, th_new, v_new,
             w_new) = rob.vel_motion_model(vc_new, wc_new, x_new, y_new,
                                           th_new)
            u_new = np.array([vc_new, wc_new])
            z_new = rob.simulate_sensor(x_new, y_new, th_new)
        else:
            t.append(t_given[0][i])
            vc_new, wc_new = generate_command(t[i])
            x_new = x_given[0][i]
            y_new = y_given[0][i]
            th_new = th_given[0][i]
            u_new = np.array([v_given[0][i], w_given[0][i]])
            z_new = rob.simulate_sensor(x_new, y_new, th_new)
            # if markers == 1:
            #     z_new = np.array([[z_given[0,i], 0, 0, z_given[1,i], 0, 0]]).T
            # else:
            #     z_new = rob.simulate_sensor(x_new, y_new, th_new)

        for j in range(markers):
            marker = j
            mu_new, Sig_new, K_new = rob.UKF(mu_prev, Sig_prev, u_new, z_new,
                                             marker)
            mu_prev = mu_new
            Sig_prev = Sig_new

        mu.append(mu_new)
        Sig.append(Sig_new)
        K.append(K_new)
        x.append(x_new)
        y.append(y_new)
        th.append(th_new)
        v.append(u_new[0])
        w.append(u_new[1])
        vc.append(vc_new)
        wc.append(wc_new)
        xe.append(mu_new[0] - x[i])
        ye.append(mu_new[1] - y[i])
        the.append(mu_new[2] - th[i])
        ve.append(vc_new - v[i])
        we.append(wc_new - w[i])

        mu_prev = np.array(mu_new)
        Sig_prev = np.array(Sig_new)

    size = len(mu)
    x_hat = []
    y_hat = []
    th_hat = []
    sig_x = []
    sig_y = []
    sig_th = []
    for i in range(size):
        x_hat.append(mu[i][0])
        y_hat.append(mu[i][1])
        th_hat.append(mu[i][2])
        sig_x.append(Sig[i][0][0])
        sig_y.append(Sig[i][1][1])
        sig_th.append(Sig[i][2][2])
    animate.animator(x, y, th, x_hat, y_hat, th_hat, elements)

    rob.plotting(x_hat, x, y_hat, y, th_hat, th, vc, v, wc, w,\
        t, xe, ye, the, ve, we, K, sig_x, sig_y, sig_th)

    return (x, y, th, z, mu, Sig)
def adjust_light(lightnum, param, value, ms=0):
    """Set up a gradual changes of a lighting parameter"""
    light = GL_LIGHT0 + lightnum
    if light not in conditions:
        conditions[light] = Animator()
    conditions[light].change(param, value, ms)
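As with sequence_light, a hypothetical call might ramp a single lighting parameter over time; GL_AMBIENT and the values below are illustrative, not taken from the original script.

# Hypothetical usage: fade light 0's ambient term to a dim grey over one second.
adjust_light(0, GL_AMBIENT, (0.1, 0.1, 0.1, 1.0), ms=1000)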