Example #1
def data_load():
    """ Called wherever we want to access the database (not just once). """
    db = data.load("data.json")
    # Send the user to an error page if the file could not be loaded
    if db is None:
        abort(400)
    # Otherwise return the loaded data
    return db
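The helper above targets a Flask-style app (abort is Flask's). A minimal sketch of a view calling it, assuming a hypothetical app object and index.html template that are not part of the original listing:

from flask import Flask, render_template

app = Flask(__name__)

@app.route("/")
def index():
    # Reload the database on every request via the helper above.
    db = data_load()
    return render_template("index.html", projects=db)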
Example #2
    def create_panels(self):
        """
        Build game panels that move around and show at various model states.

        """

        score_panel = Panel(SCORE_BOX.size, DRAW_AREA)
        score_panel.background_image = pygame.image.load(data.load('puzzle_info_bg.png')).convert()
        score_panel.border_image = pygame.image.load(data.load('puzzle_info.png')).convert()
        score_panel.border_image.set_colorkey(color.magenta)
        score_panel.show_position = SCORE_BOX.topleft
        score_panel.hide_position = (- SCORE_BOX.width, 0)
        score_panel.hide(instant=True)
        self.panels['score'] = score_panel

        puzzle_panel = Panel(PUZZLE_POS.size, DRAW_AREA)
        puzzle_panel.background_image = pygame.image.load(data.load('puzzle_bg.png')).convert()
        puzzle_panel.border_image = pygame.image.load(data.load('puzzle.png')).convert()
        puzzle_panel.border_image.set_colorkey(color.magenta)
        puzzle_panel.show_position = PUZZLE_POS.topleft
        puzzle_panel.hide_position = DRAW_AREA.bottomright
        puzzle_panel.hide(instant=True)
        self.panels['puzzle'] = puzzle_panel

        arcade_panel = Panel(ARCADE_POS.size, DRAW_AREA)
        arcade_panel.background_image = pygame.image.load(data.load('arcade_bg.png')).convert()
        arcade_panel.border_image = pygame.image.load(data.load('arcade.png')).convert()
        arcade_panel.border_image.set_colorkey(color.magenta)
        earth = pygame.image.load(data.load('earth.png')).convert()
        earth.set_colorkey(color.magenta)
        somewhere_over_the_rainbow = (random.randint(0, ARCADE_POS.width), random.randint(0, ARCADE_POS.height))
        arcade_panel.background_image.blit(earth, somewhere_over_the_rainbow)
        arcade_panel.hide_position = (0, ARCADE_POS.height)
        arcade_panel.hide(instant=True)
        self.panels['arcade'] = arcade_panel

        results_screen = pygame.image.load(data.load('results.png')).convert()
        results_panel = Panel(results_screen.get_size(), DRAW_AREA)
        results_panel.background_image = results_screen
        results_panel.show_position = (
            (DRAW_AREA.width - results_panel.rect.width) / 2,
            (DRAW_AREA.height - results_panel.rect.height) / 2)
        results_panel.hide_position = (DRAW_AREA.width, 0)
        results_panel.hide(instant=True)
        self.panels['results'] = results_panel

        msg_panel = Panel(MESSAGE_POS.size, DRAW_AREA)
        msg_panel.background_image = pygame.image.load(data.load('messages_bg.png')).convert()
        msg_panel.border_image = pygame.image.load(data.load('messages.png')).convert()
        msg_panel.border_image.set_colorkey(color.magenta)
        msg_panel.show_position = MESSAGE_POS.topleft
        msg_panel.hide_position = DRAW_AREA.topright
        msg_panel.hide(instant=True)
        self.panels['messages'] = msg_panel
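The Panel class itself is not shown in this listing; the following is a minimal sketch of the interface the assignments above assume (constructor taking a size and the drawing area, image attributes, show/hide positions, and an instant hide), not the project's actual implementation:

import pygame

class Panel(object):
    """Hypothetical sketch of the panel interface used above."""

    def __init__(self, size, draw_area):
        self.rect = pygame.Rect((0, 0), size)
        self.draw_area = draw_area
        self.background_image = None
        self.border_image = None
        self.show_position = (0, 0)
        self.hide_position = (0, 0)

    def hide(self, instant=False):
        # Jump straight to the hidden position when instant; a real
        # implementation would otherwise animate towards it each frame.
        if instant:
            self.rect.topleft = self.hide_position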
Example #3
    def __init__(self, fadin=False, showCredits=True):

        try: self.list = data.load("ranking")
        except: data.save([], "ranking")
        self.list = data.load("ranking")

        self.ranking_list = graphics.userInterface.Interface()
        self.ranking_list.addButton(0, "arrow_back.png", data.config.WIDTH * 0.1, data.config.HEIGHT * 0.1, mask="arrow_leftMask.png")
        self.ranking_list.addSlider(1, "slider.png", "slidermask.png", (0, 1000), 0, data.config.WIDTH * 0.75 + 180, data.config.HEIGHT * 0.55, vertical=True, visible=len(self.list) > 5)

        self.fadin = 256 if fadin else -1
        self.showCredits = showCredits
Example #4
def main():

    print "Hello from your game's main()"
    print data.load("sample.txt").read()

    # setup window
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.DOUBLEBUF)
    # rabbyt.set_viewport((0, 0, 640, 480), (0, 0, 640, 480))
    # rabbyt.set_default_attribs()

    screen_buf = pygame.surface.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))
    bg = pygame.surface.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))
    clock = pygame.time.Clock()

    print "Loading tile map..."
    # tiledmap = json.loads(data.load("level_map.json").read())
    # print json.dumps(tiledmap, indent=2)
    # sys.exit(0)

    p = player.Player()

    level_map = tiledmap.TiledRenderer()
    level_map.render(bg)

    # the gameloop
    keepRunning = True

    while keepRunning:
        clock.tick()

        events = pygame.event.get()

        for event in events:
            if event.type == QUIT:
                keepRunning = False
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    keepRunning = False

        key_downs = [e for e in events if e.type == KEYDOWN]
        keys_pressed = pygame.key.get_pressed()
        p.handle_input(key_downs, keys_pressed)

        p.update(level_map.platforms)

        screen_buf.blit(bg, (0, 0))
        p.render(screen_buf)

        screen.blit(screen_buf, (0, 0))
        pygame.display.flip()  # flip the buffers
Example #5
def init(game_type):
	assert type(game_type) is str
	
	
	# Store scene (when ready)
	scene.set('game')
	
	# Store mode (when ready)
	mode.set(game_type)

	# Load the data
	data.load()

	# Not yet loaded
	if not GAME_IMAGES:
		# Main layers
		GAME_IMAGES['delay'] = pyglet.image.load('./data/img/backgrounds/game.delay.bg.png')
		GAME_IMAGES['pause'] = pyglet.image.load('./data/img/backgrounds/game.pause.bg.png')
		GAME_IMAGES['win'] = pyglet.image.load('./data/img/backgrounds/game.end.win.bg.png')
		GAME_IMAGES['loose'] = pyglet.image.load('./data/img/backgrounds/game.end.loose.bg.png')
		
		# Sprites
		GAME_IMAGES['projectile'] = pyglet.image.load(data.get_image('projectile'))
		GAME_IMAGES['bacteria'] = pyglet.image.load(data.get_image('bacteria'))
		GAME_IMAGES['virus'] = pyglet.image.load(data.get_image("virus"))
		GAME_IMAGES['cell'] = pyglet.image.load(data.get_image('cellule'))
		GAME_IMAGES['cell1'] = pyglet.image.load(data.get_image('cellule1'))
		GAME_IMAGES['cell2'] = pyglet.image.load(data.get_image('cellule2'))
		GAME_IMAGES['cell3'] = pyglet.image.load(data.get_image('cellule3'))
		GAME_IMAGES['phagocyte'] = pyglet.image.load(data.get_image('Phagocyte'))
	
	# Faster backgrounds
	background.scene_speed('game')
	
	# No game cursor
	cursor.disable()
	
	# Game music
	if game_type == 'bacteria':
		music.play(data.get_musique())
	else:
		music.play('04')
	
	# Launch the game
	launch()
	
	# Process first update
	animer(0)
	
	return True
Example #6
    def start_level(self):
        global text_overlay
        text_overlay.text = ''

        global world_index, world, world_map, world_offset

        world = json.load(data.load(worlds[world_index]))
        world_map = world['tiles']
        world_offset = [0, 0]

        global map_width, map_height, map_batch
        
        map_width = world['width']
        map_height = len(world_map) / map_width
        map_batch = pyglet.graphics.Batch()

        music_file = pyglet.resource.media(world['music'])
        music_player.queue(music_file)

        global tiles

        tiles = []
        for index, material in enumerate(world_map):
            if material and material > 0:
                tile = Tile(material, (index % map_width) * TILE_SIZE, (index / map_width) * TILE_SIZE)
                tiles.append(tile)
    def respondToUserInput(self, event):
        if self.transitionTimer < 64:
            return self

        for e in self.menu_list.handle(event):
            if e.type == graphics.userInterface.BUTTONCLICKED \
            or (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN \
            and self.menu_list.buttons[0].active == True):

                transitionTimer = 0
                display = pygame.display.get_surface()
                static = display.copy()

                blackness = pygame.Surface((data.config.WIDTH, data.config.HEIGHT))
                blackness.fill(0x000000)

                while transitionTimer <= 255:
                    display.blit(static, (0, 0))

                    blackness.set_alpha(transitionTimer, pygame.RLEACCEL)
                    display.blit(blackness, (0, 0))

                    transitionTimer += 1
                    pygame.display.flip()

                ranking = data.load("ranking")

                ranking.append((self.menu_list.getText("name"), self.score))
                ranking = sorted(ranking, key=itemgetter(1), reverse=True)[0:10]

                data.save(ranking, "ranking")

                return screen.Ranking(fadin=True, showCredits=self.color == 0x262626)

        return self
Example #8
 def load_test(self):
     self.y_test = np.load(self.test_pred_file).astype(np.float32)
     self.images_test = data.load('test')
     image_shapes_test = np.asarray([img.shape for img in self.images_test]).astype(np.float32)
     moments_test = np.load("data/image_moment_stats_v1_test.pkl")
     centroid_distance = np.abs(moments_test["centroids"][:, [1, 0]] - image_shapes_test / 2)
     self.info_test = np.concatenate((centroid_distance, image_shapes_test, moments_test["angles"][:, None], moments_test["minor_axes"][:, None], moments_test["major_axes"][:, None]), 1).astype(np.float32)
def MTL(filename):
    contents = {}
    mtl = None
    for line in data.load(filename, "r"):
        if line.startswith('#'): continue
        values = line.split()
        if not values: continue
        if values[0] == 'newmtl':
            mtl = contents[values[1]] = {}
        elif mtl is None:
            raise ValueError, "mtl file doesn't start with newmtl stmt"
        elif values[0] == 'map_Kd':
            # load the texture referred to by this declaration
            mtl[values[0]] = values[1]
            surf = pygame.image.load(data.filepath(mtl['map_Kd']))
            image = pygame.image.tostring(surf, 'RGBA', 1)
            ix, iy = surf.get_rect().size
            texid = mtl['texture_Kd'] = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, texid)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
                GL_LINEAR)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA,
                GL_UNSIGNED_BYTE, image)
        else:
            mtl[values[0]] = map(float, values[1:])
    return contents
Example #10
def win(graphics):
    f = data.load("img7.png")
    img = load_img(f, graphics)
    
    i=0
    menu_quit = 2
    ticks = pygame.time.get_ticks()
    t = 0
    key_pressed = 1
    next = "quit"
    while menu_quit != 1:
        pygame.event.pump()
        keys = pygame.key.get_pressed()
        if(keys[pygame.K_ESCAPE] == 1 and key_pressed == 0):
            menu_quit = 1
        if( keys[pygame.K_LEFT] == 0 and keys[pygame.K_RIGHT] == 0 and keys[pygame.K_UP] == 0 and keys[pygame.K_DOWN] == 0 and keys[pygame.K_ESCAPE] == 0 and keys[pygame.K_RETURN] == 0 and keys[pygame.K_1] == 0 and keys[pygame.K_2] == 0 and keys[pygame.K_3] == 0 and keys[pygame.K_LCTRL] == 0):
            key_pressed = 0
        if((keys[pygame.K_RETURN] == 1 or keys[pygame.K_RIGHT] == 1 or keys[pygame.K_LCTRL] == 1)  and key_pressed == 0):
            menu_quit = 1
            
        graphics.screen.blit(img, (0,0))
        
        pygame.display.flip()
        pygame.time.wait(1)
        
        t = pygame.time.get_ticks() - ticks
        ticks = pygame.time.get_ticks()
        i += 1
    return next
Example #11
 def __init__(self):
   wx.Frame.__init__(self, None, title = MainWindow.TITLE, style = wx.NO_BORDER)
   
   aBitmap = wx.Image(name = SplashScreen.SPLASH_PATH).ConvertToBitmap()
   
   sizer = wx.BoxSizer(wx.VERTICAL)
   
   pic                = wx.StaticBitmap(self, wx.ID_ANY, aBitmap)
   self.text          = wx.StaticText(self, wx.ID_ANY, size = (pic.GetSize()[0], -1), style = wx.ST_NO_AUTORESIZE)
   self.progress      = wx.Gauge(self, wx.ID_ANY)
   
   sizer.Add (pic,           1, wx.ALIGN_TOP    | wx.ALIGN_LEFT | wx.SHAPED)
   sizer.Add (self.text,     0, wx.ALIGN_BOTTOM | wx.ALIGN_LEFT | wx.EXPAND)
   sizer.Add (self.progress, 0, wx.ALIGN_BOTTOM | wx.ALIGN_LEFT | wx.EXPAND)
   
   self.text.SetFont(wx.Font(20, wx.FONTFAMILY_SCRIPT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
   
   self.SetIcon(utils_gui.make_icon(wx.Image(name = SplashScreen.ICON_PATH)))
   self.SetSizerAndFit (sizer)
   self.CenterOnScreen ( )
   self.Show ( )
   
 
   wx.Yield ( )  
   import data
   self.__application_loaded(*data.load(self))
Example #12
File: nn.py Project: agamat/trading
def demo():
	cube = data.load(['SPY', 'QQQ'], '20100101', '20130204')

	spy_px = [cube.data[('SPY', 'adjclose')][dt] for dt in cube.get_dates()]
	qqq_px = [cube.data[('QQQ', 'adjclose')][dt] for dt in cube.get_dates()]
	minspy = min(spy_px)
	maxspy = max(spy_px)
	minqqq = min(qqq_px)
	maxqqq = max(qqq_px)
	spy_px = [(px - minspy) / (maxspy - minspy) for px in spy_px]
	qqq_px = [(px - minqqq) / (maxqqq - minqqq) for px in qqq_px]

	spy_inp = [[spy_px[j-i] for i in range(10, 0, -1)] for j in range(10, len(spy_px))]
	spy_tar = [spy_px[i] for i in range(10, len(spy_px))]
	qqq_inp = [[qqq_px[j-i] for i in range(10, 0, -1)] for j in range(10, len(qqq_px))]
	qqq_tar = [qqq_px[i] for i in range(10, len(qqq_px))]
	inp = np.array([z[0] + z[1] for z in zip(spy_inp, qqq_inp)])
	tar = np.array([[z[0]] + [z[1]] for z in zip(spy_tar, qqq_tar)])
	net = nl.net.newff([[0,1]]*20, [41,40,2])
	#inp = np.array(spy_inp)
	#tar = np.array([spy_tar]).reshape(len(inp), 1)
	#net = nl.net.newff([[0,1]]*10, [51,2,1])
	error = net.train(inp,tar,epochs=500, show=100, goal=0.02)
	
	
	returns = {}	
	for dt in cube.get_dates()[1:]:
		yesterday = cube.go_back(dt, 1)
		px_T = cube.data[('SPY', 'adjclose')][dt]
		px_Tm1 = cube.data[('SPY', 'adjclose')][yesterday]
		r = px_T / px_Tm1
		returns[dt] = px_T	
	maxreturn = max(returns.values())
	minreturn = min(returns.values())
	for k, v in returns.items():
		returns[k] = (v - minreturn) / (maxreturn - minreturn)
	
	xs = []
	ys = []
	pat = []
	dates = cube.get_dates()
	for dt in dates[11:]:		
		contents = [returns[cube.go_back(dt, x)] for x in range(10, 0, -1)]
		target = returns[dt]
		xs.append(contents)
		ys.append([target])
		pat.append([contents, [target]])
	
	import neurolab as nl
	import numpy as np	
	inp = np.array(xs)

	tar = np.array(ys)
	
	net = nl.net.newp([[-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1]],1)	
	error = net.train(inp, tar, epochs=500, show=100)
	print 'error', error
	
	out = net.sim([inp[-1]])
	print 'out', out
def run():
	test_data = data.load()["actives"].values()  # active abilities

	# fail test if test_data is empty
	if not test_data:
		print("Fail: No test data loaded")
		return 0

	al = AbilityList()

	# loop through data creating an ability from each 'name'
	for ability in test_data:
		if al.populate({'test_ability': ability}, ABILITY_TYPE.ACTIVE):
			print("\nAdded: %s" % ability["name"])
			print()
			print("Print object: \n")
			try:
				print(al.collections[ABILITY_TYPE.ACTIVE][ability["name"]])
			except KeyError:
				print("Invalid data, printing debug info...")
				print(al.collections[ABILITY_TYPE.ACTIVE])
			print()
		else:
			print("Failed!\n")
			# TODO Add debug info as to why this test failed.
			return 0

	return 1
def run():
	# List of all abilities
	test_data = data.load()

	# For each list in the test_data loaded.
	for d in test_data.values():
		# turn each result into a list
		l = list(d.values())

		print("Number of Abilities Loaded: %s" % len(l))
		print()
		print("Raw Data: \n")
		print(d.values())

		print()
		print("Printing random ability name: \n")
		print(l[0]["name"])
		print()

		try:
			provides = l[0]['provides']
			for k,v in provides.items():
				print("\nDisplay formatted provides tree: \n")
				print(k)
				print(v)
				print()
		except KeyError as e:
			print("Fail: Ability selected does not have a provides attribute")

	# blank line for readability
	print()

	return 1  # pass
Example #15
def mail_task(task, note):
    """
    Mails a task to '*****@*****.**'.

    Args:
        task:  The task you wish to have mailed as a string value.
        note:  The note you wish associated with the task, as a string value.
    """
    try:
        logging.info("Loading user data.")
        user_data = data.load()
        if user_data.password_prompt:
            user_data.password = getpass.getpass("Please enter your password: ")
        # ... (mail-sending code elided in the source listing) ...
    except Exception as e:  # original exception type lost in the listing
        logging.error("Error mailing task: {error}".format(error=e))
        logging.error("Stacktrace: {st}".format(st=traceback.format_exc()))
        print "There was an error sending your task: {error}".format(error=e)

    except data.WLTaskUserDataException:
        logging.info("User data not found. Run with --config.")
        print "No configuration found!  Please run \"wltask --config\" to setup your account."
        sys.exit(0)
    except IOError:
        logging.error("There was a problem loading user data.  The file exists but it could not be read.")
        print "There was an error reading your configuration file.  Try again or delete your"
        print "configuration file and rerun with -config."
        sys.exit(0)
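A call site simply passes the two strings described in the docstring's Args; a hypothetical invocation:

mail_task("Water the plants", "The ones on the balcony, before Friday")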
Example #16
def model_selection():
    file = 'data/n228_bcdefgh.mat'
    dat = data.load(file)

    X, y = data.build(dat, range(0, 96), 'fr1', 17)

    filter = SelectKBest(chi2, k=5)
    clf = PoissonNB()

    poisson = Pipeline([('filter',filter),('pois',clf)])

    #poisson.set_params(filter__k=10).fit(X,y)

    param_grid = [{'filter__score_func': [chi2], 'filter__k': range(1, 96)},
                  {'filter__score_func': [f_classif], 'filter__k': range(1,96)}]
    grid = GridSearchCV(poisson, param_grid, n_jobs=-1, scoring=make_scorer(error_distance, greater_is_better=False)).fit(X,y)

    print "Best Params"
    print grid.best_params_
    print("Grid scores on development set:")
    print()
    for params, mean_score, scores in grid.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    print()
Example #17
def project_page(id):
    """
    Using the data layer, Jinja2, and the project.html template, this function
    returns a page containing information about the project that has the
    specified id in the database. If no such project exists, it returns the
    "404" page.

    This function is called when the URL '/project/<int:id>' is requested.

    :return: The specified project's page
    """
    db = data.load("data.json")
    project = data.get_project(db, id)
    pprint(project)
    if project is not None:
        return render_template(
            "elements/project.html", page_name="Project", project_data=project, stylesheets=["full-project.css"]
        )
    else:
        return render_template(
            "status_codes/404.html",
            page_name="Project",
            non_existent_url=request.path,
            stylesheets=["status_codes/404.css"],
        )
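The docstring says the view answers '/project/<int:id>'; under Flask that implies a URL rule like the following (a hypothetical registration, assuming the usual app object; the project most likely uses the @app.route decorator, which the listing does not show):

app.add_url_rule("/project/<int:id>", view_func=project_page)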
Example #18
 def test_permutation(self):
     #test permutation function
     for dn in self.datasets:
         D = data.load(os.path.join(self.dir_name,dn))
         perm = SP.random.permutation(D['X'].shape[0])
         #1. set permuattion
         lmm = limix.CLMM()
         lmm.setK(D['K'])
         lmm.setSNPs(D['X'])
         lmm.setCovs(D['Cov'])
         lmm.setPheno(D['Y'])
         if 1:
             #pdb.set_trace()
             perm = SP.array(perm,dtype='int32')#Windows needs int32 as long -> fix interface to accept int64 types
         lmm.setPermutation(perm)
         lmm.process()
         pv_perm1 = lmm.getPv().ravel()
         #2. do by hand
         lmm = limix.CLMM()
         lmm.setK(D['K'])
         lmm.setSNPs(D['X'][perm])
         lmm.setCovs(D['Cov'])
         lmm.setPheno(D['Y'])
         lmm.process()
         pv_perm2 = lmm.getPv().ravel()
         D2 = (SP.log10(pv_perm1)-SP.log10(pv_perm2))**2
         RV = SP.sqrt(D2.mean())
         self.assertTrue(RV<1E-6)
Example #19
def main_json(cache=[None, 0]): 
    """ Lazy loading main.json """
    mtime = os.path.getmtime(static_path("main.json"))
    if mtime != cache[1]:
        cache[1] = mtime
        cache[0] = data.load(static_path("main.json"))
    return cache[0]
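The mutable default argument doubles as a per-process cache keyed by the file's mtime, so repeated calls reuse the parsed object until main.json changes on disk. A small usage sketch:

first = main_json()    # reads main.json and fills the cache
second = main_json()   # mtime unchanged, so the cached object is returned
assert first is second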
Example #20
def list_page():
    """
    Using the data layer, Jinja2, and the list.html template, this function
    either returns the default list page (containing all the projects) or,
    if it was requested with a POST, the list page containing only the
    projects that match the search parameters carried in the POST.

    This function is called when the URL '/list' is requested.

    :return: The list page of our portfolio (containing all or some projects
            from the data layer).
    """
    db = data.load("data.json")
    full_list = data.search(db)
    techniques = data.get_technique_stats(db)

    if request.method == "POST":
        requested_technique_list = request.form.getlist("technique")
        requested_search_fields_list = request.form.getlist("search_fields")
        if not requested_search_fields_list:
            requested_search_fields_list = None
        requested_order = request.form["sort_order"]
        requested_sort_field = request.form["sort_field"]
        requested_text_search = request.form["text_search"]
        if requested_text_search == "":
            requested_text_search = None
        search_results = data.search(
            full_list,
            techniques=requested_technique_list,
            search_fields=requested_search_fields_list,
            sort_order=requested_order,
            sort_by=requested_sort_field,
            search=requested_text_search,
        )

        return render_template(
            "list.html",
            page_name="List Page",
            sortable_fields=sortable_fields,
            searchable_fields=searchable_fields,
            project_list=search_results,
            previous_search_fields=requested_search_fields_list or [],
            previous_text_search=requested_text_search or "",
            previous_techniques=requested_technique_list,
            previous_sort_field=requested_sort_field,
            techniques=sorted(techniques.keys()),
            stylesheets=["list.css", "project-item.css", "search-box.css"],
        )

    else:
        return render_template(
            "list.html",
            page_name="List Page",
            sortable_fields=sortable_fields,
            searchable_fields=searchable_fields or [],
            project_list=full_list,
            techniques=sorted(techniques.keys()),
            stylesheets=["list.css", "project-item.css", "search-box.css"],
        )
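The POST branch reads its parameters from the submitted form; the field names below are taken from the request.form accesses above, while the values are purely illustrative:

form_data = {
    "technique": ["python", "flask"],      # read with getlist("technique")
    "search_fields": ["project_name"],     # read with getlist("search_fields")
    "sort_order": "desc",
    "sort_field": "start_date",
    "text_search": "portfolio",
}
# e.g. app.test_client().post("/list", data=form_data)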
Example #21
def projects():
    projects = data.load("data.json")
    if request.method == "GET":
        return render_template('list.html', projectlist=projects, listlength=len(projects))
    elif request.method == "POST":
        searchedlist = data.search(projects, search=request.form["searchstr"])
        return render_template('list.html', projectlist=searchedlist, listlength=len(searchedlist))
Example #22
    def setUp(self):
        #check: do we have a csv File?
        self.dir_name = os.path.dirname(__file__)
        self.dataset = os.path.join(self.dir_name,'varDecomp')

        if (not os.path.exists(self.dataset)) or 'recalc' in sys.argv:
            if not os.path.exists(self.dataset):
                os.makedirs(self.dataset)
            SP.random.seed(1)
            self.N = 200
            self.S = 1000
            self.P = 2
            self.D = {}
            self.genGeno()        
            self.genPheno()
            self.generate = True
        else:
            self.generate=False
            #self.D = data.load(os.path.join(self.dir_name,self.dataset))
            self.D = data.load(self.dataset)
            self.N = self.D['X'].shape[0]
            self.S = self.D['X'].shape[1]
            self.P = self.D['Y'].shape[1]

        self.Kg = SP.dot(self.D['X'],self.D['X'].T)
        self.Kg = self.Kg/self.Kg.diagonal().mean()

        self.vc = VAR.VarianceDecomposition(self.D['Y'])
        self.vc.addRandomEffect(self.Kg,jitter=0)
        self.vc.addRandomEffect(is_noise=True,jitter=0)
        self.vc.addFixedEffect()
def index():
	db=load()
	Pname=db[0]['project_name']
	Stext=db[0]['short_description']
	Simage=db[0]['small_image']
	Ltext=db[0]['long_description']
	Bimage=db[0]['big_image']
	return render_template('index.html', Stext=Stext, Ltext=Ltext, Simage=Simage, Pname=Pname, Bimage=Bimage)
 def __init__(self, levelname):
     filename = levelname + ".txt"
     self.mapdata = []
     for line in data.load(filename, 'r').readlines():
         temp = []
         for char in line.strip():
             temp.append(int(char))
         self.mapdata.append(temp)
Example #25
def show_project(pid):
    try:
        db = data.load('data.json')
        project_var = data.search(db, search=str(pid), search_fields=['project_no'])
        print(project_var)
        return render_template("project.html", title="project <pid>", projectid = pid, project = project_var)
    except:
        abort(404) #give error 404
def listPage():
	if request.method == 'GET':
		db=load()
		srch=True
		techniques=get_techniques(db)
		projects=search(db, sort_by='start_date',sort_order='desc',techniques=None,search=None,search_fields=None)
		field=get_fields(db)
		return render_template('list.html',srch=srch,projects=projects, field=field, techniques=techniques)
Example #27
    def sys_call(self, name, arguments):
        n = intmask(runpack('>Q', self.fd.read(8)))
        expected_name = self.fd.read(n)

        n = intmask(runpack('>Q', self.fd.read(8)))
        expected_arguments = [data.load(self.fd) for i in xrange(n)]

        n = intmask(runpack('>Q', self.fd.read(8)))
        return_values = [data.load(self.fd) for i in xrange(n)]

        assert expected_name == name
        assert len(expected_arguments) == len(arguments)
        for i in xrange(len(expected_arguments)):
            expected = expected_arguments[i]
            arg = arguments[i]
            if not expected.eq(arg):
                raise Exception('expected %s to equal %s' % (expected, arg))
        return return_values
Example #28
File: main.py Project: vkbsb/bitris
def main():
    print "Hello from your game's main()"
    print data.load('sample.txt').read()
        # director init takes the same arguments as pyglet.window
    cocos.director.director.init(320, 480)

    # if Inventory.data['userdata']['IsFirstRun']:
    #     #TODO: take them through the story.
    #
    # else:
    # We create a new layer, an instance of HelloWorld
    hello_layer = mainmenu.MainMenu() #gameplay.GamePlay()

    # A scene that contains the layer hello_layer
    main_scene = cocos.scene.Scene (hello_layer)

    # And now, start the application, starting with main_scene
    cocos.director.director.run (main_scene)
Example #29
def load_font(file, size):
    "loads a font"
    file = load(file)
    try:
        font = pygame.font.Font(file, size)
    except pygame.error:
        raise SystemExit('Could not load font "%s" %s' %
                         (file, pygame.get_error()))
    return font
Example #30
def load_image(file):
    "loads an image"
    file = load(file)
    try:
        surface = pygame.image.load(file)
    except pygame.error:
        raise SystemExit('Could not load image "%s" %s' %
                         (file, pygame.get_error()))
    return surface.convert_alpha()
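These two loaders call a bare load() helper; in pygame-skellington style projects that helper usually lives in a small data module that resolves names inside a bundled data/ directory. The following is a sketch of such a module, an assumption about what load() does here rather than this project's actual code:

import os

data_py = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.normpath(os.path.join(data_py, '..', 'data'))

def filepath(filename):
    """Return the full path to a file in the data directory."""
    return os.path.join(data_dir, filename)

def load(filename, mode='rb'):
    """Open a data file and return its file object."""
    return open(filepath(filename), mode)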
Example #31
from sklearn.neighbors import RadiusNeighborsClassifier
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn import random_projection

dataName = 'multimnist'
batchSize = 32
learnRate = 0.001
positive_ratio = 0.003
maxEpochs = 201
repeat = 5
hop = 3
K = 30

P_X, U_X, N_list, N_list1, N_list2, A_list, testX, testY = data.load(dataName)

A_X = P_X[0:int(positive_ratio * len(N_list))]
U_shuffle_list = [x for x in range(len(U_X))]
N_shuffle_list = [x for x in range(len(N_list))]
A_shuffle_list = [x for x in range(len(A_X))]
dim_list = [784, 128, 64, 32]

final_auc_history = []
final_pr_history = []
best_auc_history = []

trainX = np.concatenate((U_X, A_X), 0)

for rep in range(repeat):
    tf.reset_default_graph()
Example #32
                    help='Minimum categorical bin size',
                    type=int,
                    default=1)
parser.add_argument('-ct',
                    '--cat_trans',
                    help='Category transformation method',
                    type=str,
                    default='tgtrate')
parser.add_argument('-cv', '--cv', action='store_true')
parser.add_argument('-codetest', '--codetest', action='store_true')
parser.add_argument('-getcached', '--getcached', action='store_true')

m_params = vars(parser.parse_args())

# Load data
X, y, X_sub, ids = data.load(m_params)

xgb_param = {
    'silent': 1,
    'eta': 0.1,
    'objective': 'count:poisson',
    'min_child_weight': 3,
    'colsample_bytree': 0.9
}

# do cross validation scoring
kf = KFold(X.shape[0], n_folds=5, shuffle=True, random_state=1)
scr = np.zeros([len(kf)])
oob_pred = np.zeros(X.shape[0])
sub_pred = np.zeros((X_sub.shape[0], 5))
dtest = xgb.DMatrix(X_sub)
Example #33
import time

import tensorflow as tf

import config
import data
import model

parser = argparse.ArgumentParser(description='Train song embeddings.')
parser.add_argument('--config', '-c', required=True, help='Config file')
parser.add_argument('--data', '-d', required=True, help='Training data directory')
parser.add_argument('--max_steps', type=int, default=1000000, help='Number of steps to train for')
args = parser.parse_args()

config = config.load(args.config)
(train_songs, train_examples) = data.load(args.data)

print("Model configuration: ")
pprint.PrettyPrinter(indent=4).pprint(config)

input_song_ids = tf.placeholder(tf.int32, [None])
target_feature_sequences = tf.placeholder(
    tf.float32,
    [None, config['sequence_length'], config['num_features']],
)
feature_outputs = model.build(config, len(train_songs), input_song_ids, target_feature_sequences)

loss = tf.losses.mean_squared_error(target_feature_sequences, feature_outputs)

optimizer = tf.train.AdamOptimizer(config['learning_rate'])
global_step = tf.Variable(0, name='global_step', trainable=False)
Example #34
    "weights_L": -0.1,
    "weights_H": 0.1,
    "save": False,  #haven't implement this one yet  
    "loss": network.CrossEntropyLoss,
    "learning_C": [],
    "testing_C": []
}


def replace_value_with_definition(key_to_find, definition):
    for key in initial_setting.keys():
        if key == key_to_find:
            initial_setting[key] = definition


dataset = data.load()
features = dataset["train_data"]
targets = dataset["train_labels"]
test_features = dataset["test_data"]
test_targets = dataset["test_labels"]
m, n = features.shape
replace_value_with_definition("inputs_N", n)
replace_value_with_definition("weights_L", -1.0 / n)
replace_value_with_definition("weights_H", 1.0 / n)
print initial_setting
NN = network.NetworkFrame(initial_setting)
features_normalized, mean, std = data.normalize(features)
test_normalized, _, _ = data.normalize(test_features, mean, std)
NN.Train(features_normalized, targets, test_normalized, test_targets, 10e-5,
         100, 200, 0.001)
learning_record = NN.GetLearingRecord()
Example #35
parser.add_argument('--attention_out_dim', type=int, default=1)

parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--replay_size', type=int, default=100000)
parser.add_argument('--learn_start', type=int, default=1000)
parser.add_argument('--gamma', type=float, default=0.95)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=1250)

args = parser.parse_args()

#################################
############## Data #############
#################################

train_data, test_data = data.load(args.mode, args.annotations, args.max_train,
                                  args.max_test)

layout_vocab_size, object_vocab_size, text_vocab_size, text_vocab = data.get_statistics(
    train_data, test_data)

print '\n<Main> Converting to tensors'
train_layouts, train_objects, train_rewards, train_terminal, \
        train_instructions, train_indices, train_values, train_goals = data.to_tensor(train_data, text_vocab, args.annotations)

test_layouts, test_objects, test_rewards, test_terminal, \
    test_instructions, test_indices, test_values, test_goals = data.to_tensor(test_data, text_vocab, args.annotations)

# pdb.set_trace()

print '<Main> Training:', train_layouts.size(), 'x', train_objects.size(
), 'x', train_indices.size()
Example #36
                                     Dense)
import config

model = tf.keras.Sequential([
    Embedding(input_dim=config.VOCAB_SIZE,
              output_dim=config.EMBEDDING_DIM,
              input_length=config.MAX_LENGTH),
    Bidirectional(LSTM(128)),
    Dropout(0.5),
    Dense(64, activation="relu"),
    Dense(1, activation="sigmoid"),
])

model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)

if __name__ == "__main__":
    import data
    import preprocessing
    df = preprocessing.preprocess(data.load("twitter"))
    train_df, validation_df = preprocessing.train_val_split(df)
    tokenizer = preprocessing.get_tokenizer(train_df)
    train_padded, validation_padded = preprocessing.tokenize(
        tokenizer, train_df, validation_df)
    history = model.fit(x=train_padded, y=train_df.label.to_numpy(), epochs=2)
    eval_loss, eval_acc = model.evaluate(x=validation_padded,
                                         y=validation_df.label.to_numpy())
Example #37
import discord,os,json,re,asyncio,data
from mcrcon import MCRcon
from discord.ext import commands
from mcstatus import MinecraftServer
from discord.ext.commands import is_owner
from bot import adminOrOwner,logOutput
from discord_slash import cog_ext,SlashContext


save = data.load('save')
serverStarted = False
sizes={2:'MBs',3:'GBs'}
mainDirectory = os.getcwd()
serverQuery=os.getenv('serverQuery')
servers=data.load('servers')
mc=MCRcon(os.getenv('mcRconHost'),os.getenv('mcRconPassword'),int(os.getenv('mcRconPort')))

class serverMcstarter(commands.Cog):
	def __init__(self,client): self.client = client
	@cog_ext.cog_subcommand(base='minecraft',name='start',description='starts a minecraft server.')
	async def start(self,ctx:SlashContext,server:str):
		if serverStarted: return
		try: os.chdir(servers([server,'directory']))
		except: await ctx.send('server name error')
		os.startfile('botStart.bat')
		os.chdir(mainDirectory)
		await ctx.send('okay, it\'s starting.')
		for i in range(save.read(['servers',str(ctx.guild.id),'config','maxServerStartTime'])):
			try: MinecraftServer.lookup(serverQuery).query().players.online; break
			except: asyncio.sleep(1)
		else: await ctx.send('error starting server.'); return
Example #38
                   image_size=(10, 10)):
    data -= data.min()
    data /= data.max()

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n**2 - data.shape[0]), (0, padsize),
               (0, padsize)) + ((0, 0), ) * (data.ndim - 3)
    data = np.pad(data,
                  padding,
                  mode='constant',
                  constant_values=(padval, padval))

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) +
                        data.shape[4:])

    #plt.figure(figsize=image_size)
    plt.imshow(data, cmap=cmap)
    plt.axis('off')
    plt.show()


if __name__ == "__main__":
    train_X, _, _, _, _, _, _ = data.load()

    random_idxs = np.random.randint(0, train_X.shape[0], 16)
    visualize_data(train_X[random_idxs].transpose(0, 2, 3, 1))
Example #39
                    filemode="w",
                    encoding="utf-8")
root_logger = logging.getLogger()

stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(
    logging.Formatter(fmt='%(levelname)s: %(message)s'))
root_logger.addHandler(stderr_handler)

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(
        description='A Magic: the Gathering parser.')
    # TODO argparser.add_argument("-t", "--test", help="Run tests then exit", action="store_true")
    argparser.add_argument("-d",
                           "--debug",
                           help="Enable debug logs on stderr.",
                           action="store_true")
    argparser.add_argument(
        "-n",
        "--nodownload",
        help=
        "Disable downloading new data file. Only works if there is already an existing data file.",
        action="store_true")
    args = argparser.parse_args()
    if args.debug:
        stderr_handler.setLevel(logging.DEBUG)
    if args.nodownload:
        data.no_download = True
    card.load_cards(data.load())
Example #40
    def create_panels(self):
        """
        Build game panels that move around and show at various model states.

        """

        score_panel = Panel(SCORE_BOX.size, DRAW_AREA)
        score_panel.background_image = pygame.image.load(
            data.load('puzzle_info_bg.png')).convert()
        score_panel.border_image = pygame.image.load(
            data.load('puzzle_info.png')).convert()
        score_panel.border_image.set_colorkey(color.magenta)
        score_panel.show_position = SCORE_BOX.topleft
        score_panel.hide_position = (-SCORE_BOX.width, 0)
        score_panel.hide(instant=True)
        self.panels['score'] = score_panel

        puzzle_panel = Panel(PUZZLE_POS.size, DRAW_AREA)
        puzzle_panel.background_image = pygame.image.load(
            data.load('puzzle_bg.png')).convert()
        puzzle_panel.border_image = pygame.image.load(
            data.load('puzzle.png')).convert()
        puzzle_panel.border_image.set_colorkey(color.magenta)
        puzzle_panel.show_position = PUZZLE_POS.topleft
        puzzle_panel.hide_position = DRAW_AREA.bottomright
        puzzle_panel.hide(instant=True)
        self.panels['puzzle'] = puzzle_panel

        arcade_panel = Panel(ARCADE_POS.size, DRAW_AREA)
        arcade_panel.background_image = pygame.image.load(
            data.load('arcade_bg.png')).convert()
        arcade_panel.border_image = pygame.image.load(
            data.load('arcade.png')).convert()
        arcade_panel.border_image.set_colorkey(color.magenta)
        earth = pygame.image.load(data.load('earth.png')).convert()
        earth.set_colorkey(color.magenta)
        somewhere_over_the_rainbow = (random.randint(0, ARCADE_POS.width),
                                      random.randint(0, ARCADE_POS.height))
        arcade_panel.background_image.blit(earth, somewhere_over_the_rainbow)
        arcade_panel.hide_position = (0, ARCADE_POS.height)
        arcade_panel.hide(instant=True)
        self.panels['arcade'] = arcade_panel

        results_screen = pygame.image.load(data.load('results.png')).convert()
        results_panel = Panel(results_screen.get_size(), DRAW_AREA)
        results_panel.background_image = results_screen
        results_panel.show_position = (
            (DRAW_AREA.width - results_panel.rect.width) / 2,
            (DRAW_AREA.height - results_panel.rect.height) / 2)
        results_panel.hide_position = (DRAW_AREA.width, 0)
        results_panel.hide(instant=True)
        self.panels['results'] = results_panel

        msg_panel = Panel(MESSAGE_POS.size, DRAW_AREA)
        msg_panel.background_image = pygame.image.load(
            data.load('messages_bg.png')).convert()
        msg_panel.border_image = pygame.image.load(
            data.load('messages.png')).convert()
        msg_panel.border_image.set_colorkey(color.magenta)
        msg_panel.show_position = MESSAGE_POS.topleft
        msg_panel.hide_position = DRAW_AREA.topright
        msg_panel.hide(instant=True)
        self.panels['messages'] = msg_panel
Example #41
import numpy as np

seed = 666
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Constant definition
device = torch.device("cuda:0")
embedding_size = 300
hidden_size = 100
num_layers = 1
bidirectional = True

# The problem comes from the count vectorizer, which drops some words
print("Load Dataset")
dataset = load(torch_dataset=True)["torch"]


def embedding_collate_decorator(collate_fn):
    def wrapper(batch):
        x, y, id_ = collate_fn(batch)
        return x, y

    return wrapper


collate_fn = embedding_collate_decorator(sequence_collate_fn)

indices = list(range(len(dataset)))
random.shuffle(indices)
for i, (trainindices, testindices) in enumerate(all_but_one(indices, k=10)):
Example #42
thresholds['456.hmmer'] = None
thresholds['453.povray'] = None
thresholds['445.gobmk'] = 0.65
thresholds['403.gcc'] = 0.505
thresholds['444.namd'] = 0.257
thresholds['429.mcf'] = 0.65
thresholds['458.sjeng'] = None
thresholds['483.xalancbmk'] = 0.69

if __name__ == '__main__':

    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device("cpu")
    print('Loading data')
    print('Including:', [sys.argv[2]])
    lang, entries = data.load(sys.argv[1], include=[sys.argv[2]], cache=False)

    #n_iters = 250000
    #dataset = data.balanced(entries, n_iters)
    dataset = entries

    print('Testing')

    #encoder1 = torch.load('encoder.pt')
    #classifier1 = torch.load('classifier.pt')
    encoder1 = torch.load('/home/rodrigo/ml/deepopt/test-4/' + sys.argv[2] +
                          '/encoder.pt')
    classifier1 = torch.load('/home/rodrigo/ml/deepopt/test-4/' + sys.argv[2] +
                             '/classifier.pt')

    evaluateAll(dataset, encoder1, classifier1, thresholds[sys.argv[2]],
Example #43
            plot_loss_total = 0

    return plot_losses


if __name__ == '__main__':
    devname = "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(devname)
    #device = torch.device("cpu")
    if devname == "cpu":
        torch.set_num_threads(16)
    print('Loading data')
    print(device)
    #print('Ignoring:',sys.argv[2:])
    #lang, entries = data.load(sys.argv[1],exclude=sys.argv[2:],cache=False)
    lang, entries = data.load(sys.argv[1], cache=False)
    #lang, entries = data.load(sys.argv[1],include="473.astar 462.libquantum ".split(),cache=False)

    #n_iters = 30000
    #dataset = data.balanced(entries, n_iters)
    print(len(entries))
    dataset = entries

    print('Training')

    embedded_size = 64
    hidden_size = 128
    encoder1 = model.Encoder(lang.n_words, embedded_size, hidden_size,
                             3).to(device)
    classifier1 = model.Classifier(hidden_size, 2).to(device)
Example #44
File: mdl.py Project: we1l1n/nsp
        buffer = self._enc_uttr(word_tokens)
        act_tail = self._action_nn.initial_state()
        stack_tail = self._stack_nn.initial_state()

        return self._trans_run(w_tokens, buffer, stack_tail, act_tail)

    def save_mdl(self, fpath):
        self._pc.save(fpath)

    def load_mdl(self, fpath):
        self._pc.populate(fpath)


if __name__ == '__main__':
    data.load()

    lr = ast.LogicRep(
        'answer(count(intersection(city(cityid(\'austin\', _)), loc_2(countryid(\'usa\')))))'
    )
    # lr=LogicRep('answer(state(loc_2(countryid(CountryName))))')
    # print(lr.tokens)
    # print(lr.reconvert())
    nlst = lr.parse()
    # print nlst
    ast = ast.Tree(nlst)
    # print ast.get_nlves_and_lves()
    gp_predicates = data.gp_predicates
    terms, acts = ast.get_terms_and_acts(gp_predicates)
    print terms
    print acts
    def train_oneparamset(self, indir, outdir, wdir, fid_lst_tra, fid_lst_val, params_savefile, trialstr='', cont=None):

        print('Loading all validation data at once ...')
        # X_val, Y_val = data.load_inoutset(indir, outdir, wdir, fid_lst_val, verbose=1)
        X_vals = data.load(indir, fid_lst_val, verbose=1, label='Context labels: ')
        Y_vals = data.load(outdir, fid_lst_val, verbose=1, label='Output features: ')
        X_vals, Y_vals = data.croplen([X_vals, Y_vals])
        print('    {} validation files'.format(len(fid_lst_val)))
        print('    number of validation files / train files: {:.2f}%'.format(100.0*float(len(fid_lst_val))/len(fid_lst_tra)))

        print('Model initial status before training')
        worst_val = data.cost_0pred_rmse(Y_vals)
        print("    0-pred validation RMSE = {} (100%)".format(worst_val))
        init_pred_rms = data.prediction_rms(self._model, [X_vals])
        print('    initial RMS of prediction = {}'.format(init_pred_rms))
        init_val = data.cost_model_prediction_rmse(self._model, [X_vals], Y_vals)
        best_val = None
        print("    initial validation RMSE = {} ({:.4f}%)".format(init_val, 100.0*init_val/worst_val))

        nbbatches = int(len(fid_lst_tra)/self.cfg.train_batch_size)
        print('    using {} batches of {} sentences each'.format(nbbatches, self.cfg.train_batch_size))
        print('    model #parameters={}'.format(self._model.count_params()))

        nbtrainframes = 0
        for fid in fid_lst_tra:
            X = data.loadfile(outdir, fid)
            nbtrainframes += X.shape[0]
        print('    Training set: {} sentences, #frames={} ({})'.format(len(fid_lst_tra), nbtrainframes, time.strftime('%H:%M:%S', time.gmtime((nbtrainframes*self._model.vocoder.shift)))))
        print('    #parameters/#frames={:.2f}'.format(float(self._model.count_params())/nbtrainframes))
        if self.cfg.train_nbepochs_scalewdata and not self.cfg.train_batch_lengthmax is None:
            # During an epoch, the whole data is _not_ seen by the training since cfg.train_batch_lengthmax is limited and smaller than the sentence size.
            # To compensate for this and make the config below less dependent on the data, the min and max nbepochs are scaled according to the missing number of frames seen.
            # TODO Should consider only non-silent frames, many recordings have a lot of pre and post silences
            epochcoef = nbtrainframes/float((self.cfg.train_batch_lengthmax*len(fid_lst_tra)))
            print('    scale number of epochs wrt number of frames')
            self.cfg.train_min_nbepochs = int(self.cfg.train_min_nbepochs*epochcoef)
            self.cfg.train_max_nbepochs = int(self.cfg.train_max_nbepochs*epochcoef)
            print('        train_min_nbepochs={}'.format(self.cfg.train_min_nbepochs))
            print('        train_max_nbepochs={}'.format(self.cfg.train_max_nbepochs))

        self.prepare()  # This has to be overwritten by sub-classes

        costs = defaultdict(list)
        epochs_modelssaved = []
        epochs_durs = []
        nbnodecepochs = 0
        generator_updates = 0
        epochstart = 1
        if cont and len(glob.glob(os.path.splitext(params_savefile)[0]+'-trainingstate-last.h5*'))>0:
            print('    reloading previous training state ...')
            savedcfg, extras, rngstate = self.loadTrainingState(os.path.splitext(params_savefile)[0]+'-trainingstate-last.h5')
            np.random.set_state(rngstate)
            cost_val = extras['cost_val']
            # Restoring some local variables
            costs = extras['costs']
            epochs_modelssaved = extras['epochs_modelssaved']
            epochs_durs = extras['epochs_durs']
            generator_updates = extras['generator_updates']
            epochstart = extras['epoch']+1
            # Restore the saving criteria if only none of those 3 cfg values changed:
            if (savedcfg.train_min_nbepochs==self.cfg.train_min_nbepochs) and (savedcfg.train_max_nbepochs==self.cfg.train_max_nbepochs) and (savedcfg.train_cancel_nodecepochs==self.cfg.train_cancel_nodecepochs):
                best_val = extras['best_val']
                nbnodecepochs = extras['nbnodecepochs']

        print_log("    start training ...")
        epoch = -1
        for epoch in range(epochstart,1+self.cfg.train_max_nbepochs):
            timeepochstart = time.time()
            rndidx = np.arange(int(nbbatches*self.cfg.train_batch_size))    # Need to restart from ordered state to make the shuffling repeatable after reloading training state, the shuffling will be different anyway
            np.random.shuffle(rndidx)
            rndidxb = np.split(rndidx, nbbatches)
            cost_tra = None
            costs_tra_batches = []
            costs_tra_gen_wgan_lse_ratios = []
            load_times = []
            train_times = []
            for batchid in xrange(nbbatches):

                timeloadstart = time.time()
                print_tty('\r    Training batch {}/{}'.format(1+batchid, nbbatches))

                # Load training data online, because data is often too heavy to hold in memory
                fid_lst_trab = [fid_lst_tra[bidx] for bidx in rndidxb[batchid]]
                X_trab, Y_trab, W_trab = data.load_inoutset(indir, outdir, wdir, fid_lst_trab, length=self.cfg.train_batch_length, lengthmax=self.cfg.train_batch_lengthmax, maskpadtype=self.cfg.train_batch_padtype, cropmode=self.cfg.train_batch_cropmode)

                if 0: # Plot batch
                    import matplotlib.pyplot as plt
                    plt.ion()
                    plt.imshow(Y_trab[0,].T, origin='lower', aspect='auto', interpolation='none', cmap='jet')
                    from IPython.core.debugger import  Pdb; Pdb().set_trace()

                load_times.append(time.time()-timeloadstart)
                print_tty(' (iter load: {:.6f}s); training '.format(load_times[-1]))

                timetrainstart = time.time()

                cost_tra = self.train_on_batch(batchid, X_trab, Y_trab)  # This has to be overwritten by sub-classes

                train_times.append(time.time()-timetrainstart)

                if not cost_tra is None:
                    print_tty('err={:.4f} (iter train: {:.4f}s)                  '.format(cost_tra,train_times[-1]))
                    if np.isnan(cost_tra):                      # pragma: no cover
                        print_log('    previous costs: {}'.format(costs_tra_batches))
                        print_log('    E{} Batch {}/{} train cost = {}'.format(epoch, 1+batchid, nbbatches, cost_tra))
                        raise ValueError('ERROR: Training cost is nan!')
                    costs_tra_batches.append(cost_tra)
            print_tty('\r                                                           \r')
            costs['model_training'].append(np.mean(costs_tra_batches))

            cost_val = self.update_validation_cost(costs, X_vals, Y_vals)  # This has to be overwritten by sub-classes

            print_log("    E{}/{} {}  cost_tra={:.6f} (load:{}s train:{}s)  cost_val={:.6f} ({:.4f}% RMSE)  {} MiB GPU {} MiB RAM".format(epoch, self.cfg.train_max_nbepochs, trialstr, costs['model_training'][-1], time2str(np.sum(load_times)), time2str(np.sum(train_times)), cost_val, 100*costs['model_rmse_validation'][-1]/worst_val, tf_gpu_memused(), proc_memresident()))
            sys.stdout.flush()

            if np.isnan(cost_val): raise ValueError('ERROR: Validation cost is nan!')
            # if (self._errtype=='LSE') and (cost_val>=self.cfg.train_cancel_validthresh*worst_val): raise ValueError('ERROR: Validation cost blew up! It is higher than {} times the worst possible values'.format(self.cfg.train_cancel_validthresh)) # TODO

            self._model.save(os.path.splitext(params_savefile)[0]+'-last.h5', printfn=print_log, extras={'cost_val':cost_val})

            # Save model parameters
            if epoch>=self.cfg.train_min_nbepochs: # Assume no model is good enough before self.cfg.train_min_nbepochs
                if ((best_val is None) or (cost_val<best_val)): # Among all trials of hyper-parameter optimisation
                    best_val = cost_val
                    self._model.save(params_savefile, printfn=print_log, extras={'cost_val':cost_val}, infostr='(E{} C{:.4f})'.format(epoch, best_val))
                    epochs_modelssaved.append(epoch)
                    nbnodecepochs = 0
                else:
                    nbnodecepochs += 1

            if self.cfg.train_log_plot:
                print_log('    saving plots')
                log_plot_costs(costs, worst_val, fname=os.path.splitext(params_savefile)[0]+'-fig_costs_'+trialstr+'.svg', epochs_modelssaved=epochs_modelssaved)

                nbsamples = 2
                nbsamples = min(nbsamples, len(X_vals))
                Y_preds = []
                for sampli in xrange(nbsamples): Y_preds.append(self._model.predict(np.reshape(X_vals[sampli],[1]+[s for s in X_vals[sampli].shape]))[0,])

                plotsuffix = ''
                if len(epochs_modelssaved)>0 and epochs_modelssaved[-1]==epoch: plotsuffix='_best'
                else:                                                           plotsuffix='_last'
                log_plot_samples(Y_vals, Y_preds, nbsamples=nbsamples, fname=os.path.splitext(params_savefile)[0]+'-fig_samples_'+trialstr+plotsuffix+'.png', vocoder=self._model.vocoder, title='E{}'.format(epoch))

            epochs_durs.append(time.time()-timeepochstart)
            print_log('    ET: {}   max TT: {}s   train ~time left: {}'.format(time2str(epochs_durs[-1]), time2str(np.median(epochs_durs[-10:])*self.cfg.train_max_nbepochs), time2str(np.median(epochs_durs[-10:])*(self.cfg.train_max_nbepochs-epoch))))

            self.saveTrainingState(os.path.splitext(params_savefile)[0]+'-trainingstate-last.h5', printfn=print_log, extras={'cost_val':cost_val, 'best_val':best_val, 'costs':costs, 'epochs_modelssaved':epochs_modelssaved, 'epochs_durs':epochs_durs, 'nbnodecepochs':nbnodecepochs, 'generator_updates':generator_updates, 'epoch':epoch})

            if nbnodecepochs>=self.cfg.train_cancel_nodecepochs: # pragma: no cover
                print_log('WARNING: validation error did not decrease for {} epochs. Early stop!'.format(self.cfg.train_cancel_nodecepochs))
                break

        if best_val is None: raise ValueError('No model has been saved during training!')
        return {'epoch_stopped':epoch, 'worst_val':worst_val, 'best_epoch':epochs_modelssaved[-1] if len(epochs_modelssaved)>0 else -1, 'best_val':best_val}
Example #46
        if iter % record_loss_every == 0:
            plot_loss_avg = plot_loss_total / record_loss_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    return plot_losses


if __name__ == '__main__':

    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device("cpu")
    print('Loading data')
    print(device)
    print('Ignoring:', sys.argv[2:])
    entries = data.load(sys.argv[1], exclude=sys.argv[2:])

    n_iters = 30000
    dataset = data.balanced(entries, n_iters)
    #dataset=entries

    print('Training')

    embedded_size = 64
    hidden_size = 128
    encoder1 = model.Encoder(hidden_size, 3).to(device)
    classifier1 = model.Classifier(hidden_size, 2).to(device)

    train(dataset, encoder1, classifier1, device=device, print_every=2000)

    print('Caching trained model')
Example #47
import logging, data

save = data.load('save')


def setupLogger(name, log_file, level=logging.WARNING):
    logger = logging.getLogger(name)
    formatter = logging.Formatter('[%(asctime)s.%(msecs)03d] %(message)s',
                                  '%d/%m/%Y %H:%M:%S')
    fileHandler = logging.FileHandler(log_file, mode='a', encoding='utf-8')
    fileHandler.setFormatter(formatter)
    logger.setLevel(level)
    logger.addHandler(fileHandler)
    return logger


outputLog = setupLogger('output log', 'logs/output.log')


def logOutput(log, ctx):
    try:
        log = f'{log} in {ctx.guild.name} by {ctx.author.name}'
    except:
        log = f'{log} in DMs with {ctx.author.name}'
    outputLog.warning(log)
    if save.read(['botConfig', 'outputToConsole']): print(log)
Example #48
    #############
    # Consider the optimal number of clusters
    #############
    # picpath = './elbow.png'
    # vectors = data.load(vecpath)
    # elbow(vectors, picpath)
    # for i in range(2, 7):
    # 	silhouette(vectors, i)

    #############
    # k-means
    #############
    n_cluster = 4
    print('n_cluster:', n_cluster)
    mwelist = data.load(mwepath)
    vectors = data.load(vecpath)
    km = KMeans(n_clusters=n_cluster, random_state=43, n_jobs=-1)
    km.fit(vectors)

    cluster_labels = km.labels_
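    # Group the MWEs by the cluster label k-means assigned to their vectors.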
    cluster_to_words = defaultdict(list)
    for cluster_id, word in zip(cluster_labels, mwelist):
        cluster_to_words[cluster_id].append(word)

    for i, cluslist in cluster_to_words.items():
        cluspath = './result/clusterlist_' + str(i + 1) + '.pickle'
        data.save(cluspath, cluslist)
        print('-------------------')
        print(len(cluslist))
        # for clus in cluslist:
Example #49
0
from NTM import *
import time  # time.time() is used below; make the dependency explicit rather than relying on the star import
import data
import matplotlib.pyplot as plt

#use_gpu(2)

lr = 0.001
drop_rate = 0.
batch_size = 20
hidden_size = 500
latent_size = 50
# try: sgd, momentum, rmsprop, adagrad, adadelta, adam, nesterov_momentum
optimizer = "adam"
continuous = False

train_idx, valid_idx, test_idx, other_data = data.load(
    "./data/news_ap.txt")  #movie.txt, yelp.txt
[docs, dic, w2i, i2w, bg] = other_data
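# docs: documents, dic: vocabulary, w2i/i2w: word<->index maps, bg: presumably a background word distribution passed to the NTM below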

#for key, value in sorted(dic.iteritems(), key=lambda (k,v): (v,k)):
#    print "%s: %s" % (key, value)

dim_x = len(dic)
dim_y = dim_x
print "#features = ", dim_x, "#labels = ", dim_y

print "compiling..."
model = NTM(dim_x, dim_x, hidden_size, latent_size, bg, continuous, optimizer)

print "training..."
start = time.time()
for i in xrange(100):
Example #50
0
                var_list=tf.trainable_variables(), colocate_gradients_with_ops=True, global_step=global_step)

train_merge = [
    tf.summary.scalar('train loss/' + 'classification_loss', train_cost),
    tf.summary.scalar('train loss/' + 'regularization_loss',
                      regularization_loss),
    tf.summary.scalar('train loss/' + 'total_loss', total_loss)
]
test_merge = [tf.summary.scalar('test accuracy/' + 'accuracy', total_accuracy)]
train_merge_op = tf.summary.merge(train_merge)
test_merge_op = tf.summary.merge(test_merge)

# Dataset iterator
train_gen_adc = data.load(TRAIN_BATCH_SIZE,
                          data_dir_1=DATA_DIR_ADC_POSITIVE,
                          data_dir_2=DATA_DIR_ADC_NEGATIVE,
                          name_list_path=TRAIN_IMAGE_LIST_PATH,
                          size=(IMAGE_SIZE, IMAGE_SIZE))
train_gen_t2 = data.load(TRAIN_BATCH_SIZE,
                         data_dir_1=DATA_DIR_T2_POSITIVE,
                         data_dir_2=DATA_DIR_T2_NEGATIVE,
                         name_list_path=TRAIN_IMAGE_LIST_PATH,
                         size=(IMAGE_SIZE, IMAGE_SIZE))
test_gen_adc = data.load(TEST_BATCH_SIZE,
                         data_dir_1=DATA_DIR_ADC_TEST,
                         name_list_path=TEST_IMAGE_LIST_PATH,
                         size=(IMAGE_SIZE, IMAGE_SIZE))
test_gen_t2 = data.load(TEST_BATCH_SIZE,
                        data_dir_1=DATA_DIR_T2_TEST,
                        name_list_path=TEST_IMAGE_LIST_PATH,
                        size=(IMAGE_SIZE, IMAGE_SIZE))
Example #51
0
    loss_D += opt.gradlambda * loss_D_grad
    # ------ optimizer ------
    varsD = [v for v in tf.global_variables() if "discrim" in v.name]
    lrD_PH = tf.placeholder(tf.float32, shape=[])
    with tf.name_scope("adam"):
        optimD = tf.train.AdamOptimizer(learning_rate=lrD_PH).minimize(
            loss_D, var_list=varsD)
    # ------ generate summaries ------
    summaryLossTrain = tf.summary.scalar("TRAIN_loss_D", loss_D)
    summaryGradTrain = tf.summary.scalar("TRAIN_grad_D", grad_D_norm_mean)
    summaryImageTrain = tf.summary.merge(summaryImageTrain)
    summaryImageTest = tf.summary.merge(summaryImageTest)

# load data
print(util.toMagenta("loading training data..."))
trainData = data.load(opt)
print(util.toMagenta("loading test data..."))
testData = data.load(opt, test=True)

# prepare model saver/summary writer
saver_D = tf.train.Saver(var_list=varsD, max_to_keep=20)
summaryWriter = tf.summary.FileWriter("summary_{0}/{1}".format(
    opt.group, opt.model))

print(util.toYellow("======= TRAINING START ======="))
timeStart = time.time()
# start session
tfConfig = tf.ConfigProto(allow_soft_placement=True)
tfConfig.gpu_options.allow_growth = True
with tf.Session(config=tfConfig) as sess:
    sess.run(tf.global_variables_initializer())
Example #52
0
import metasense
import data
from data import load
import matching_epa
from matching_epa import match5Second
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt

data = load( 1, 'donovan', 17)
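# Note: this rebinds the imported `data` module name to the returned value;
# judging by the commented-out lines below, the tuple is (train, test, epa).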
print(data[0])
#train = data[0]
#test = data[1]
#epa = data[2]
#print(epa)
#train = train.sort_values(by=['Ts'])
#print(train)
#plt.scatter(train.index.values[1 : 10], (train['no2'])[1 : 10], s = 0.5)
#plt.xlabel('time (5 seconds)')
#plt.ylabel('o3 (Active - Working Voltage)')  
#plt.savefig("my_plots/5second.png")
#plt.clf()
#data = match5Second( 2, 'donovan', 15)
#data = match5Second( 2, 'donovan', 18) #TODO
#data = match5Second( 2, 'donovan', 20)
#print("el cajon, round 1, board 12")
#data = match5Second( 1, 'elcajon', 12)
#print("el cajon, round 1, board 13")
#data = match5Second( 1, 'elcajon', 13)
#print("el cajon, round 3, board 15")
Example #53
0
import os
from os.path import join as pjoin
import warnings

import data
import segment
import parallel

base = os.path.dirname(__file__)
output = pjoin(base, '../output')

outputs = {'output': output, 'segmentation': pjoin(output, 'segmented')}

for folder in outputs.values():
    if not os.path.isdir(folder):
        print('Creating {}'.format(folder))
        os.mkdir(folder)

images = data.load('dr')

with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=UserWarning)

    parallel.apply_parallel(images,
                            segment.segment,
                            output_dir=outputs['segmentation'],
                            postfix='segmented')
Example #54
0
def generator_valid():
    while True:
        dat, lbl, msk = data.load(mode='valid', return_mask=True)
        yield (dat[0], lbl[0, ..., 0], msk[0])
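
A minimal sketch of how such a validation generator might be consumed; the step count and the meaning of the mask are assumptions for illustration, not part of the original example:

valid_gen = generator_valid()
n_valid_steps = 10  # assumed number of validation batches to draw
for _ in range(n_valid_steps):
    dat, lbl, msk = next(valid_gen)
    # evaluate the model on (dat, lbl), presumably ignoring positions where msk is zero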
Example #55
0
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    set_cache_regions_from_settings(settings)

    engine = engine_from_config(settings, 'sqlalchemy.')
    mistic.app.tables.create(engine)
    mistic.app.tables.Base.metadata.create_all(engine)

    ## - Debugging purpose --
    #favorite = mistic.app.tables.FavoriteDatasetStore.getall(mistic.app.tables.DBSession(), 'boucherg')
    #print 'Favorite', favorite

    ## Adding dummy info in table
    #mistic.app.tables.FavoriteDatasetStore.record(mistic.app.tables.DBSession(), 'new_user', 'new_dataset')

    #favorite = mistic.app.tables.FavoriteDatasetStore.getall(mistic.app.tables.DBSession(), 'new_user')
    #print '------------------------------------'
    #print 'Favorite', favorite

    ## -- End of debugging --

    if 'mistic.datasets' in settings:
        settings.update(
            load_datasets_settings(settings['mistic.datasets'], global_config))

    config_args = dict(root_factory=Root,
                       settings=settings,
                       default_permission='view')

    if 'mistic.basic.auth' in settings:

        if '=' not in settings['mistic.basic.auth']:
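            # No '=' in the value, so treat it as a path to a credentials file and read it.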
            with open(settings['mistic.basic.auth'], 'r') as cred_file:
                authorized_credentials = cred_file.readlines()
            authorized_credentials = ' '.join(
                [a.strip() for a in authorized_credentials])

            authorized_credentials = authorized_credentials.strip()
        else:
            authorized_credentials = settings['mistic.basic.auth']

        config_args.update(
            dict(authentication_policy=BasicAuthenticationPolicy(
                http_basic_authenticator(authorized_credentials),
                realm=settings.get('mistic.basic.realm', 'mistic')),
                 authorization_policy=ACLAuthorizationPolicy()))

    my_session_factory = UnencryptedCookieSessionFactoryConfig(
        'myouwnsekreetfactory')
    config_args.update(dict(session_factory=my_session_factory))

    config = Configurator(**config_args)

    def authorize(request):
        return HTTPUnauthorized(
            headers=forget(request)
        )  ## Response(body='hello world!', content_type='text/plain')

    config.add_view(authorize,
                    context=HTTPForbidden,
                    permission=NO_PERMISSION_REQUIRED)

    if 'mistic.data' not in settings:
        raise exceptions.RuntimeError('no dataset configuration supplied')

    data.load(settings['mistic.data'])

    if 'mistic.rsvg-convert' in settings:
        mistic.app.views.pdffile.PDFData.rsvg_convert = settings[
            'mistic.rsvg-convert']
    if 'mistic.phantomjs' in settings:
        mistic.app.views.pdffile.PDFData.phantomjs = settings[
            'mistic.phantomjs']

    if 'mistic.forward_host' in settings:
        config.registry.settings['mistic_forward_host'] = settings[
            'mistic.forward_host']

    config.add_route('mistic.template.root', '/')

    config.add_route('mistic.modal.datasets', '/modal/datasets')
    # params: go=GO_ID - filter the list of returned genes by GO term.
    config.add_route('mistic.json.annotation.genes',
                     '/annotations/{annotation}/genes')
    config.add_route('mistic.json.annotation.gene_ids',
                     '/annotations/{annotation}/gene_ids')
    config.add_route('mistic.json.annotation.gene',
                     '/annotations/{annotation}/genes/{gene_id}')
    config.add_route('mistic.json.annotation.gene.gs',
                     '/annotations/{annotation}/genes/{gene_id}/gs*gsid')
    config.add_route('mistic.json.annotation.gene.name',
                     '/annotations/{annotation}/genes/{gene_id}/name')
    config.add_route('mistic.json.annotation.gene.symbol',
                     '/annotations/{annotation}/genes/{gene_id}/symbol')

    config.add_route('mistic.json.annotation.gs',
                     '/annotations/{annotation}/gs')

    config.add_route('mistic.json.cannotation.items',
                     '/cannotations/datasets/{dataset}')

    config.add_route('mistic.json.datasets', '/datasets')
    config.add_route('mistic.json.dataset', '/datasets/{dataset}')
    config.add_route('mistic.json.dataset.search',
                     '/datasets/{dataset}/search')

    config.add_route('mistic.json.dataset.sampleinfo',
                     '/datasets/{dataset}/sampleinfo')
    config.add_route('mistic.json.dataset.samples',
                     '/datasets/{dataset}/samples')
    config.add_route('mistic.json.dataset.samples.enrich',
                     '/datasets/{dataset}/samples/enrichment')
    config.add_route('mistic.json.dataset.geneset.enrich',
                     '/datasets/{dataset}/geneset/enrichment')
    config.add_route('mistic.json.sample',
                     '/datasets/{dataset}/samples/{sample_id}')

    config.add_route('mistic.json.dataset.sampleinfo.search',
                     '/datasets/{dataset}/sampleinfo/search')

    config.add_route('mistic.json.dataset.mds', '/datasets/{dataset}/mds')
    config.add_route('mistic.json.dataset.mst',
                     '/datasets/{dataset}/mst/{xform}')
    config.add_route('mistic.json.dataset.mapped_mst',
                     '/datasets/{dataset}/mst/{xform}/{tgt_annotation}')
    config.add_route('mistic.json.dataset.extract',
                     '/datasets/{dataset}/extract/{xform}')
    config.add_route('mistic.json.dataset.extractSave',
                     '/datasets/{dataset}/extractSave/{xform}')

    config.add_route('mistic.json.dataset.genes', '/datasets/{dataset}/genes')
    config.add_route('mistic.json.gene', '/datasets/{dataset}/genes/{gene_id}')
    config.add_route('mistic.json.gene.corr',
                     '/datasets/{dataset}/genes/{gene_id}/corr')
    config.add_route('mistic.json.gene.expr',
                     '/datasets/{dataset}/genes/{gene_id}/expr')
    config.add_route('mistic.json.gene.utest',
                     '/datasets/{dataset}/genes/{gene_id}/utest')
    config.add_route('mistic.json.gene.gorilla',
                     '/datasets/{dataset}/genes/{gene_id}/gorilla')

    config.add_route('mistic.json.go', '/go')
    config.add_route('mistic.json.go.search', '/go/search')
    config.add_route('mistic.json.go.id', '/go/{go_id}')
    config.add_route('mistic.json.go.name', '/go/{go_id}/name')

    config.add_route('mistic.json.attr.set', '/attr')
    config.add_route('mistic.json.attr.get', '/attr/{id}')

    config.add_route('mistic.json.dataset.fav.record',
                     'record_favorite_dataset')
    config.add_route('mistic.json.dataset.fav.get', 'get_favorite_dataset')

    config.add_route('mistic.json.saveValueInSession', '/saveInSession')

    config.add_route('mistic.template.help', '/help')
    config.add_route('mistic.template.corrgraph', '/genecorr/{dataset}')
    config.add_route('mistic.template.scatterplot', '/scatterplot')
    config.add_route('mistic.template.pairplot', '/pairplot/{dataset}*genes')
    config.add_route('mistic.template.mds', '/mds/{dataset}*genes')

    config.add_route('mistic.csv.root', '/csv/root')
    config.add_route('mistic.pdf.fromsvg', '/pdf')
    config.add_route('mistic.csv.corr', '/csv/genecorr/{dataset}/{gene}')
    config.add_route('mistic.csv.corrds', '/csv/genecorr/{dataset}/{gene}/all')

    config.add_route('mistic.template.clustering',
                     '/clustering/{dataset}/{xform}')
    config.add_route('mistic.template.mstplot', '/mstplot/{dataset}/{xform}')

    config.add_static_view('static', 'mistic:app/static', cache_max_age=3600)
    config.add_static_view('images',
                           'mistic:resources/images',
                           cache_max_age=3600)
    config.add_static_view('video',
                           'mistic:resources/video',
                           cache_max_age=3600)

    config.scan()

    return config.make_wsgi_app()
Example #56
0
	# ------ define GP and D ------
	geometric = graph.geometric_multires
	# ------ geometric predictor ------
	imageFGwarpAll,pAll,_ = geometric(opt,imageBGfake,imageFGfake,pPert)
	pWarp = pAll[-1]
	# ------ composite image ------
	imageCompAll = []
	for l in range(opt.warpN+1):
		imageFGwarp = imageFGwarpAll[l]
		imageComp = graph.composite(opt,imageBGfake,imageFGwarp)
		imageCompAll.append(imageComp)
	varsGP = [v for v in tf.global_variables() if "geometric" in v.name]

# load data
print(util.toMagenta("loading test data..."))
testData = data.load(opt,test=True)

# prepare model saver/summary writer
saver_GP = tf.train.Saver(var_list=varsGP)

print(util.toYellow("======= EVALUATION START ======="))
timeStart = time.time()
# start session
tfConfig = tf.ConfigProto(allow_soft_placement=True)
tfConfig.gpu_options.allow_growth = True
with tf.Session(config=tfConfig) as sess:
	sess.run(tf.global_variables_initializer())
	util.restoreModel(opt,sess,saver_GP,opt.loadGP,"GP")
	print(util.toMagenta("start evaluation..."))

	# create directories for test image output
Example #57
0
# Start with the argument parsing, before imports, because importing takes a
# long time and we should know immediately if we gave malformed arguments
import argparse

parser = argparse.ArgumentParser(description="train NN to generate CWs")
parser.add_argument("--weights",
                    "-w",
                    type=str,
                    default='s2s.h5',
                    help="The weights file (.h5)")
args = parser.parse_args()

import model as lstm_model
import data

encoder_input_data, decoder_input_data, decoder_target_data, token_index, input_texts = data.load(
)
del decoder_input_data, decoder_target_data

_, encoder_model, decoder_model = lstm_model.generate_models(len(token_index))
# Load model
encoder_model.load_weights(args.weights, by_name=True)
decoder_model.load_weights(args.weights, by_name=True)

actual_num_samples = len(encoder_input_data)
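# Decode 50 consecutive sequences starting at the 80% mark of the dataset.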
for seq_index in range(int(actual_num_samples * 0.8),
                       int(actual_num_samples * 0.8 + 50)):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index:seq_index + 1]
    decoded_sentence = data.decode_sequence(encoder_model, decoder_model,
                                            token_index, input_seq)
Example #58
0
from __future__ import absolute_import, division, print_function

import matplotlib.pyplot as plt
import numpy as np
import wbml.out
import wbml.plot
import stheno.autograd
import lab.autograd as B
from varz.autograd import Vars, minimise_l_bfgs_b

from olmm import model, objective, predict, project
from data import load

# Load the data, which are Pandas data frames.
locs, data = load()

# Convert to NumPy.
locs = locs.to_numpy()
x_data = data.index.to_numpy()[:, None]
y_data = data.to_numpy()

# Inputs for two-months ahead predictions.
x_pred = np.arange(1, x_data.max() + 60, dtype=np.float64)[:, None]

# Normalise data.
data_mean = np.mean(y_data, axis=0, keepdims=True)
data_scale = np.std(y_data, axis=0, keepdims=True)
y_data_norm = (y_data - data_mean) / data_scale  # centre by the mean, then divide by the standard deviation

# Model parameters:
Example #59
0
def train(args):
    # setup metric logging. It's important to log your loss!!
    log_f = open(args.log_file, 'w')
    fieldnames = ['step', 'train_loss', 'train_acc', 'dev_loss', 'dev_acc']
    logger = csv.DictWriter(log_f, fieldnames)
    logger.writeheader()

    # load data
    train_data, train_labels = load(args.data_dir, split="train")
    dev_data, dev_labels = load(args.data_dir, split="dev")

    # Build model
    if args.model.lower() == "simple-ff":
        model = FeedForward(args.ff_hunits)
    elif args.model.lower() == "simple-cnn":
        model = SimpleConvNN(args.cnn_n1_channels, args.cnn_n1_kernel,
                             args.cnn_n2_kernel)
    elif args.model.lower() == "best":
        # TODO: Feel free to change in initialization arguments here to take
        # whatever parameters you need.
        model = BestNN()
    else:
        raise Exception("Unknown model type passed in!")

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    # TODO: You can change this loop as you need to, to optimize your training!
    # for example, if you wanted to implement early stopping to make sure you
    # don't overfit your model, you would do so in this loop.
    for step in range(args.train_steps):
        # run the model and backprop for train steps
        i = np.random.choice(train_data.shape[0],
                             size=args.batch_size,
                             replace=False)
        x = torch.from_numpy(train_data[i].astype(np.float32))
        y = torch.from_numpy(train_labels[i].astype(np.int))

        # Forward pass: Get logits for x
        logits = model(x)
        # Compute loss
        loss = F.cross_entropy(logits, y)
        # Zero gradients, perform a backward pass, and update the weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        #every 100 steps, log metrics

        if step % 100 == 0:
            train_acc, train_loss = approx_train_acc_and_loss(
                model, train_data, train_labels)
            dev_acc, dev_loss = dev_acc_and_loss(model, dev_data, dev_labels)

            step_metrics = {
                'step': step,
                'train_loss': loss.item(),
                'train_acc': train_acc,
                'dev_loss': dev_loss,
                'dev_acc': dev_acc
            }

            print(
                f'On step {step}: Train loss {train_loss} | Dev acc is {dev_acc}'
            )
            logger.writerow(step_metrics)

    # close the log file
    log_f.close()
    # save model
    print(f'Done training. Saving model at {args.model_save}')
    torch.save(model, args.model_save)
Example #60
0
def train(args):
    np.random.seed(42)
    torch.manual_seed(42)

    # setup metric logging. It's important to log your loss!!
    log_f = open(args.log_file, 'w')
    fieldnames = ['step', 'train_loss', 'train_acc', 'dev_loss', 'dev_acc']
    logger = csv.DictWriter(log_f, fieldnames)
    logger.writeheader()

    # load data
    train_data, train_labels = load(args.data_dir, split="train")
    dev_data, dev_labels = load(args.data_dir, split="dev")

    # Build model
    if args.model.lower() == "simple-ff":
        model = FeedForward(args.ff_hunits)
    elif args.model.lower() == "simple-cnn":
        model = SimpleConvNN(args.cnn_n1_channels,
                            args.cnn_n1_kernel,
                            args.cnn_n2_kernel)
    elif args.model.lower() == "best":
        model = BestNN(args.best_n1_channels,
                       args.best_n2_channels,
                       args.best_n3_channels)
    else:
        raise Exception("Unknown model type passed in!")

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    if not args.train_my_way:
        for step in range(args.train_steps):
            # run the model and backprop for train steps
            i = np.random.choice(train_data.shape[0], size=args.batch_size, replace=False)
            x = torch.from_numpy(train_data[i].astype(np.float32))
            y = torch.from_numpy(train_labels[i].astype(np.int))

            # Forward pass: Get logits for x
            logits = model(x)
            # Compute loss
            loss = F.cross_entropy(logits, y)
            # Zero gradients, perform a backward pass, and update the weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # every 100 steps, log metrics
            if step % 100 == 0:
                train_acc, train_loss = approx_train_acc_and_loss(model,
                                                                  train_data,
                                                                  train_labels)
                dev_acc, dev_loss = dev_acc_and_loss(model, dev_data, dev_labels)

                step_metrics = {
                    'step': step,
                    'train_loss': loss.item(),
                    'train_acc': train_acc,
                    'dev_loss': dev_loss,
                    'dev_acc': dev_acc
                }

                print(f'On step {step}: Train loss {train_loss} | Dev acc is {dev_acc}')
                logger.writerow(step_metrics)
        # close the log file
        log_f.close()
        # save model
        print(f'Done training. Saving model at {args.model_save}')
        torch.save(model, args.model_save)
    else:
        '''
        MY OPTIMIZATION SCHEME

        Three conditions decide whether to continue training:

        1. Always train for at least 'min_iter' steps, and never more than 'max_iter'.
        2. If dev acc drops by 'stepwise_cushion' or more between measured points (every 100 steps), stop training.
        3. If dev acc has improved by less than 'timesaver_cushion' over the past 1000 steps, stop training.
        '''

        # Set up improving
        last_acc = 0
        improving = True

        # Set up got_time
        last1000 = 0
        got_time = True

        step = 0

        while step <= args.max_iter and (step <= args.min_iter or (improving and got_time)):
            # run the model and backprop for train steps
            i = np.random.choice(train_data.shape[0], size=args.batch_size, replace=False)
            x = torch.from_numpy(train_data[i].astype(np.float32))
            y = torch.from_numpy(train_labels[i].astype(np.int))

            # Forward pass: Get logits for x
            logits = model(x)
            # Compute loss
            loss = F.cross_entropy(logits, y)
            # Zero gradients, perform a backward pass, and update the weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # every 100 steps, log metrics
            if step % 100 == 0:
                train_acc, train_loss = approx_train_acc_and_loss(model,
                                                                  train_data,
                                                                  train_labels)
                dev_acc, dev_loss = dev_acc_and_loss(model, dev_data, dev_labels)

                step_metrics = {
                    'step': step,
                    'train_loss': loss.item(),
                    'train_acc': train_acc,
                    'dev_loss': dev_loss,
                    'dev_acc': dev_acc
                }

                print(f'On step {step}: Train loss {train_loss} | Dev acc is {dev_acc}')
                logger.writerow(step_metrics)

                # Update conditions
                diff = dev_acc - last_acc
                improving = diff > args.stepwise_cushion
                last_acc = dev_acc

                if step % 1000 == 0:
                    got_time = dev_acc - last1000 > args.timesaver_cushion
                    last1000 = dev_acc

            step += 1

        # close the log file
        log_f.close()
        # save model
        print(f'Done training. Saving model at {args.model_save}')
        torch.save(model, args.model_save)