Esempio n. 1
0
 def __init__(self, name, coord, centre=True):  # eg ('plus',(30,40))
     """Two-state image button loaded from <name>_up.png / <name>_down.png.

     coord is the button centre when centre=True, otherwise its
     top-left corner.
     """
     self._instances.append(self)
     up_img = utils.load_image(name + "_up.png", True)
     down_img = utils.load_image(name + "_down.png", True)
     w = up_img.get_width()
     h = up_img.get_height()
     x, y = coord
     if centre:
         # Remember the centre, then shift to the top-left corner.
         self.cx, self.cy = x, y
         x = x - w / 2
         y = y - h / 2
     else:
         # Given the corner; derive the centre.
         self.cx = x + w / 2
         self.cy = y + h / 2
     self.rect = pygame.Rect(x, y, w, h)
     self.name = name
     self.x = x
     self.y = y
     self.active = True
     self.up = up_img
     self.down = down_img
     self.stay_down = False
Esempio n. 2
0
def main():
    """Entry point: set up pygame, play the menu music and loop the menu."""
    pygame.init()
    screen = pygame.display.set_mode((width, height))
    pygame.display.set_caption(WINDOW_TITLE)

    screen.fill(bg_color)
    clock = pygame.time.Clock()
    font = pygame.font.Font(font_path, fonth)

    img_reglas = load_image(reglas_path)
    img_creditos = load_image(creditos_path)

    pygame.mixer.music.load(menu_music)
    pygame.mixer.music.play()

    while True:
        # Rebuild the menu each pass and dispatch on the chosen index.
        menu = Menu(screen, font, ['Jugar', 'Reglas', 'Creditos', 'Salir'])
        choice = menu.main_loop()

        if choice == 0:
            SC(screen, clock).main_loop()
        elif choice == 1:
            SimpleScreen(screen, img_reglas).main_loop()
        elif choice == 2:
            SimpleScreen(screen, img_creditos).main_loop()
        elif choice == 3:
            sys.exit(0)
Esempio n. 3
0
    def start(self):
        """Mandatory method.

        Loads the sounds, feedback sprites, hint button and prev/next
        buttons this activity needs before the first level starts.
        """
        self.GoodSound=utils.load_sound(os.path.join(self.CPdatadir, 'good.ogg'))
        self.WrongSound=utils.load_sound(os.path.join(self.CPdatadir, 'wrong.ogg'))
        # Prefer a localized "good" image; fall back to the generic thumbs.
        p = os.path.join(self.CPdatadir,'good_%s.png' % self.lang)
        if not os.path.exists(p):
            p = os.path.join(self.CPdatadir,'thumbs.png')
        self.ThumbsUp = SPSpriteUtils.MySprite(utils.load_image(p))

        # Hint button and its rollover image.
        i = utils.load_image(os.path.join(self.my_datadir, 'hint.png'))
        i_ro = utils.load_image(os.path.join(self.my_datadir, 'hint_ro.png'))
        self.HintBut = SimpleTransImgButton(i,i_ro, (370, 200))
        # Only build the "incorrect" marker if the theme config asks for it.
        if int(self.rchash[self.theme]['show_errors']):
            self.wrongImg = SPSpriteUtils.MySprite(utils.load_image(os.path.join(self.my_datadir, 'incorrect.png')))
        else:
            self.wrongImg = None
        # NOTE(review): `next` shadows the builtin; left unchanged here.
        prev = os.path.join(self.my_datadir, 'findit_prev.png')
        prev_ro = os.path.join(self.my_datadir, 'findit_prev_ro.png')
        next = os.path.join(self.my_datadir, 'findit_next.png')
        next_ro = os.path.join(self.my_datadir, 'findit_next_ro.png')
        self.prevnextBut = TransPrevNextButton((370, 460), \
                                              self._cbf_prevnext_button, \
                                              prev, prev_ro, next, next_ro)
        # Theme-specific image directory, falling back to the default set.
        self.imgdir = os.path.join(self.my_datadir, 'images', self.theme)
        if not os.path.exists(self.imgdir):
            self.imgdir = os.path.join(self.my_datadir, 'images','default')
        self.score = 0
        self.AreWeDT = False
        # get language code
        loclang = utils.get_locale_local()[0]
Esempio n. 4
0
 def start_exercise(self):
     """Pick a random image pair, build both displays and activate them.

     Consumes one key from self.ImgDiffHash per call so each pair is
     shown at most once.
     """
     # Vertical position depends on where the core blitted us.
     if self.blit_pos[1] == 0:
         y = 10
     else:
         y = 110

     # dict.keys() returns a view on Python 3, which random.choice
     # cannot index; list() makes this work on both 2 and 3.
     k = random.choice(list(self.ImgDiffHash.keys()))

     v = self.ImgDiffHash[k]
     del self.ImgDiffHash[k]
     ImgA = Img_Display(utils.load_image(k+'A.jpg'), v,\
                                  (self.GoodSound, self.WrongSound), self.wrongImg, self.rchash)
     ImgA.set_position(15, y)
     ImgA.name = 'ImgA'# needed for debugging only
     ImgB = Img_Display(utils.load_image(k+'B.jpg'), v,\
                              (self.GoodSound, self.WrongSound), self.wrongImg, self.rchash)
     ImgB.set_position(435, y)
     ImgB.name = 'ImgB'
     # Each display observes the other so clicks stay in sync.
     ImgA.register_observer(ImgB.observer)
     ImgB.register_observer(ImgA.observer)
     self.currentlist = [ImgA, ImgB]
     self.HintBut.connect_callback(ImgA.show_hint, MOUSEBUTTONUP, self)
     self.HintBut.display_sprite()
     #self.prevnextBut.enable(False)
     self.actives.add(self.HintBut)
     self.actives.add([ImgA, ImgB])
     # prev/next navigation only outside the daily-training mode.
     if self.previous_screen and not self.AreWeDT:
         self.actives.add(self.prevnextBut.get_actives())
         self.prevnextBut.enable(True)
     self.actives.redraw()
     self.ImgA, self.ImgB = ImgA, ImgB
     self.done += 1
Esempio n. 5
0
 def __init__(self):
     """Build the statistics/help side panel.

     Contains the mini-map legend, the selectable player list and the
     labels that show the selected player's status and AI parameters.
     """
     # Setup GUI
     self.playerselection = 0
     defaultStyle.init(gui)
     
     self.desktop = gui.Desktop()
     self.win = gui.Window(position = (800,0), size = (200,600), parent = self.desktop, text = 'Statistics and Help')
     self.lbl = gui.Label(position = (2, 30), text = '---- Mini-map Key ----', parent = self.win)
     
     # Mini-map legend: one (marker image, description) pair per state.
     self.movbtn = gui.OnImageButton(utils.load_image('movebx.png'), position = (15, 50), parent = self.win)
     self.mvlbl = gui.Label(position = (25, 45), text = 'Moving / Living Player', parent = self.win)
     
     self.atkbtn = gui.OnImageButton(utils.load_image('atkbx.png'), position = (15, 70), parent = self.win)
     self.atklbl = gui.Label(position = (25, 65), text = 'Attacking Player', parent = self.win)
     
     self.deadbtn = gui.OnImageButton(utils.load_image('deadbx.png'), position = (15, 90), parent = self.win)
     self.deadlbl = gui.Label(position = (25, 85), text = 'Slain Player', parent = self.win)
     
     self.ovbtn = gui.OnImageButton(utils.load_image('ovbx.png'), position = (15, 110), parent = self.win)
     self.ovlbl = gui.Label(position = (25, 105), text = 'Overlord Player', parent = self.win)
     
     # Selectable player list; selection fills in the labels below.
     self.statslbl = gui.Label(position = (2, 130), text = '---- Player List ----', parent = self.win)
     self.playerstats = gui.ListBox(position = (2, 150), size = (196, 200), parent = self.win)
     self.playerstats.onItemSelected = self.itemSelected
     
     # '#' placeholders are replaced when a player is selected.
     self.lbl_title_pos = gui.Label(position = (5, 350), text='Player (id:#): Status - ', parent = self.win)
     self.lbl_pos = gui.Label(position = (5, 365), text='Location: ( #, # )', parent = self.win)
     self.lbl_health = gui.Label(position = (5, 380), text='Health: # / 25', parent = self.win)
     self.lbl_ai_courage = gui.Label(position = (5, 395), text='AI Courage: #', parent = self.win)
     self.lbl_ai_camper = gui.Label(position = (5, 410), text='AI Camper: #', parent = self.win)
     self.lbl_ai_clingy = gui.Label(position = (5, 425), text='AI Clingy: #', parent = self.win)
     self.lbl_ai_stack = gui.Label(position = (5, 440), text='AI Stack: #', parent = self.win)
     # Disabled image buttons used purely as image holders; [0] takes
     # the first frame of each sliced sprite sheet.
     self.player_img_alive = gui.OnImageButton(utils.load_sliced_sprites(32, 32, 'characters/zelda_atk.png')[0], position = (100,420), parent = self.win, enabled = False)
     self.player_img_dead = gui.OnImageButton(utils.load_sliced_sprites(32, 32, 'characters/zelda_dead.png')[0], position = (100,420), parent = self.win, enabled = False)
Esempio n. 6
0
def main():
    """Entry point: show the intro, then play every level in sequence."""
    pygame.init()
    screen = pygame.display.set_mode(WINDOW)
    pygame.display.set_caption(WINDOW_TITLE)

    intro = load_image(INTRO)
    # NOTE(review): the final screen reuses the INTRO image -- confirm a
    # separate FINAL asset was not intended here.
    final = load_image(INTRO)

    while True:
        Play(screen, intro).play()
        level1 = load_image(NEXT0)
        screen.blit(level1, (0,0))
        pygame.display.flip()
        pygame.time.delay(2000)

        cancionwav = None

        for level in LEVELS:
            bg = load_image(level['bg'])
            cuerdas = [int(x) for x in level['cuerdas']]
            cancion = [int(x) for x in level['cancion']]
            # Stop the previous level's song before loading the next one.
            if cancionwav:
                cancionwav.stop()
            cancionwav = load_sound(level['wav'])
            # Renamed from `next` to avoid shadowing the builtin.
            next_img = load_image(level['next'])

            Play(screen, bg, cancionwav, cuerdas).play()
            lev = Level(screen, cuerdas, cancion, cancionwav, next_img)
            lev.loop()

        Play(screen, final).play()
Esempio n. 7
0
    def __init__(self, names):
        """Build a texture atlas from the block textures in *names*.

        names - one texture base name per cube face; the last one is
        repeated to fill the remaining faces (see comment at the end).
        """
        super(TextureGroupIndividual, self).__init__()
        # One 64x64 tile per name, laid out horizontally.
        atlas = TextureAtlas(64*len(names), 64)
        self.texture = atlas.texture
        self.texture_data = []
        i=0
        for name in names:
            # Loaded block textures are cached module-wide in BLOCK_TEXTURE_DIR.
            if not name in BLOCK_TEXTURE_DIR:
                if G.TEXTURE_PACK != 'default':
                    BLOCK_TEXTURE_DIR[name] = load_image('resources', 'texturepacks', G.TEXTURE_PACK, 'textures', 'blocks', name + '.png')
                else:
                    # NOTE(review): this path has no pack directory segment --
                    # confirm the default textures really live directly
                    # under 'texturepacks/textures'.
                    BLOCK_TEXTURE_DIR[name] = load_image('resources', 'texturepacks', 'textures', 'blocks', name + '.png')

            # Texture failed to load: bail out early.  `return None` from
            # __init__ just exits; the object is left half-initialised.
            if not BLOCK_TEXTURE_DIR[name]:
                return None

            subtex = atlas.add(BLOCK_TEXTURE_DIR[name].get_region(0,0,64,64))
            for val in subtex.tex_coords:
                i += 1
                if i % 3 != 0: self.texture_data.append(val) #tex_coords has a z component we don't utilize
        #Repeat the last texture for the remaining sides
        # (top, bottom, side, side, side, side)
        # ie: ("dirt",) ("grass_top","dirt","grass_side")
        # Becomes ("dirt","dirt","dirt","dirt","dirt","dirt") ("grass_top","dirt","grass_side","grass_side","grass_side","grass_side")
        self.texture_data += self.texture_data[-8:]*(6-len(names))
Esempio n. 8
0
 def _splash_controls(self):
     """Show a splash screen explaining the bat control keys.

     Blocks for four seconds, then sets self.skipssplash so the splash
     is skipped next time.
     """
     Img.screen.fill((0,0,0))
     txt = _("Use these keys on your keyboard to control the bat.")
     # txtfmt wraps the text; this assumes at least two wrapped lines --
     # TODO confirm for short translations.
     txtlist = utils.txtfmt([txt],36)
     s1,spam = utils.text2surf(txtlist[0],18,GREEN, bold=True)
     s2,spam = utils.text2surf(txtlist[1],18,GREEN, bold=True)
     Img.screen.blit(s1,(100,200))
     Img.screen.blit(s2,(100,240))
     pygame.display.update()
     rects = []
     fsize = 24
     
     # Column with up/down arrows; reused for both sides.
     surf = pygame.Surface((60,300))
     surf.blit(utils.load_image(os.path.join(self.my_datadir,'arrow_up.png')),(4,0))
     #surf.blit(utils.load_image(os.path.join(self.my_datadir,'bat.png')),(26,120))
     surf.blit(utils.load_image(os.path.join(self.my_datadir,'arrow_down.png')),(4,240))
     surf_r = surf.convert()# copy for the right side
     
     # Left-hand key letters only appear in a two-human-player game.
     if ONEPLAYER == 0 and PCPLAYER == 0:
         surf.blit(utils.char2surf(self.rc_dic['left_keyup'].upper(),fsize,GREEN, bold=True),(16,70))
         surf.blit(utils.char2surf(self.rc_dic['left_keydown'].upper(),fsize,GREEN, bold=True),(16,190))
     surf_r.blit(utils.char2surf(self.rc_dic['right_keyup'].upper(),fsize,GREEN, bold=True),(16,70))
     surf_r.blit(utils.char2surf(self.rc_dic['right_keydown'].upper(),fsize,GREEN, bold=True),(16,190))
     rects.append(Img.screen.blit(surf,(40,100)))
     rects.append(Img.screen.blit(surf_r,(700,100)))
     pygame.display.update(rects)
     pygame.time.wait(4000)
     self.skipssplash = 1
Esempio n. 9
0
 def pre_level(self,level):
     """Mandatory method.
     Show the 'start next activity' prompt with a start button.
     Return True to call the eventloop after this method is called."""
     self.logger.debug("pre_level called with: %s" % level)
     if not self.runme:
         return
     self.SPG.tellcore_disable_level_indicator()
     self.SPG.tellcore_disable_score_button()
     self.screen.blit(self.backsquare, (50, 110))
     # NOTE(review): the msgid has a grammar slip ("If you ready") --
     # left untouched because changing it would orphan translations.
     txt = [_("If you ready to start the next activity, hit the 'start' button.")]
     txt = utils.txtfmt(txt, 40)
     y = 200
     for t in txt:
         surf = utils.char2surf(t,28,WHITE)
         r = self.screen.blit(surf, (80, y))
         pygame.display.update(r)
         y += 50
     # Renamed from startbutpath/startbutpath_ro: these locals hold
     # loaded surfaces, not file paths.
     start_img = utils.load_image(os.path.join(self.my_datadir, 'start.png'))
     start_img_ro = utils.load_image(os.path.join(self.my_datadir, 'start_ro.png'))
     self.startbutton = SPWidgets.TransImgButton(start_img, start_img_ro, \
                                 (300, 350), fsize=32, text= _("Start"), fcol=WHITE)
     self.startbutton.connect_callback(self._cbf, MOUSEBUTTONDOWN, 'start')
     self.startbutton.set_use_current_background(True)
     self.startbutton.display_sprite()
     self.actives.add(self.startbutton)
     self.start_loop_flag = True
     self.PB.update()
     return True
Esempio n. 10
0
 def start(self):
     """Mandatory method.

     Preloads the win/lose images and sounds, then applies a predefined
     game-play mode from the config file when one is set.
     """
     self.SPG.tellcore_set_dice_minimal_level(6)
     fs = 36
     self.scoreboard = ScoreBoard(self.rc_dic, size=fs)
     Img.winner = utils.load_image(os.path.join(self.my_datadir,'winner.jpg'),1)
     Img.loser = utils.load_image(os.path.join(self.my_datadir,'loser.jpg'),1)
     self.skipstart = None# used when the user sets a predefined game play in the config file
     self.skipsplash = None
     if self.rc_dic['sound'].lower() == 'no':
         # Sound disabled: load an empty file name -- presumably
         # utils.load_sound returns a silent dummy for a bad path;
         # TODO confirm its behaviour for a directory path.
         Snd.pong = utils.load_sound(os.path.join(self.my_datadir,''))
         Snd.winner = utils.load_sound(os.path.join(self.my_datadir,''))
         Snd.goal = utils.load_sound(os.path.join(self.my_datadir,''))
         Snd.bump = utils.load_sound(os.path.join(self.my_datadir,''))
     else:
         Snd.pong = utils.load_sound(os.path.join(self.my_datadir,'pick.wav'))
         # NOTE(review): winner uses load_music while the others use
         # load_sound -- looks deliberate (longer track); confirm.
         Snd.winner = utils.load_music(os.path.join(self.my_datadir,'winner.ogg'))
         Snd.goal = utils.load_sound(os.path.join(self.my_datadir,'goal.wav'))
         Snd.bump = utils.load_sound(os.path.join(self.my_datadir,'bump.wav'))
     #set kind of game play
     # we only check for multi and multipc, anything else is considerd single play
     # which is the default
     if self.rc_dic['gameplay'] == 'multi':
         self.restart([[None,'2']])
         self.skipstart = 1
     elif self.rc_dic['gameplay'] == 'multipc':
         self.restart([[None,'3']])
         self.skipstart = 1
Esempio n. 11
0
    def __init__(self, stage_file="level_1"):
        """Level surface built from a small 'map' image.

        Each pixel of <stage_file>.gif represents a self.ratio-sized
        square in the game world; pixel colors encode terrain/entities
        (see self.colors).
        """
        self.level_data, self.rect = utils.load_image("{0}.gif".format(stage_file))
        self.ratio = 16  # ratio of pixel in stage file / pixel in game
        pygame.Surface.__init__(self, (self.rect.width * self.ratio, self.rect.height * self.ratio))
        self.counter = 0
        self.scrolled = 0
        self.fill((0, 0, 0))
        self.set_colorkey((0, 0, 0))  # black is treated as transparent
        self.limits = []

        # Color legend (RGBA) for pixels in the stage image.
        self.colors = {
            "grass": (0, 0, 0, 255),
            "enemies": (255, 0, 0, 255),
            "miniboss": (0, 0, 255, 255),
            "boss": (0, 255, 0, 255),
            "bg": (229, 229, 229, 255),
        }

        cached_image = False
        # Using the previously generated image for the stage
        # TODO, create hash of the source image for the level, to detect changes in it

        self.rect = self.rect.move((1, 0))
        try:
            # Load the cached image
            cached_image, temp_rect = utils.load_image("{0}_processed.png".format(stage_file))
            self.blit(cached_image, (0, 0))
            # Detect limits
            self.calculate_limits()

        # Python 2 except syntax; on a cache miss the non-cached path
        # presumably follows below this chunk -- cannot tell from here.
        except pygame.error, message:
            print message
Esempio n. 12
0
 def __init__(self, screen_dims):
     """Player dude: base sprite plus left/right shooting frames."""
     Dude.__init__(self, screen_dims)
     base, rect = load_image("res/player_base.png", -1)
     shoot_left, _ = load_image("res/player_shoot_left.png", -1)
     shoot_right, _ = load_image("res/player_shoot_right.png", -1)
     self.default_image = base
     self.rect = rect
     self.left_shoot_image = shoot_left
     self.right_shoot_image = shoot_right
     # Start on the idle frame; negative value means jumps go upward.
     self.image = self.default_image
     self.jump_power = -10
Esempio n. 13
0
 def __init__(self, path, hpath, pos=(0, 0), padding=4,text='',fsize=24, fcol=BLACK, name='', **kwargs):
     """Button which shows an image but has a fully transparent background.
     When a hover event occurs the image is changed.
     path - must be the path to the image file or a pygame surface
     hpath - must be the path to the image file or a pygame surface that will
             be shown when a hover event occurs.
     pos - position to display the box
     rect - Rect indicating the size of the box
     padding - space in pixels around the text
     name - string to indicate this object
     text - when a string is given it will be blitted over the image.
     fsize - fontsize.
     """
     self.text = text
     self.fsize = fsize
     # Accept either a file path or a ready-made surface.
     # (types.StringTypes is Python 2 only: covers str and unicode.)
     if type(path) in types.StringTypes:
         image = utils.load_image(path)
     else:
         image = path
     if hpath:
         if type(hpath) in types.StringTypes:
             self.hs = utils.load_image(hpath)
         else:
             self.hs = hpath
     else:
         # No hover image supplied: derive one by greying the original.
         self.hs = utils.grayscale_image(image.convert_alpha())
     if self.text:
         # Centre the text on both the normal and the hover image.
         s = utils.char2surf(text, fsize, fcol)
         r = image.get_rect()
         sr = s.get_rect()
         sr.center = r.center
         image.blit(s, sr)
         self.hs.blit(s, sr)
     ImgButton.__init__(self, image, pos, padding=padding, name=name)
Esempio n. 14
0
    def __init__(self, screen):
        """Victory screen: background, player ship and a replay button."""
        self.screen = screen
        self.allsprites = pygame.sprite.LayeredDirty()

        # The loaded image fully replaces the background.  (The previous
        # code also allocated a blank screen-sized Surface that was
        # immediately discarded -- removed.)
        self.background, self.rect = utils.load_image('nasa_flame.jpg', -1)
        self.player_ship = ship.ObjShip()

        button_image, button_rect = utils.load_image('buttons_red.png', -1)
        # Three 116x31 sub-rects of the sheet -- presumably the button's
        # visual states; confirm against BtnSimple.
        button_frames = [(0, 0, 116, 31), (116, 0, 116, 31), (233, 0, 116, 31)]
        self.button = simple_button.BtnSimple(button_image, button_rect, button_frames)
        self.button.rect.topleft = 270, 240

        pygame.font.init()
        self.default_font = pygame.font.SysFont("Arial", 48)
        self.screen.blit(self.background, (0,0))

        self.button_label = dirtytext.DirtyText("Play Again", "None", 286, 245, 24, (255, 255, 255))
        self.message_victory = dirtytext.DirtyText("You Won!", "None", 275, 200, 32, (255, 255, 0))

        self.allsprites.add(self.message_victory)
        self.allsprites.add(self.button)
        self.allsprites.add(self.button_label)
        self.allsprites.add(self.player_ship)

        self.allsprites.clear(self.screen, self.background)

        pygame.display.flip()
        self.running = True
        self.alive = True
Esempio n. 15
0
    def setImages(self, rightAndLeftImages=None):
        """
        Load the different images so we can use them later when moving.

        rightAndLeftImages - two file names: [right-facing, left-facing].
        Defaults to the snail1 sprites.  The default is handled with the
        None sentinel to avoid the shared-mutable-default pitfall.
        """
        if rightAndLeftImages is None:
            rightAndLeftImages = ['snail1Right.png', 'snail1Left.png']
        self.rightAndLeftImages = rightAndLeftImages
        # Load the two base sprites; all other orientations are rotations.
        self.image = load_image(rightAndLeftImages[0])
        self.image_down_right = load_image(rightAndLeftImages[0])
        self.image_down_left = load_image(rightAndLeftImages[1])
        self.image_up_left = pygame.transform.rotate(self.image_down_right, 180)
        self.image_up_right = pygame.transform.rotate(self.image_down_left, 180)
        self.image_left_up = pygame.transform.rotate(self.image_down_left, 270)
        self.image_left_down = pygame.transform.rotate(self.image_down_right, 270)
        self.image_right_up = pygame.transform.rotate(self.image_down_right, 90)
        self.image_right_down = pygame.transform.rotate(self.image_down_left, 90)

        # Pick the sprite matching the current gravity direction.
        if self.gravity_direction == Direction.DOWN:
            self.image = self.image_down_right
        if self.gravity_direction == Direction.UP:
            self.image = self.image_up_right
        if self.gravity_direction == Direction.LEFT:
            self.image = self.image_left_up
        if self.gravity_direction == Direction.RIGHT:
            self.image = self.image_right_up
Esempio n. 16
0
 def load_bg(self):
     """Compose the static background from the three panel images."""
     layers = (
         (load_image(FONDO), (0, 0)),
         (load_image(PORTA), (0, 340)),
         (load_image(PLAYBG), (500, 340)),
     )
     self.bg = pygame.Surface(WINDOW)
     for img, pos in layers:
         self.bg.blit(img, pos)
Esempio n. 17
0
 def start(self):
     """Mandatory method.  Preload the hole image, the hole-free
     background and the sound file paths."""
     hole_img = utils.load_image(os.path.join(self.my_datadir, 'hole.png'))
     Hole.img = hole_img
     Hole.size = hole_img.get_size()

     self.backgr_wohole = utils.load_image(os.path.join(self.my_datadir, 'backgr.png'))
     # Sound file paths ('hurra' and 'great' share the same file).
     for attr, fname in (('throw', 'sndt.wav'),
                         ('hurra', 'sndh.wav'),
                         ('great', 'sndh.wav')):
         setattr(Snd, attr, os.path.join(self.my_datadir, fname))
Esempio n. 18
0
 def next_level(self,level,dbmapper):
     """Mandatory method.
     Set up the card layout and load the card images for *level*.
     Return True if there levels left.
     False when no more levels left."""
     self.Sound.dealcard1.play()
     self.logger.debug("next_level called with %s" % level)
     if level > 6: return False # We only have 6 levels
     # Track restarts of the same level.
     if level == self.level:
         self.levelrestartcounter += 1
     self.level = level
     # used to record how many times the same card is shown.
     # we store it into a class namespace to make it globally available.
     Global.selected_cards = {}
     Card.Selected = None
     # db_mapper is a Python class object that provides easy access to the 
     # dbase.
     self.db_mapper = dbmapper
     # make sure we don't have any sprites left in our group
     self.actives.empty()
     # reset the screen to clear any crap from former levels
     self.clear_screen()
     # here we setup the card layout for each level (6 levels), list[0] is level 1 ect
     # (4,2) means 4 collomns of 2 rows 
     level_layout = [(3,2),(4,2),(4,3),(4,4),(5,4),(6,4)]
     # store number of cards into the db table 'cards' col
     a,b = level_layout[level-1]
     self.db_mapper.insert('cards',a*b)
     self.num_of_cards = a*b//2# how many cards do we need in total?
     self.levelupcount = 1
     # we don't keep references to them as we query the sprites themself.
     imagelist = []
     # load all the images we need
     try:
         imgdir = os.path.join(self.my_datadir, self.tile, self.theme)
         # Theme-specific card back with a generic fallback.
         # (dict.has_key is Python 2 only.)
         if self.rchash.has_key(self.theme):
             cardfront = self.rchash[self.theme]['cardfront']
         else:
             cardfront = self.rchash['childsplay']['cardfront']
         emptycard = utils.load_image(os.path.join(imgdir,cardfront))
         
         # Sample random 'A' images; each has a matching 'B' twin file.
         images = glob.glob(os.path.join(imgdir,'*A.*'))
         images = random.sample(images, self.num_of_cards)
         #self.logger.debug("Contents of my_images %s" % myimages)
         for imgfile in images:
             # load and put the images in a list together with their file name - A | B
             # which we use as a id for the sprite later on
             file = os.path.basename(imgfile)
             # The A file
             img = utils.load_image(imgfile)
             imagelist.append((img,file[:-5]))
             # The B file
             img = utils.load_image(imgfile.replace('A','B', 1))
             imagelist.append((img,file[:-5]))
     # Python 2 except syntax (StandardError no longer exists in 3.x).
     except (StandardError,utils.MyError),info:
         self.logger.exception("Can't load images for sprites: %s" % info)
         raise utils.MyError(str(info))# MyError will make the core end this game
Esempio n. 19
0
 def _setup(self, THEME, pos, actives, cbf, usebutton=True):
     """Build the star level indicator surface and show level 1."""
     theme_dir = os.path.join(THEMESPATH, self.theme)
     self.imgstar0 = utils.load_image(os.path.join(theme_dir, 'star0.png'))
     self.imgstar1 = utils.load_image(os.path.join(theme_dir, 'star1.png'))
     self.rectstar = self.imgstar0.get_rect()
     # Fully transparent strip, one star slot per level.
     strip_size = (self.rectstar.w * self.maxlevels, self.rectstar.h)
     self.image = pygame.Surface(strip_size, SRCALPHA)
     self.image.fill((0, 0, 0, 0))
     self.pos = pos
     self._set_level(1)
Esempio n. 20
0
 def init(self, numbers, positions):
     """Create one Block per number and hook up its click callback."""
     self.totalblocks += len(numbers)
     self.blocklist = []
     for num in numbers:
         white_img = utils.load_image(os.path.join(self.datadir, 'w%s.png' % num))
         grey_img = utils.load_image(os.path.join(self.datadir, 'g%s.png' % num))
         block = Block(num, positions[num-1], white_img, self.white, grey_img, self.red)
         self.blocklist.append(block)
         block.connect_callback(self._cbf_block, MOUSEBUTTONDOWN, num)
     # Next block the player is expected to hit.
     self.currentID = 1
Esempio n. 21
0
 def __init__(self, datadir, actives, goodsnd, wrongsnd, observer):
     """Store shared resources and preload the two marker images."""
     self.actives, self.datadir = actives, datadir
     self.good, self.wrong = goodsnd, wrongsnd
     self.observer = observer
     load = utils.load_image
     self.red = load(os.path.join(datadir, 'r.png'))
     self.white = load(os.path.join(datadir, 'w.png'))
     # Running error count and total number of blocks created so far.
     self.errors = 0
     self.totalblocks = 0
Esempio n. 22
0
 def __init__(self):
     """Bartender sprite: starts still in lane 0 with the moving image."""
     pygame.sprite.Sprite.__init__(self)
     self.image_still, self.rect = utils.load_image('tender.png')
     self.image_moving, _ = utils.load_image('tender_moving.png')
     self.image = self.image_moving
     self.cur_lane = 0
     self.num_lanes = 4
     self.state = Bartender.STILL
     # Moving left/right takes the bartender "inside the bar".
     self.inside_bar = 0
Esempio n. 23
0
 def change_image(self):
     """Swap the inventory frame sprite to match the current mode.

     mode 0 = plain inventory, 1 = crafting table, 2 = furnace.
     """
     # Dispatch table instead of if/elif: an unknown mode now raises a
     # clear KeyError instead of leaving `image` unbound (NameError).
     textures = {
         0: 'inventory.png',
         1: 'inventory_when_crafting_table.png',
         2: 'inventory_when_furnace.png',
     }
     image = load_image('resources', 'textures', textures[self.mode])
     self.frame = image_sprite(image, self.batch, 0)
     # Centre horizontally, sit just above the bottom icon row.
     self.frame.x = (self.parent.window.width - self.frame.width) / 2
     self.frame.y = self.icon_size / 2 - 4
Esempio n. 24
0
def _init(M):
    """Initialise main-menu module state on *M*.

    Loads the button art, the default background and the pool of random
    backdrops, then picks one backdrop for this session.
    """
    M.button_image, M.button_highlighted, M.button_disabled = init_button_image()
    M.background_image = load_image('resources', 'textures', 'main_menu_background.png')
    M.rnd_backdrops = ('main_menu_background.png', 'main_menu_background_2.png', 'main_menu_background_3.png',
        'main_menu_background_4.png', 'main_menu_background_5.png', 'main_menu_background_6.png')
    M.backdrop_images = [load_image('resources', 'textures', name)
                         for name in M.rnd_backdrops]
    M.backdrop = random.choice(M.backdrop_images)
Esempio n. 25
0
 def __init__(self, screen):
     """Base menu screen: load shared assets and reset selection state."""
     self.font = utils.load_font('chitown.ttf', 24)
     self.background, _ = utils.load_image('background.png')
     self.logo, _ = utils.load_image('logo.png')
     self.screen = screen
     self.chosen_option = 0
     self.age = 0
     self.selected_option = -1
     # Subclasses inspect the raw input events collected here.
     self.events = []
     self.finished = False
     # Layout constants for rendering the option list.
     self.line_height = 60
     self.left_margin = 152
Esempio n. 26
0
 def __init__(self, path, hpath, text, pos=(0, 0), textpos=2, padding=4, fsize=24,\
               fgcol=BLACK, name='', **kwargs):
     """Button which shows an image and text.
     path - must be the path to the image file.
     hpath - 'rollover' image path.
     pos - position to display the box
     rect - Rect indicating the size of the box
     padding - space in pixels around the text
     name - string to indicate this object
     text - text to display. String will be split on \n.
     textpos - position of the text, 1 means left from the image, 2 means right from the image
     """
     image = utils.load_image(path)
     imageh = utils.load_image(hpath)
     # Render each line twice: normal color and grey for the rollover.
     surflist = []
     surfhlist = []
     for line in text.split('\n'):
         if line == '':
             line = ' '  # keep blank lines from collapsing to zero height
         surflist.append(utils.char2surf(line, fcol=fgcol, fsize=fsize, ttf=TTF))
         surfhlist.append(utils.char2surf(line, fcol=GREY, fsize=fsize, ttf=TTF))
     # Stack the rendered lines onto one transparent text surface.
     w = max([s.get_width() for s in surflist])
     h = surflist[0].get_height() 
     totalh = h * len(surflist)
     textsurf = pygame.Surface((w, totalh), SRCALPHA)
     y = 0
     for s in surflist:
         textsurf.blit(s, (0, y))
         y += h
     # Same stacking for the rollover variant.
     textsurf_h = pygame.Surface((w, totalh), SRCALPHA)
     y = 0
     for s in surfhlist:
         textsurf_h.blit(s, (0, y))
         y += h
     # Combined surface: image and text side by side, vertically centred.
     w = image.get_width() + w + padding * 2
     h = max(image.get_height(),  totalh)
     surf = pygame.Surface((w, h), SRCALPHA)
     hsurf = surf.copy()
     # NOTE(review): integer offsets on Python 2, float on Python 3.
     image_y = (surf.get_height() - image.get_height()) / 2
     text_y = (surf.get_height() - textsurf.get_height()) / 2
     if textpos == 1:
         surf.blit(textsurf, (0, text_y))
         surf.blit(image, (textsurf.get_width() + padding, image_y))
         hsurf.blit(textsurf_h, (0, text_y))
         hsurf.blit(imageh, (textsurf_h.get_width() + padding, image_y))
     else:
         surf.blit(image, (0, image_y))
         surf.blit(textsurf, (image.get_width() + padding, text_y))
         hsurf.blit(imageh, (0, image_y))
         hsurf.blit(textsurf_h, (imageh.get_width() + padding, text_y))
     
     TransImgButton.__init__(self, surf, hsurf, pos=pos, padding=padding, \
                                 fsize=fsize, fcol=fgcol, name=name)
Esempio n. 27
0
 def __init__(self, screen):
     """Background surface with a blinking warning overlay state."""
     # Surface initialiser (original comment said "Sprite" -- it is not).
     pygame.Surface.__init__(self, screen)
     self.convert()
     self.screen = screen
     self.fill((250, 250, 250))
     self.back_image, self.back_rect = utils.load_image('background.jpg')
     self.warning_image, self.warning_rect = utils.load_image('warning.png')
     self.back_rect_init = self.back_rect.copy()
     self.status = Background.IDLE
     # Counter pair used to switch the warning on and off.
     self.status_count = 0
     self.status_count_max = 50
     self.counter = 0
Esempio n. 28
0
def get_func_exprs():
    """
    Prompt for a list of functions the user wants to see graphed.

    As mentioned previously, anything entered remains for the duration
    of the program, so you can graph something, change the settings, and
    return to find your functions still there.
    """
    def change_colors(item):
        # Cycle the clicked color button to the next color in COLOR_LIST.
        # index new color
        item_index = button_entries.index(item)
        COL_POS_LIST[item_index] += 1 
        # reset index if gone too far
        if COL_POS_LIST[item_index] >= len(COLOR_LIST):
            COL_POS_LIST[item_index] = 0
        # create new Button image
        col = item.image.copy()
        col.fill(COLOR_LIST[COL_POS_LIST[item_index]])
        # update Button
        item.set_new_image(col)
        item.color = COLOR_LIST[COL_POS_LIST[item_index]]
        
    box = gui.Container()
    box.add(
        gui.Button(10, 10, utils.load_image("cube.bmp", True), "Back"))
    box.add(
        gui.Button(constants.SCR_WIDTH - 70, 10,
                   utils.load_image("cube.bmp", False), "Graph!"))
    
    # text_entries / button_entries live at module level and persist
    # between calls -- that is what keeps earlier input around.
    box.add(text_entries)
    box.add(button_entries)
    
    while True:
        if pygame.event.peek(QUIT):
            utils.quit()

        box.update(*pygame.event.get())
        for item in box:
            if isinstance(item, gui.Button) and item.clicked:
                item.clicked = False
                if item.text == "Back":
                    return
                elif item.text == "":
                    # Color buttons carry an empty label.
                    change_colors(item)
                elif item.text == "Graph!":
                    # Pair each expression with its chosen color.
                    exprs = [(x.expr_text, y.color) for x, y in zip(text_entries, button_entries) if x.expr_text]
                    draw_graph(exprs)

        blit_background()
        box.draw(SCREEN)
        pygame.display.update()
Esempio n. 29
0
    def __init__ (self,
                  width=IMAGE_SIZE,
                  height=IMAGE_SIZE,
                  frame_color=None,
                  prepare_btn_cb=prepare_btn,
                  method=RESIZE_CUT,
                  image_dir=None,
                  parent=None):
        """Image chooser widget: a framed preview with prev/next arrow
        buttons and a category thumbnail, laid out in a 2x5 gtk.Table."""
        gtk.Table.__init__(self, 2,5,False)
        self._parent = parent
        # (widget, handler_id) pairs, kept so signals can be disconnected.
        self._signals = []
        self.width = width
        self.height = height
        self.image = gtk.Image()
        self.method = method
        #self.set_myownpath(MYOWNPIC_FOLDER)
        # Framed preview spanning the whole top row.
        img_box = BorderFrame(border_color=frame_color)
        img_box.add(self.image)
        img_box.set_border_width(5)
        self._signals.append((img_box, img_box.connect('button_press_event', self.emit_image_pressed)))
        self.attach(img_box, 0,5,0,1,0,0)
        self.attach(gtk.Label(), 0,1,1,2)
        # "Previous" arrow button.
        self.bl = gtk.Button()

        il = gtk.Image()
        il.set_from_pixbuf(load_image(os.path.join(iconpath, 'arrow_left.png')))
        self.bl.set_image(il)

        self.bl.connect('clicked', self.previous)
        self.attach(prepare_btn_cb(self.bl), 1,2,1,2,0,0)

        # Category thumbnail between the two arrows; clicking it emits
        # the category-pressed event.
        cteb = gtk.EventBox()
        self.cat_thumb = gtk.Image()
        self.cat_thumb.set_size_request(THUMB_SIZE, THUMB_SIZE)
        cteb.add(self.cat_thumb)
        self._signals.append((cteb, cteb.connect('button_press_event', self.emit_cat_pressed)))
        self.attach(cteb, 2,3,1,2,0,0,xpadding=10)
        
        # "Next" arrow button.
        self.br = gtk.Button()
        ir = gtk.Image()
        ir.set_from_pixbuf(load_image(os.path.join(iconpath,'arrow_right.png')))
        self.br.set_image(ir)
        self.br.connect('clicked', self.next)
        self.attach(prepare_btn_cb(self.br), 3,4,1,2,0,0)
        self.attach(gtk.Label(),4,5,1,2)
        self.filename = None
        self.show_all()
        self.image.set_size_request(width, height)
        if image_dir is None:
            image_dir = os.path.join(mmmpath, "mmm_images")
        self.set_image_dir(image_dir)
Esempio n. 30
0
def init():  # called by run()
    """Initialise shared display/font globals plus this activity's state.

    Sets up the pygame surface, computes the design-unit scaling factors
    (32x24 logical grid, art authored at 1200x900), loads the fonts and
    pointer, then resets the activity-specific score/help/background state.
    Mutates module globals only; returns nothing.
    """
    random.seed()
    global redraw
    global screen, w, h, font1, font2, clock
    global factor, offset, imgf, message, version_display
    global pos, pointer
    redraw = True
    version_display = False
    screen = pygame.display.get_surface()
    pygame.display.set_caption(app)
    screen.fill((70, 0, 70))
    pygame.display.flip()
    w, h = screen.get_size()
    if float(w) / float(h) > 1.5:  # widescreen
        offset = (w - 4 * h / 3) / 2  # we assume 4:3 - centre on widescreen
    else:
        h = int(0.75 * w)  # allow for toolbar - works to 4:3
        offset = 0
    factor = float(h) / 24  # measurement scaling factor (32x24 = design units)
    imgf = float(h) / 900  # image scaling factor - images built for 1200x900
    clock = pygame.time.Clock()
    if pygame.font:
        # Font sizes scale with the image factor so text matches the artwork.
        t = int(80 * imgf)
        font1 = pygame.font.Font(None, t)
        t = int(96 * imgf)
        font2 = pygame.font.Font(None, t)
    message = ""
    pos = pygame.mouse.get_pos()
    # Custom pointer image replaces the hidden system cursor.
    pointer = utils.load_image("pointer.png", True)
    pygame.mouse.set_visible(False)

    # this activity only
    global score, best, state, ms, message_cxy, bgd, score_cxy, best_cxy
    global help_img, help_on, help_cxy
    score = 0
    best = 0
    state = 1
    # 1 displaying given
    # 2 accepting input
    # 3 right
    # 4 wrong
    ms = pygame.time.get_ticks()
    message_cxy = None  # set in let.py
    bgd = utils.load_image("bgd.png", False)
    cy = sy(19)
    score_cxy = (sx(16), cy)
    best_cxy = (sx(26), cy)
    help_img = utils.load_image("help.png", False)
    help_cxy = (sx(16), sy(10))
    help_on = False
Esempio n. 31
0
File: app.py Progetto: qrzbing/MOOC
# -*- coding:utf-8 -*-
# Top-5 image classification demo with VGG16 (TF1 graph mode).
# NOTE(review): uses raw_input, so this script targets Python 2.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import vgg16
import utils
from Nclasses import labels

# Path of the image to classify
img_path = raw_input('Input the path and image name:')
# Preprocess the image for the network
img_ready = utils.load_image(img_path)
# Print the dimensions of img_ready
# print("img_ready shape", tf.Session().run(tf.shape(img_ready)))
# Expected output: [1 224 224 3]

# Figure for the top-5 bar chart
fig = plt.figure(u"Top-5 预测结果")

with tf.Session() as sess:
    # Placeholder for the input image batch
    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    # Instantiate the VGG16 model
    vgg = vgg16.Vgg16()
    # Build the forward pass of the network
    vgg.forward(images)
    # Feed the image into the softmax node vgg.prob;
    # `probability` is the predicted distribution over the
    # 1000 classes produced by VGG16's forward pass
    probability = sess.run(vgg.prob, feed_dict={images: img_ready})
    # Take the five entries of `probability` with the highest scores
    # (script is truncated here in this chunk)
Esempio n. 32
0
    def gameplay(self):
        """Run the dinosaur-runner game loop, driven by a webcam gesture model.

        Each frame is read from self.cam, resized to self.im_shape and fed to
        self.model; the predicted class (0=Center, 1=Top, 2=Bottom) maps to
        stand / jump / duck.  The loop updates and draws all sprites, handles
        collisions, speeds the game up every 700 ticks, and shows a game-over
        screen that can restart the game.

        NOTE(review): `while startMenu: pass` is a busy-wait, dead code since
        startMenu is always False here.
        NOTE(review): restarting via `self.gameplay()` recurses; repeated
        restarts grow the call stack.
        """
        gamespeed = 4
        startMenu = False
        gameOver = False
        gameQuit = False
        playerDino = Dino(self.screen,44,47)
        # Ground scrolls left at the (negated) game speed.
        new_ground = Ground(self.screen,-1*gamespeed)
        scb = Scoreboard(self.screen)
        highsc = Scoreboard(self.screen,ct.width*0.78)
        counter = 0

        cacti = pg.sprite.Group()
        pteras = pg.sprite.Group()
        clouds = pg.sprite.Group()
        last_obstacle = pg.sprite.Group()

        # Newly constructed sprites auto-register into these groups.
        Cactus.containers = cacti
        Ptera.containers = pteras
        Cloud.containers = clouds

        retbutton_image,retbutton_rect = ut.load_image('replay_button.png',35,31,-1)
        gameover_image,gameover_rect = ut.load_image('game_over.png',190,11,-1)

        # Build the "HI" (high score) label from digits 10 and 11 of the
        # numbers sprite sheet.
        temp_images,temp_rect = ut.load_sprite_sheet('numbers.png',12,1,11,int(11*6/5),-1)
        HI_image = pg.Surface((22,int(11*6/5)))
        HI_rect = HI_image.get_rect()
        HI_image.fill(ct.background_col)
        HI_image.blit(temp_images[10],temp_rect)
        temp_rect.left += temp_rect.width
        HI_image.blit(temp_images[11],temp_rect)
        HI_rect.top = ct.height*0.1
        HI_rect.left = ct.width*0.73

        while not gameQuit:
            while startMenu:
                pass
            while not gameOver:
                if pg.display.get_surface() == None:
                    print("Couldn't load display surface")
                    gameQuit = True
                    gameOver = True
                else:
                    ##########
                    # Camera-driven control: classify the current webcam frame.
                    frame = self.cam.get_frame()
                    frame = cv2.resize(frame,self.im_shape)
                    frame = np.reshape(frame,(1,self.im_shape[0],self.im_shape[1],3))
                    out = self.model.predict(frame)
                    CLS = {0:'Center',1:'Top',2:'Bottom'}
                    print(CLS[int(out[0])],)

                    # Gesture branch (active): 1=jump, 0=stand, 2=duck.
                    if True:
                        for event in pg.event.get():
                            if event.type==pg.QUIT:
                                gameQuit = True
                                gameOver = True

                        if out[0]==1:
                            # Jump only when grounded (bottom at 98% of height).
                            if playerDino.rect.bottom == int(0.98*ct.height):
                                playerDino.isJumping = True
                                if pg.mixer.get_init() != None:
                                    self.jump_sound.play()
                                playerDino.movement[1] = -1*playerDino.jumpSpeed
                        elif out[0]==0:
                            playerDino.isDucking = False
                        elif out[0]==2:
                            if not (playerDino.isJumping and playerDino.isDead):
                                playerDino.isDucking = True

                    # Keyboard branch (disabled): kept for reference/testing.
                    if False:
                        for event in pg.event.get():
                            if event.type == pg.QUIT:
                                gameQuit = True
                                gameOver = True

                            if event.type == pg.KEYDOWN:
                                if event.key == pg.K_SPACE:
                                    if playerDino.rect.bottom == int(0.98*ct.height):
                                        playerDino.isJumping = True
                                        if pg.mixer.get_init() != None:
                                            self.jump_sound.play()
                                        playerDino.movement[1] = -1*playerDino.jumpSpeed

                                if event.key == pg.K_DOWN:
                                    if not (playerDino.isJumping and playerDino.isDead):
                                        playerDino.isDucking = True

                            if event.type == pg.KEYUP:
                                if event.key == pg.K_DOWN:
                                    playerDino.isDucking = False
                    ##########

                # Collision detection: any cactus or ptera kills the dino.
                for c in cacti:
                    c.movement[0] = -1*gamespeed
                    if pg.sprite.collide_mask(playerDino,c):
                        playerDino.isDead = True
                        if pg.mixer.get_init() != None:
                            self.die_sound.play()

                for p in pteras:
                    p.movement[0] = -1*gamespeed
                    if pg.sprite.collide_mask(playerDino,p):
                        playerDino.isDead = True
                        if pg.mixer.get_init() != None:
                            self.die_sound.play()

                # Spawn cacti: always keep at least one, randomly add a second
                # once the last obstacle has cleared 70% of the screen.
                if len(cacti) < 2:
                    if len(cacti) == 0:
                        last_obstacle.empty()
                        last_obstacle.add(Cactus(self.screen,gamespeed,40,40))
                    else:
                        for l in last_obstacle:
                            if l.rect.right < ct.width*0.7 and random.randrange(0,50) == 10:
                                last_obstacle.empty()
                                last_obstacle.add(Cactus(self.screen,gamespeed, 40, 40))

                # Pteras only appear after 500 ticks, with low probability.
                if len(pteras) == 0 and random.randrange(0,200) == 10 and counter > 500:
                    for l in last_obstacle:
                        if l.rect.right < ct.width*0.8:
                            last_obstacle.empty()
                            last_obstacle.add(Ptera(self.screen,gamespeed, 46, 40))

                if len(clouds) < 5 and random.randrange(0,300) == 10:
                    Cloud(self.screen,ct.width,random.randrange(ct.height/5,ct.height/2))

                playerDino.update()
                cacti.update()
                pteras.update()
                clouds.update()
                new_ground.update()
                scb.update(playerDino.score)
                highsc.update(self.high_score)

                if pg.display.get_surface() != None:
                    self.screen.fill(ct.background_col)
                    new_ground.draw()
                    clouds.draw(self.screen)
                    scb.draw()
                    if self.high_score != 0:
                        highsc.draw()
                        self.screen.blit(HI_image,HI_rect)
                    cacti.draw(self.screen)
                    pteras.draw(self.screen)
                    playerDino.draw()

                    pg.display.update()
                self.clock.tick(ct.FPS)

                if playerDino.isDead:
                    gameOver = True
                    if playerDino.score > self.high_score:
                        self.high_score = playerDino.score

                # Difficulty ramp: every 700 ticks the world scrolls faster.
                if counter%700 == 699:
                    new_ground.speed -= 1
                    gamespeed += 1

                counter = (counter + 1)

            if gameQuit:
                break

            # Game-over screen: ESC/close quits, RETURN/SPACE restarts.
            while gameOver:
                if pg.display.get_surface() == None:
                    print("Couldn't load display surface")
                    gameQuit = True
                    gameOver = False
                else:
                    for event in pg.event.get():
                        if event.type == pg.QUIT:
                            gameQuit = True
                            gameOver = False
                        if event.type == pg.KEYDOWN:
                            if event.key == pg.K_ESCAPE:
                                gameQuit = True
                                gameOver = False

                            if event.key == pg.K_RETURN or event.key == pg.K_SPACE:
                                gameOver = False
                                self.gameplay()
                highsc.update(self.high_score)
                if pg.display.get_surface() != None:
                    self.disp_gameOver_msg(retbutton_image,gameover_image)
                    if self.high_score != 0:
                        highsc.draw()
                        self.screen.blit(HI_image,HI_rect)
                    pg.display.update()
                self.clock.tick(ct.FPS)

        pg.quit()
        self.cam.stop()
        quit()
"""
Simple tester for the vgg19_trainable
"""
# import utils
# import os, os.path

# path = "/vgg19/train"

# for f in os.listdir(path):
#     img1 = utils.load_image("./test_data/tiger.jpeg")

import glob
import utils
import numpy as np

img_arr = []
i = 0

for filename in glob.glob('all_tests/*.jpg'):
    img = utils.load_image(filename)
    img_arr.append(img)
    i = i + 1
    print(i)

np.save('test.npy', img_arr)
Esempio n. 34
0
def init():  # called by run()
    """Initialise shared display/font globals plus this activity's state.

    Same boilerplate as the sibling activities: set up the surface, compute
    the 32x24 design-unit scaling factors (art authored at 1200x900), load
    fonts and the pointer, then reset this activity's score/target/help
    globals.  Mutates module globals only; returns nothing.
    """
    random.seed()
    global redraw
    global screen, w, h, font1, font2, font3, clock
    global factor, offset, imgf, message, version_display
    global pos, pointer
    redraw = True
    version_display = False
    screen = pygame.display.get_surface()
    pygame.display.set_caption(app)
    screen.fill((255, 255, 192))
    pygame.display.flip()
    w, h = screen.get_size()
    if float(w) / float(h) > 1.5:  #widescreen
        offset = (w - 4 * h / 3) / 2  # we assume 4:3 - centre on widescreen
    else:
        h = int(.75 * w)  # allow for toolbar - works to 4:3
        offset = 0
    factor = float(h) / 24  # measurement scaling factor (32x24 = design units)
    imgf = float(
        h) / 900  # image scaling factor - all images built for 1200x900
    clock = pygame.time.Clock()
    if pygame.font:
        # Three font sizes, all scaled with the image factor.
        t = int(60 * imgf)
        font1 = pygame.font.Font(None, t)
        t = int(80 * imgf)
        font2 = pygame.font.Font(None, t)
        t = int(40 * imgf)
        font3 = pygame.font.Font(None, t)
    message = ''
    pos = pygame.mouse.get_pos()
    # Custom pointer image replaces the hidden system cursor.
    pointer = utils.load_image('pointer.png', True)
    pygame.mouse.set_visible(False)

    # this activity only
    global level, best, bgd, x0, message_c, scores, scores_c
    global score, target, score_c, target_c, vinc, percent, percent_xy, best_xy
    global glow, ms
    global help_img, help_on, help_cxy
    global count, count_c, complete
    best = 0
    bgd = utils.load_image('bgd.png', False)
    x0 = sx(0)
    message_c = (sx(16), sy(2))
    scores = utils.load_image('scores.png', False)
    scores_c = (sx(5.7), sy(11))
    score = 0
    target = 0
    # Layout anchors for the score/target readouts, in design units.
    y = 1.18 - .3
    score_c = (sx(1.8), sy(y))
    target_c = (sx(1.8), sy(y + 1.32))
    vinc = pygame.Rect(sx(1.17), sy(y + .52), sy(1.3), sy(.16))
    percent = None
    percent_xy = (sx(2.9), sy(y + .08))
    best_xy = (sx(25.5), sy(y - .5))
    glow = utils.load_image('glow.png', True)
    ms = None
    help_img = utils.load_image('help.png', False)
    help_cxy = (sx(16), sy(10.1))
    help_on = False
    count = 0
    count_c = (sx(28.5), best_xy[1] + sy(1.4))
    complete = False
Esempio n. 35
0
def init():  # called by run()
    """Initialise shared display/font globals plus this tic-tac-toe activity.

    Sets up the surface and the 32x24 design-unit scaling, loads fonts and
    the pointer, then loads the board artwork and resets the game state
    (player 1 starts).  Mutates module globals only; returns nothing.
    """
    global redraw
    global screen, w, h, font1, font2, clock
    global factor, offset, imgf, message, version_display
    global pos, pointer
    redraw = True
    version_display = False
    screen = pygame.display.get_surface()
    pygame.display.set_caption(app)
    screen.fill((70, 0, 70))
    pygame.display.flip()
    w, h = screen.get_size()
    if float(w) / float(h) > 1.5:  #widescreen
        offset = (w - 4 * h / 3) / 2  # we assume 4:3 - centre on widescreen
    else:
        h = int(.75 * w)  # allow for toolbar - works to 4:3
        offset = 0
    clock = pygame.time.Clock()
    factor = float(h) / 24  # measurement scaling factor (32x24 = design units)
    imgf = float(
        h) / 900  # image scaling factor - all images built for 1200x900
    if pygame.font:
        t = int(144 * imgf)
        font1 = pygame.font.Font(None, t)
        t = int(96 * imgf)
        font2 = pygame.font.Font(None, t)
    message = ''
    pos = pygame.mouse.get_pos()
    # Custom pointer image replaces the hidden system cursor.
    pointer = utils.load_image('pointer.png', True)
    pygame.mouse.set_visible(False)

    # this activity only
    global wizard, xo, won, lost, x0, y0, dd, ww, sq, grid_img, grid, current, state
    global magician, xo2, magician_grey, xo2_grey, glow_h, glow_v, yes, no, result
    # (x0,y0) top left of grid
    dd = sy(3.76)
    ww = sy(.24)  # line width
    x0 = sx(16) - 2.5 * dd - ww / 2
    y0 = sy(1.5)
    won = 0
    lost = 0
    wizard = utils.load_image('wizard.png', True)
    xo = utils.load_image('xo.png', True)
    xo2 = utils.load_image('xo2.png', True)
    magician_grey = utils.load_image('magician_grey.png', True)
    xo2_grey = utils.load_image('xo2_grey.png', True)
    magician = utils.load_image('magician.png', True)
    sq = utils.load_image('sq.png', True)
    grid_img = utils.load_image('grid.png', True)
    glow_h = utils.load_image('glow_h.png', True)
    glow_v = utils.load_image('glow_v.png', True)
    yes = utils.load_image('yes.png', True)
    no = utils.load_image('no.png', True)
    result = None
    # NOTE(review): this rebinds the global name `grid` (previously the grid
    # module) to a Grid instance; works only on the first call — confirm.
    grid = grid.Grid()
    current = 1  # 1=player 2=PC
    state = 0
Esempio n. 36
0
#coding:utf-8
# Top-5 image classification demo with VGG16 (TF1 graph mode).
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import vgg16
import utils
from Nclasses import labels

img_path = str(input('Input the path and image name:'))
img_ready = utils.load_image(img_path)  # load and preprocess the test image

fig = plt.figure(u"Top-5 预测结果")
with tf.Session() as sess:
    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    vgg = vgg16.Vgg16()
    vgg.forward(images)  # build the forward pass
    # run the forward pass on the test image
    probability = sess.run(vgg.prob, feed_dict={images: img_ready})
    # indices of the five highest-probability classes, best first
    top5 = np.argsort(probability[0])[-1:-6:-1]
    print("top5:", top5)
    values = []
    bar_label = []
    # list the five most likely classes together with their labels
    for n, i in enumerate(top5):
        print("n:", n)
        print("i:", i)
        values.append(probability[0][i])
        bar_label.append(labels[i])
        # probability of belonging to this class
        print(i, ":", labels[i], "----", utils.percent(probability[0][i]))
Esempio n. 37
0
 def __getitem__(self, index):
     """Return the (image, dot-annotation) pair for dataset item *index*.

     Both images are loaded from the VGG Cell dataset and scaled from
     [0, 255] to [0, 1].

     Bug fixes vs. original: the '%03d' path templates were never formatted
     with idx, and the return statement referenced the undefined name `y`
     instead of `Y` (NameError).
     """
     idx = self.ids[index]
     X = load_image('Datasets/VGG_Cell_Dataset/%03dcell.png' % idx) / 255
     Y = load_image('Datasets/VGG_Cell_Dataset/%03ddots.png' % idx) / 255
     return X, Y
Esempio n. 38
0
def init():  # called by main()
    """Initialise shared display/font globals plus this arithmetic activity.

    Sets up the surface and the 32x24 design-unit scaling, loads fonts and
    the pointer, then loads digit/symbol artwork and resets the puzzle state
    (numbers offered, target, score, layout anchors).  Mutates module
    globals only; returns nothing.
    """
    global redraw
    global screen, w, h, font1, font2, clock
    global factor, offset, imgf, message, version_display
    global pos, pointer
    redraw = True
    version_display = False
    screen = pygame.display.get_surface()
    pygame.display.set_caption(app)
    screen.fill((70, 0, 70))
    pygame.display.flip()
    w, h = screen.get_size()
    if float(w) / float(h) > 1.5:  # widescreen
        offset = (w - 4 * h / 3) / 2  # we assume 4:3 - centre on widescreen
    else:
        h = int(.75 * w)  # allow for toolbar - works to 4:3
        offset = 0
    clock = pygame.time.Clock()
    factor = float(h) / 24  # measurement scaling factor (32x24 = design units)
    imgf = float(h) / 900  # image scaling factor - images built for 1200x900
    if pygame.font:
        t = int(64 * imgf)
        font1 = pygame.font.Font(None, t)
        t = int(72 * imgf)
        font2 = pygame.font.Font(None, t)
    message = ''
    pos = pygame.mouse.get_pos()
    # Custom pointer image replaces the hidden system cursor.
    pointer = utils.load_image('pointer.png', True)
    pygame.mouse.set_visible(False)

    # this activity only
    global sp, sp1, sp2
    global nos_k, signs, max_n, buffr, aim, top, level, score, best, best_c
    global magician, sparkle, target, smiley, plus, times, equals, n
    global n_glow, n_pale
    global xy1, cxy2, xy3, offset1, offset2, state
    sp = sy(.3)  # space between digits in single number
    sp1 = sy(2)  # space between numbers
    sp2 = sy(1.5)  # space between numbers and symbols
    nos_k = 3  # number of numbers offered
    signs = ('=', '+', '*')
    max_n = 5  # biggest number
    buffr = []
    aim = 0
    top = []
    level = 1
    score = 0
    best = 0
    equals = utils.load_image('equals.png', True)
    magician = utils.load_image('magician.png', True)
    sparkle = utils.load_image('sparkle.png', True)
    target = utils.load_image('target.png', True)
    smiley = utils.load_image('smiley.png', True)
    plus = utils.load_image('plus.png', True)
    times = utils.load_image('times.png', True)
    best_c = (sx(10.6), sy(20.2))
    # Digit images 0-9 in three variants: normal, glowing, pale.
    n = []  # 0 to 9 images
    n_glow = []  # ... with glow
    n_pale = []
    for i in range(10):
        img = utils.load_image(str(i) + '.png', True)
        n.append(img)
        img = utils.load_image(str(i) + 'g.png', True)
        n_glow.append(img)
        img = utils.load_image(str(i) + 's.png', True)
        n_pale.append(img)
    xy1 = sx(3), sy(3.0)
    cxy2 = sx(4), sy(10)
    xy3 = sx(3), sy(13)
    # Pointer hot-spot offsets for the two interaction rows.
    ph2 = pointer.get_height() / 2
    offset1 = n[1].get_width() / 2, n[0].get_height() - ph2
    offset2 = 0, ph2
    state = 1  # 1 = top line; 2 = ops line; 3 = wrong; 4 = right
# Fine-tuning loop for a VGG variant (TF1 graph mode).  This fragment assumes
# `vgg`, `y_`, `images`, `train_mode`, `learning_rate`, `epoch`, `batch_size`,
# `model_views`, `class_nums`, `train_models`, `train_path`, `train_files`
# and `train_labels` are defined earlier in the script.
# NOTE: the original text had mangled tab/space indentation that would not
# parse; reformatted to the structure the loop logic implies.
vgg.y = tf.nn.softmax(vgg.fc9, name='result')

# Squared-error loss between the softmax output and the one-hot labels.
loss = tf.reduce_sum((vgg.y - y_)**2)
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

saver = tf.train.Saver(max_to_keep=500)

train_batch = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
# '//' keeps these ints on Python 3 (identical to '/' under Python 2 ints).
batch_models = batch_size // model_views
labels = np.zeros((batch_models, class_nums), dtype=int)
perm = np.arange(train_models)

num_batches = train_models * model_views // batch_size

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, './train_models/batch_size_60_epo_8_total_v2.ckpt')
    for epo in range(epoch):
        # Reshuffle the model ordering each epoch.
        np.random.shuffle(perm)
        for i in range(num_batches):
            # Assemble one batch: batch_models models x model_views views each.
            for j in range(batch_models):
                num = i * batch_models + j
                for k in range(model_views):
                    train_batch[j * model_views + k] = utils.load_image(
                        train_path + train_files[perm[num] * model_views + k])
                labels[j] = train_labels[perm[num]]
            _, cost_compute = sess.run(
                [train, loss],
                feed_dict={images: train_batch, y_: labels, train_mode: True})
            print('step %d,loss %f' % (i, cost_compute))
        # Checkpoint after every epoch except the first.
        if epo > 0:
            saver.save(sess, "./train_models/batch_size_{}_epo_{}_total_v2.ckpt".format(batch_size, epo + 8))
Esempio n. 40
0
from engine import hamming_dist, cosine_dist
from prototype import Hashnet
from utils import load_image

if __name__ == "__main__":
    # Two probe images to compare against each other.
    img_a = load_image('/Users/ethan/Desktop/p1.png')
    img_b = load_image('/Users/ethan/Desktop/p2.png')

    print("loading model")
    net = Hashnet()
    net.load('models/hashnet.h5')

    # Extract binary hash codes and real-valued features for both images.
    print("extracting features")
    codes, features = net.extract_features(img_a, img_b)
    print(codes.shape, features.shape)

    # Compare the pair: Hamming distance on codes, cosine on features.
    hamming = hamming_dist(codes[[0]], codes[[1]])
    similarity = cosine_dist(features[[0]], features[[1]])
    print(hamming, similarity)
import numpy as np
import tensorflow as tf

import vgg16
import utils

# Preprocess the two sample images and stack them into one batch of two.
tiger = utils.load_image("./test_data/tiger.jpeg")
puzzle = utils.load_image("./test_data/puzzle.jpeg")

batch = np.concatenate(
    (tiger.reshape((1, 224, 224, 3)), puzzle.reshape((1, 224, 224, 3))), 0)

# Cap GPU memory usage at 70% of the card.
gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as sess:
    images = tf.placeholder("float", [2, 224, 224, 3])
    feed_dict = {images: batch}

    vgg = vgg16.Vgg16()
    with tf.name_scope("content_vgg"):
        vgg.build(images)
        # Report the static output shape of each pooling / FC layer.
        for layer in (vgg.pool1, vgg.pool2, vgg.pool3, vgg.pool4,
                      vgg.pool5, vgg.fc6, vgg.fc7, vgg.fc8):
            print(layer.shape)
def main():
    """Run neural style transfer: parse CLI args, build the VGG19-based graph,
    optimize the output image and save the result.

    Exits early if argument parsing fails.  Raises ValueError for an unknown
    --initial_type (the original fell through and crashed later with a
    NameError on init_image).
    """
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # initiate VGG19 model
    model_file_path = args.model_path + '/' + vgg19.MODEL_FILE_NAME
    vgg_net = vgg19.VGG19(model_file_path)

    # load content image and style image (style resized to match content)
    content_image = utils.load_image(args.content, max_size=args.max_size)
    style_image = utils.load_image(args.style, shape=(content_image.shape[1],content_image.shape[0]))

    # initial guess for output
    if args.initial_type == 'content':
        init_image = content_image
    elif args.initial_type == 'style':
        init_image = style_image
    elif args.initial_type == 'random':
        init_image = np.random.normal(size=content_image.shape, scale=np.std(content_image))
    else:
        raise ValueError("unknown initial_type: %r" % args.initial_type)

    # check input images for style-transfer
    # utils.plot_images(content_image,style_image, init_image)

    # create a map for content layers info: layer name -> loss weight
    CONTENT_LAYERS = {}
    for layer, weight in zip(args.content_layers,args.content_layer_weights):
        CONTENT_LAYERS[layer] = weight

    # create a map for style layers info: layer name -> loss weight
    STYLE_LAYERS = {}
    for layer, weight in zip(args.style_layers, args.style_layer_weights):
        STYLE_LAYERS[layer] = weight


    # open session
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # build the graph (images get a leading batch dimension)
    st = style_transfer.StyleTransfer(session = sess,
                                      content_layer_ids = CONTENT_LAYERS,
                                      style_layer_ids = STYLE_LAYERS,
                                      init_image = add_one_dim(init_image),
                                      content_image = add_one_dim(content_image),
                                      style_image = add_one_dim(style_image),
                                      net = vgg_net,
                                      num_iter = args.num_iter,
                                      loss_ratio = args.loss_ratio,
                                      content_loss_norm_type = args.content_loss_norm_type,
                                      )
    # launch the graph in a session
    result_image = st.update()

    # close session
    sess.close()

    # remove batch dimension
    shape = result_image.shape
    result_image = np.reshape(result_image,shape[1:])

    # save result
    utils.save_image(result_image,args.output)
"""
Simple tester for the vgg19_trainable
"""

import tensorflow as tf

import vgg19_trainable as vgg19
import utils

img1 = utils.load_image("./test_data/tiger.jpeg")
# 1-hot result for tiger
img1_true_result = [1 if i == 292 else 0 for i in xrange(1000)]

batch1 = img1.reshape((1, 224, 224, 3))

with tf.device('/cpu:0'):
    sess = tf.Session()

    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    true_out = tf.placeholder(tf.float32, [1, 1000])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg19.Vgg19('./vgg19.npy')
    vgg.build(images, train_mode)

    # number of variables used: 143667240 variables, i.e. ideal size = 548MB
    print vgg.get_var_count()

    sess.run(tf.initialize_all_variables())

    # test classification
Esempio n. 44
0
# Classify cat.jpg with a frozen VGG16 graph loaded from vgg16.tfmodel.
import tensorflow as tf  # was missing: every call below uses the tf namespace
import utils

with open("vgg16.tfmodel", mode='rb') as f:
    fileContent = f.read()

# Deserialize the frozen graph and map our placeholder onto its input.
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)

images = tf.placeholder("float", [None, 224, 224, 3])

tf.import_graph_def(graph_def, input_map={"images": images})
print("graph loaded from disk")

graph = tf.get_default_graph()

cat = utils.load_image("cat.jpg")

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print("variables initialized")

    batch = cat.reshape((1, 224, 224, 3))
    assert batch.shape == (1, 224, 224, 3)

    feed_dict = {images: batch}

    # The imported graph prefixes node names with "import/".
    prob_tensor = graph.get_tensor_by_name("import/prob:0")
    prob = sess.run(prob_tensor, feed_dict=feed_dict)

    # Redundant: the with-block already closes the session on exit.
    sess.close()
Esempio n. 45
0
    # Load image and sample mask #
    ##############################
    shape = (256, 256)
    params = {
        'maxit': 500,
        'Lips': 1,
        'lambda': 0.01,
        'x0': np.zeros((shape[0] * shape[1], 1)),
        'shape': shape,
        'verbose': True,
        'm': shape[0],
        'rate': 0.4,
        'N': shape[0] * shape[1]
    }
    PATH = 'data/me.jpg'
    image = load_image(PATH, params['shape'])

    im_us, mask = apply_random_mask(image, params['rate'])
    indices = np.nonzero(mask.flatten(order='F'))[0]
    params['indices'] = indices

    # Section (a): Comparison with 500 iterations
    im_l1, time_l1 = reconstruct_l1(image, indices, FISTA, params)
    plot_performance(image, im_us, im_l1, time_l1, 'L1')
    im_tv, time_tv = reconstruct_TV(image, indices, FISTA, params)
    plot_performance(image, im_us, im_tv, time_tv, 'TV')

    # Section (b): Comparison with 5 iterations
    params['maxit'] = 5
    im_l1, time_l1 = reconstruct_l1(image, indices, FISTA, params)
    plot_performance(image, im_us, im_l1, time_l1, '5 iterations L1')
Esempio n. 46
0
        os.makedirs(perimage_folder + 'magnified')
        os.makedirs(perimage_folder + 'difference12')
        os.makedirs(perimage_folder + 'difference20')
        #os.makedirs(perimage_folder+'direction')

    with open(perimage_folder + '%s.txt' % (video), 'w') as txt:
        txt.write('Video: %s\n' % video)
        for f, fr in enumerate(frame):
            txt.write('Frame: %d\n' % (fr))
            for nn in healthy_nn[f]:
                txt.write('%s %d\n' %
                          (h_pos_videos_se[nn], h_pos_frames_sel[nn]))

    for f, fr in enumerate(tqdm(frame)):
        healthy_seq = [
            load_image(cfg.crops_path, h_pos_videos_se[nn],
                       h_pos_frames_sel[nn]) for nn in healthy_nn[f]
        ]
        impaired_seq = [
            load_image(cfg.crops_path, i_pos_videos[nn], i_pos_frames[nn])
            for nn in impaired_nn[f]
        ]
        healthy_res, impaired_res, magnified_res = generator.extrapolate_multiple(
            healthy_seq, h_pos_feat_sel[healthy_nn[f]], impaired_seq,
            i_pos_feat[impaired_nn[f]], [0.0, 1.0, args.lambdas])
        diff_image12, flow_filtered, X, Y, _ = find_differences_cc(
            healthy_res, impaired_res, magnified_res, Th=0.12, scale=20)
        diff_image20, flow_filtered, X, Y, _ = find_differences_cc(
            healthy_res, impaired_res, magnified_res, Th=0.20, scale=20)
        #plt.figure()
        #plt.imshow(impaired_res); _=subplot[4][f].axis('Off')
        #plt.quiver(X, Y, flow_filtered[:,0],flow_filtered[:,1], width=0.1,
Esempio n. 47
0
# Resolve the configured output directory to an absolute path.
conf.output_dir = utils.abspath(conf.output_dir)

# load network architecture module
architecture = utils.load_module(conf.model)

# get max_samples_training random training samples
n_inputs = len(conf.input_training)
files_train_input = [
    utils.get_files_in_folder(folder) for folder in conf.input_training
]
files_train_label = utils.get_files_in_folder(conf.label_training)
_, idcs = utils.sample_list(files_train_label,
                            n_samples=conf.max_samples_training)
# Apply the same sampled indices to every input modality and to the labels.
files_train_input = [np.take(f, idcs) for f in files_train_input]
files_train_label = np.take(files_train_label, idcs)
# (H, W) of the first input/label sample
# NOTE(review): presumably used for resizing later — confirm downstream.
image_shape_original_input = utils.load_image(
    files_train_input[0][0]).shape[0:2]
image_shape_original_label = utils.load_image(files_train_label[0]).shape[0:2]
print(f"Found {len(files_train_label)} training samples")

# get max_samples_validation random validation samples
files_valid_input = [
    utils.get_files_in_folder(folder) for folder in conf.input_validation
]
files_valid_label = utils.get_files_in_folder(conf.label_validation)
_, idcs = utils.sample_list(files_valid_label,
                            n_samples=conf.max_samples_validation)
files_valid_input = [np.take(f, idcs) for f in files_valid_input]
files_valid_label = np.take(files_valid_label, idcs)
print(f"Found {len(files_valid_label)} validation samples")

# parse one-hot-conversion.xml
 def current_frame_image(self, index):
     """Return the image for timeline entry *index*, loaded via utils.load_image.

     Fix vs. original: the debug trace logged the wrong method name
     ('current_frame_index' instead of 'current_frame_image').
     """
     print(f'Timeline::current_frame_image({index})')
     return utils.load_image(self.image_sources[index], numpy.ndarray)
Esempio n. 49
0
    args = get_args()

    images_paths = get_images_paths_to_deblur(args)
    deblurNN = create_network(args)

    n = len(images_paths)
    total_deblur_time = 0
    pbar = tqdm(images_paths, total=n)

    print('\n\n*********************************************************')
    print(f'Saving results to {os.path.realpath(args.out_dir)}')
    print('*********************************************************\n\n')
    for i, image_path in enumerate(pbar):
        pbar.set_description_str(f'Deblurring image {os.path.basename(image_path)}')

        blurry_image = utils.load_image(image_path)

        t = time.time()
        # the result image is a float image in range 0-255
        deblurred_image_float = deblurNN.deblur(blurry_image)
        # the first image usually takes much longer (due to libraries loading and similar stuff) so we discard its time
        if i == 0:
            total_deblur_time += time.time() - t

        deblurred_image = np.round(deblurred_image_float).astype(np.uint8)

        out_image = deblurred_image
        if args.side_by_side:
            out_image = np.concatenate([blurry_image, deblurred_image], axis=1)

        utils.save_image(create_output_filename(args, image_path), out_image)
Esempio n. 50
0
 def __init__(self, path, position):
     """Create a sprite from the image at *path*, placed at *position*.

     The loaded surface is converted with per-pixel alpha so transparency
     in the source image is preserved when blitting.
     """
     pygame.sprite.Sprite.__init__(self)
     self.image = load_image(path).convert_alpha()
     self.position = position
Esempio n. 51
0
        if os.path.splitext(test_label_file)[0].split(
                "/")[-1] == text_classes[cls_i]:
            class_code = [1 if j == cls_i else 0 for j in range(num_classes)]
    with open(test_label_file, "r") as f:
        lines = f.readlines()
        for line in lines:
            test_image_file_name = line.strip("\n")
            print(test_image_file_name)
            test_image_file_label_index[test_image_file_name] = class_code
            # print(test_image_file_name, class_code)
# load all test images
#print (test_image_file_label_index)
for test_image_file_name in os.listdir(path_dataset + "test"):
    # Skip hidden files such as .DS_Store.
    if test_image_file_name.startswith('.'):
        continue
    test_image = utils.load_image(path_dataset + "test/" +
                                  test_image_file_name)
    test_dataset_images.append(test_image)
    #print("load test image:", test_image.shape, test_image_file_name)
    #print(test_image_file_name, test_image_file_label_index[test_image_file_name])
    # One-hot class code previously recorded for this filename.
    test_dataset_labels.append(
        test_image_file_label_index[test_image_file_name])
# convert list into array
#print (test_dataset_labels)
test_dataset_images = np.array(test_dataset_images)
test_dataset_labels = np.array(test_dataset_labels)
#lb = preprocessing.LabelBinarizer()
#test_dataset_labels = lb.fit_transform(test_dataset_labels)
# reshape for tensor: (num_test_size, 224, 224, 3)
test_dataset_images = test_dataset_images.reshape((num_test_size, 224, 224, 3))

if __name__ == '__main__':
    def __init__(self, content_layer_ids, style_layer_ids, content_images,
                 style_images, session, net, num_epochs, batch_size,
                 content_weight, style_weight, tv_weight, learn_rate,
                 save_path, check_period, test_image, max_size, style_name):
        """Set up a multi-style transfer trainer.

        Stores the loss-layer configuration, trims the content set to a
        whole number of batches, records the optimization hyperparameters,
        builds the train/test transform networks, and — when *test_image*
        is given — builds the preview graph used during training.
        """
        self.net = net
        self.sess = session
        self.style_name = style_name
        # sort layers info
        self.CONTENT_LAYERS = collections.OrderedDict(
            sorted(content_layer_ids.items()))
        self.STYLE_LAYERS = collections.OrderedDict(
            sorted(style_layer_ids.items()))

        # input images
        self.x_list = content_images
        mod = len(content_images) % batch_size
        # Drop the trailing images that do not fill a complete batch.  The
        # slice must be guarded: `lst[:-0]` is the EMPTY list, so the
        # original unconditional `self.x_list[:-mod]` discarded every
        # content image whenever the dataset size was an exact multiple of
        # batch_size.
        if mod:
            self.x_list = self.x_list[:-mod]
        self.y_list = style_images

        self.content_size = len(self.x_list)

        # parameters for optimization
        self.num_epochs = num_epochs
        self.content_weight = content_weight
        self.style_weight = style_weight
        self.tv_weight = tv_weight
        self.learn_rate = learn_rate
        self.batch_size = batch_size
        self.check_period = check_period

        # path for model to be saved
        self.save_path = save_path

        # image transform network (train graph plus a reusing test graph)
        self.transform = Transform(len(self.y_list))
        self.tester = Transform(len(self.y_list), 'test')

        # build graph for style transfer
        self._build_graph()

        # test during training
        if test_image is not None:
            self.TEST = True

            # load content image
            self.test_image = load_image(test_image, max_size=max_size)

            # build graph
            self.x_test = tf.placeholder(tf.float32,
                                         shape=self.test_image.shape,
                                         name='test_input')
            self.xi_test = tf.expand_dims(self.x_test,
                                          0)  # add one dim for batch
            self.style_index_test = tf.placeholder(tf.int32,
                                                   shape=(1, ),
                                                   name='test_style_index')
            self.style_index_test_batch = tf.expand_dims(
                self.style_index_test, 0)

            # result image from transform-net
            self.y_hat_test = self.tester.net(
                self.xi_test / 255.0, self.style_index_test_batch
            )  # please build graph for train first. tester.net reuses variables.

        else:
            self.TEST = False
Esempio n. 53
0
 def get_pb(self, path):
     """Return the best available thumbnail for *path*.

     Prefers a "thumb.*" file inside *path*, then falls back to
     "default_thumb.*" in ``self.path``; when neither exists, passes None
     to ``load_image`` (which presumably supplies its own placeholder —
     verify in load_image).
     """
     thumbs = glob(os.path.join(path, "thumb.*"))
     thumbs.extend(glob(os.path.join(self.path, "default_thumb.*")))
     # List comprehension instead of filter(): on Python 3, filter()
     # returns an iterator, so the original `thumbs.append(None)` and
     # `thumbs[0]` would raise.  A list works on Python 2 as well.
     thumbs = [t for t in thumbs if os.path.exists(t)]
     # Sentinel so thumbs[0] is always defined even when no file matched.
     thumbs.append(None)
     return load_image(thumbs[0], THUMB_SIZE, THUMB_SIZE)
    def train(self):
        """Run the multi-style transfer training loop.

        Builds an Adam optimizer over ``self.L_total``, cycles through the
        style images one per iteration, checkpoints every
        ``self.check_period`` global steps (stylizing the preview image when
        ``self.TEST`` is set), and finally exports tflite models at two
        resolutions.
        """
        global_step = tf.contrib.framework.get_or_create_global_step()

        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.L_total, trainable_variables)

        optimizer = tf.train.AdamOptimizer(self.learn_rate)
        train_op = optimizer.apply_gradients(zip(grads, trainable_variables),
                                             global_step=global_step,
                                             name='train_step')
        """ session run """
        self.sess.run(tf.global_variables_initializer())

        # saver to save model
        saver = tf.train.Saver()
        # 1-based index of the style trained on this iteration; advanced at
        # the bottom of the loop and wrapped back to 1 at the top.
        current_style_num = 1
        """ loop for train """
        num_examples = len(self.x_list)
        # get iteration info
        epoch = 0
        iterations = 0
        try:
            while epoch < self.num_epochs:
                while iterations * self.batch_size < num_examples:
                    # Wrap the style index once it has cycled past the last
                    # style image.
                    if current_style_num > len(self.y_list):
                        current_style_num = 1
                    curr = iterations * self.batch_size
                    step = curr + self.batch_size
                    x_batch = np.zeros(self.batch_shape, dtype=np.float32)
                    # Load this batch's content images resized to 256x256 RGB.
                    for j, img_p in enumerate(self.x_list[curr:step]):
                        x_batch[j] = get_img(img_p,
                                             (256, 256, 3)).astype(np.float32)

                    iterations += 1

                    assert x_batch.shape[0] == self.batch_size

                    # NOTE(review): `step` is reused here — it now becomes
                    # the global-step counter returned by the session run,
                    # shadowing the batch-end index computed above.
                    _, L_total, L_content, L_style, L_tv, step = self.sess.run(
                        [
                            train_op, self.L_total, self.L_content,
                            self.L_style, self.L_tv, global_step
                        ],
                        feed_dict={
                            self.y_c: x_batch,
                            self.y_s: self.y_list[current_style_num - 1],
                            self.style_index: np.array([current_style_num - 1])
                        })

                    print(
                        'epoch : %d, iter : %4d, ' % (epoch, step),
                        'L_total : %g, L_content : %g, L_style : %g, L_tv : %f'
                        % (L_total, L_content, L_style, L_tv))

                    # Periodic checkpoint (and optional test-image preview).
                    if step % self.check_period == 0:
                        res = saver.save(self.sess,
                                         self.save_path + '/final.ckpt', step)

                        if self.TEST:

                            output_image = self.sess.run(
                                [self.y_hat_test],
                                feed_dict={
                                    self.x_test:
                                    self.test_image,
                                    self.style_index_test:
                                    np.array([current_style_num - 1])
                                })
                            output_image = np.squeeze(
                                output_image[0])  # remove one dim for batch
                            # Clamp to the valid 8-bit intensity range
                            # before saving.
                            output_image = np.clip(output_image, 0., 255.)

                            save_image(
                                output_image, self.save_path + '/result_' +
                                "%05d" % step + '.jpg')
                    current_style_num += 1
                epoch += 1
                iterations = 0
        except KeyboardInterrupt:
            # Allow Ctrl-C to stop training early; the final export in
            # `finally` still runs.
            pass
        finally:

            # Always save a final checkpoint, then tear the graph down and
            # rebuild a fresh one per resolution to export tflite models.
            saver = tf.train.Saver()
            res = saver.save(self.sess, self.save_path + '/final.ckpt')
            self.sess.close()
            tf.reset_default_graph()

            for image_size in (384, 512):
                content_image = load_image(
                    '../input/examples/examples/content_img/content_2.png',
                    max_size=image_size)

                # open session
                soft_config = tf.ConfigProto(allow_soft_placement=True)
                soft_config.gpu_options.allow_growth = True  # to deal with large image
                sess = tf.Session(config=soft_config)

                # build the graph
                transformer = StyleTransferTester(session=sess,
                                                  model_path=res,
                                                  content_image=content_image,
                                                  n_styles=len(self.y_list))
                transformer.save_as_tflite('{}_{}'.format(
                    self.style_name, image_size))
                print('Saved as tflite!')
                sess.close()
                tf.reset_default_graph()
Esempio n. 55
0
    # NOTE(review): fragment — the `if args.content_dir:` branch that pairs
    # with this `append` starts before this snippet.
    content_paths.append(args.content_path)

if args.style_dir:
    # use a batch of style images
    style_paths = extract_image_names(args.style_dir)
else:
    # use a single style image
    style_paths.append(args.style_path)

print('Number content images:', len(content_paths))
print('Number style images:', len(style_paths))

# Stylize every (content, style) pair; gradients are never needed here.
with torch.no_grad():

    for i in range(len(content_paths)):
        content = load_image(content_paths[i])
        content = preprocess(content, args.content_size)
        content = content.to(device)

        for j in range(len(style_paths)):
            style = load_image(style_paths[j])
            style = preprocess(style, args.style_size)
            style = style.to(device)

            if args.synthesis == 0:
                output = style_transfer(content, style)
                output = deprocess(output)

                if len(content_paths) == 1 and len(style_paths) == 1:
                    # used a single content and style image
                    # NOTE(review): the statement below is truncated in this
                    # snippet — the format-argument tuple is cut off.
                    save_path = '%s/%s.%s' % (args.save_dir, args.save_name,
Esempio n. 56
0
import numpy as np
import tensorflow as tf

import vgg16
import utils

import os

# Collect every .jpg under the test directory.
path = './test_data/'
imlist = [
    os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')
]

# Load each image and give it a leading batch dimension of 1 so each entry
# has shape (1, 224, 224, 3).
image_array = [
    utils.load_image(img_path).reshape((1, 224, 224, 3)) for img_path in imlist
]

# Stack the single-image batches along axis 0 into one (N, 224, 224, 3) batch.
batch = np.concatenate(image_array, 0)
Esempio n. 57
0
# -*- coding: utf-8 -*-

import tensorflow as tf

import vgg19_trainable as vgg19
import utils

# Load one example image and reshape it into a single-image batch.
img1 = utils.load_image("./tiger.jpeg")
img1_true_result = [0, 1, 0, 0, 0]  # 1-hot result for tiger

batch1 = img1.reshape((1, 224, 224, 3))

# Build and exercise the trainable VGG19 entirely on CPU.
with tf.device('/cpu:0'):
    sess = tf.Session()

    # Placeholders: one 224x224 RGB image, a 5-class one-hot target, and a
    # bool switching train/inference behavior (presumably dropout — verify
    # in vgg19_trainable).
    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    true_out = tf.placeholder(tf.float32, [1, 5])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg19.Vgg19()
    vgg.build(images, train_mode)
    print('build finished')
    # print number of variables used: 143667240 variables, i.e. ideal size = 548MB
    print(vgg.get_var_count())

    sess.run(tf.global_variables_initializer())

    # test classification
    prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
    print(prob)
    # simple 1-step training
    # NOTE(review): the snippet is truncated here — the training step that
    # this comment announces is not part of this fragment.
def load_val_batch(start, finish):
    """Return (images, labels) arrays for validation samples [start:finish).

    Images are loaded from the module-level ``val_set`` path list; labels
    come from the parallel ``val_set_labels`` list.
    """
    imgs = np.array([utils.load_image(p) for p in val_set[start:finish]])
    # Slice directly — the original wrapped this in a no-op list
    # comprehension ([i for i in ...]).
    labels = np.array(val_set_labels[start:finish])
    return imgs, labels
Esempio n. 59
0
 def __getitem__(self, idx):
     """Return the (image, target) pair at position *idx*, applying the optional transform to the image."""
     image = utils.load_image(self.path[idx])
     if self.transform:
         image = self.transform(image)
     return image, self.target[idx]
def train(args):
    """Train a TransformerNet for fast neural style transfer.

    Loads the content dataset, precomputes the Gram matrices of the style
    image's VGG16 features once, then optimizes the transformer with a
    weighted content + style perceptual loss, checkpointing periodically and
    saving the final model under ``args.save_model_dir``.

    NOTE(review): written against a pre-0.4 PyTorch API (``Variable``,
    ``loss.data[0]``, ``transforms.Scale``); it would need ``.item()`` and
    ``transforms.Resize`` on modern torch — confirm the targeted version.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Content pipeline: resize/crop to image_size and scale pixels to 0-255.
    transform = transforms.Compose([
        transforms.Scale(args.image_size),
        transforms.CenterCrop(args.image_size),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)

    transformer = TransformerNet(args.shiftnet)
    optimizer = Adam(transformer.parameters(), args.lr)
    mse_loss = torch.nn.MSELoss()

    # Frozen VGG16 used only as a fixed feature extractor for the losses.
    vgg = Vgg16(requires_grad=False)
    style_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    style = utils.load_image(args.style_image, size=args.style_size)
    style = style_transform(style)
    # Tile the single style image across the batch dimension.
    style = style.repeat(args.batch_size, 1, 1, 1)

    if args.cuda:
        transformer.cuda()
        vgg.cuda()
        style = style.cuda()

    # The style targets (Gram matrices) are fixed, so compute them once
    # before the training loop.
    style_v = Variable(style)
    style_v = utils.normalize_batch(style_v)
    features_style = vgg(style_v)
    gram_style = [utils.gram_matrix(y) for y in features_style]

    for e in range(args.epochs):
        transformer.train()
        agg_content_loss = 0.
        agg_style_loss = 0.
        count = 0
        for batch_id, (x, _) in enumerate(train_loader):
            n_batch = len(x)
            count += n_batch
            optimizer.zero_grad()
            x = Variable(x)
            if args.cuda:
                x = x.cuda()

            y = transformer(x)

            y = utils.normalize_batch(y)
            x = utils.normalize_batch(x)
            features_y = vgg(y)
            features_x = vgg(x)

            # Content loss: match mid-level VGG features of output vs input.
            content_loss = args.content_weight * mse_loss(
                features_y.relu2_2, features_x.relu2_2)

            style_loss = 0.
            for ft_y, gm_s in zip(features_y, gram_style):
                gm_y = utils.gram_matrix(ft_y)
                # Slice to n_batch so the (possibly smaller) last batch of
                # an epoch still lines up with the tiled style targets.
                style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
            style_loss *= args.style_weight

            total_loss = content_loss + style_loss
            total_loss.backward()
            optimizer.step()

            agg_content_loss += content_loss.data[0]
            agg_style_loss += style_loss.data[0]

            if (batch_id + 1) % args.log_interval == 0:
                mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
                    time.ctime(), e + 1, count, len(train_dataset),
                    agg_content_loss / (batch_id + 1),
                    agg_style_loss / (batch_id + 1),
                    (agg_content_loss + agg_style_loss) / (batch_id + 1))
                print(mesg)

            if args.checkpoint_model_dir is not None and (
                    batch_id + 1) % args.checkpoint_interval == 0:
                # Checkpoints are saved from CPU so they load anywhere; the
                # model is moved back to GPU to resume training.
                transformer.eval()
                if args.cuda:
                    transformer.cpu()
                ckpt_model_filename = "ckpt_epoch_" + str(
                    e) + "_batch_id_" + str(batch_id + 1) + ".pth"
                ckpt_model_path = os.path.join(args.checkpoint_model_dir,
                                               ckpt_model_filename)
                torch.save(transformer.state_dict(), ckpt_model_path)
                if args.cuda:
                    transformer.cuda()
                transformer.train()

    # save model
    transformer.eval()
    if args.cuda:
        transformer.cpu()
    # Encode the run's settings into the saved filename.
    save_model_filename = "epoch_" + str(args.epochs) + "_" + str(
        time.ctime()).replace(' ', '_') + "_" + str(
            args.content_weight) + "_" + str(args.style_weight)
    if args.shiftnet:
        save_model_filename += "_shiftnet"
    save_model_filename += ".model"

    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    torch.save(transformer.state_dict(), save_model_path)

    print("\nDone, trained model saved at", save_model_path)