Code Example #1
File: tron.py Project: etinlb/Parallel_Tron
 def __init__(self, location):
   """Initialize all the varibles"""
   super(Game, self).__init__(location)
   self.GRID_SIZEX = 32
   self.GRID_SIZEY = 20
   self.bezelx = 33
   self.bezely = 21
   self.loc = []
   for x in range(self.bezelx):
     self.loc.append([])
     for y in range(self.bezely):
       self.loc[x].append(0)  # 0 means not moved there yet
   print(self.tile)
   self.SCALE = 60
   self.WIDTH = self.GRID_SIZEX * self.SCALE # scale the pixels from gridspace
   self.HEIGHT = self.GRID_SIZEY * self.SCALE
   self.SIZE = (self.WIDTH, self.HEIGHT) 
   self.player1 = LightBike([0,0],  [1,0])
   self.player2 = LightBike([1,1], [-1,0])
   self.score = {'p1':0, 'p2':0}
   pygame.init()
   self.window = pygame.display.set_mode(self.SIZE)
   pygame.mouse.set_visible(False)
   self.image_dict = load_images()
   image_path = 'assets/backgrounds/Meteor_bkgrnd_10080-' + str(self.tile[0]) + '-' + str(self.tile[1]) + '.jpg'
   self.background = pygame.image.load(image_path).convert()
   self.backPos = pygame.Rect((0, 0), (0, 0))
   self.window.blit(self.background, self.backPos)
   self.p1_death_loc = [0,0]
   self.p2_death_loc = [0,0]
   self.p1_died = False
   self.p2_died = False
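The self.loc grid above is an occupancy map: every cell starts at 0 ("not moved there yet"). A minimal sketch of how such a grid is typically consulted each tick (hypothetical method and attribute names, not taken from the project):

    def advance(self, bike, player_id):
        x, y = bike.position            # grid coordinates, assumed attribute
        if self.loc[x][y] != 0:         # cell already claimed by a light trail
            bike.alive = False          # collision: the bike crashes
        else:
            self.loc[x][y] = player_id  # claim the cell for this player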
Code Example #2
  def __init__(self, ip, port):
    pygame.mixer.pre_init(channels=2, buffer=512)
    pygame.init()
    self.window = pygame.display.set_mode((200,200))
    self.image_dict = load_images()
    self.sock_list = []  # filled below, one socket per connected monitor

    self.pause = False
    self.quitting = False
    self.running = True

    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.socket.bind((ip, port))
    self.socket.listen(MONITORS_NUM)

    while len(self.sock_list) != 15:
      open_sock, addr = self.socket.accept()
      self.sock_list.append(open_sock)

    pygame.mixer.init()

    self.mixer = pygame.mixer
    direc = '/home/shared/Parallel_Tron/'
    self.leftSound = self.mixer.Sound(direc+'assets/left.wav')
    self.rightSound = self.mixer.Sound(direc+'assets/right.wav')
    self.upSound = self.mixer.Sound(direc+'assets/up.wav')
    self.downSound = self.mixer.Sound(direc+'assets/down.wav')
    self.explode = self.mixer.Sound(direc+'assets/explode.wav')

    cycle = self.mixer.Sound(direc+'assets/cycle.wav')
    cycle.play(loops=-1, maxtime=0, fade_ms=0)  # loop the engine sound indefinitely
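Two details in this initializer are easy to miss: pygame.mixer.pre_init(channels=2, buffer=512) only takes effect because it runs before pygame.init(), and the small 512-sample buffer favors low audio latency over throughput. The accept loop then blocks until exactly 15 clients have connected, which presumably matches the MONITORS_NUM passed to listen(); cycle.play(loops=-1, ...) loops the engine sound until the program exits.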
Code Example #3
    def __init__(self):
        pygame.init()
        self.player1 = LightBike(PLAYER1_START, 'hor', [1, 0], 'left')
        self.player2 = LightBike(PLAYER2_START, 'hor', [-1, 0], 'right')
        self.new_game_score()
        self.init_locations()
        self.window = pygame.display.set_mode((20, 20))
        self.image_dict = load_images()
        self.sock_list = [[[] for y in range(MONITOR_GRIDY)]
                          for x in range(MONITOR_GRIDX)]
        print(self.sock_list)
        #ips = open('ip-list-one.txt', 'r')
        with open('ip_list.txt', 'r') as f:
            ips = f.read()
        ip_list = [ip for ip in ips.split('\n') if ip != '']
        print(ip_list)

        idx = 0
        print(MONITOR_GRIDX, MONITOR_GRIDY)
        for x in range(0, MONITOR_GRIDX):
            for y in range(0, MONITOR_GRIDY):
                self.sock_list[x][y] = socket.socket(socket.AF_INET,
                                                     socket.SOCK_STREAM)
                self.sock_list[x][y].connect((ip_list[idx], 20000))
                idx += 1

        self.fliped_1x, self.fliped_1y, self.fliped_2x, self.fliped_2y = 4 * [False]
        self.flip_1x, self.flip_1y, self.flip_2x, self.flip_2y = 4 * [False]
        self.el_time = 0
        self.current_level = 1
Code Example #4
  def __init__(self):
    pygame.init()
    self.player1 = LightBike(PLAYER1_START, 'hor', [1,0], 'left' )
    self.player2 = LightBike(PLAYER2_START, 'hor', [-1,0], 'right' )
    self.new_game_score()
    self.init_locations()
    self.window = pygame.display.set_mode((20,20))
    self.image_dict = load_images()
    self.sock_list = [[ [] for y in range(MONITOR_GRIDY)] for x in range(MONITOR_GRIDX)]
    with open('ip_list.txt', 'r') as ips:
        ips.readline()  # skip the comment line at the top of the file
        address = ips.readline().strip()
        ip_list = []
        while address:
            ip_list.append(address)
            address = ips.readline().strip()
    idx = 0
    for x in range(0, MONITOR_GRIDX):
      for y in range(0, MONITOR_GRIDY):
        self.sock_list[x][y] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock_list[x][y].connect((ip_list[idx], 20000))
        idx += 1

    # self.ip_list = [('localhost', 20000), ('localhost', 20001)]#, ('localhost', 20001)]
    self.fliped_1x, self.fliped_1y, self.fliped_2x, self.fliped_2y = 4*[False]
    self.flip_1x, self.flip_1y, self.flip_2x, self.flip_2y = 4*[False]
    self.el_time = 0
    self.current_level = 1
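Code Examples #3 and #4 are two revisions of the same initializer: #3 reads the whole IP file at once and filters empty lines with a comprehension, while #4 reads it line by line after skipping a leading comment line. Both then open one TCP connection per monitor in the same x/y order, so an address's position in ip_list.txt determines which grid cell that monitor drives.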
Code Example #5
File: tron.py Project: USDRCG/LongTailDeathMatch
 def __init__(self, location):
     """Initialize all the varibles"""
     super(Game, self).__init__(location)
     self.GRID_SIZEX = 32
     self.GRID_SIZEY = 20
     self.bezelx = 33
     self.bezely = 21
     self.loc = []
     for x in range(0, self.bezelx):
         self.loc.append([])
         for y in range(0, self.bezely):
             self.loc[x].append(0)  # 0 means not moved there yet
     print(self.tile)
     self.SCALE = 60
     self.WIDTH = self.GRID_SIZEX * self.SCALE  # scale the pixels from gridspace
     self.HEIGHT = self.GRID_SIZEY * self.SCALE
     self.SIZE = (self.WIDTH, self.HEIGHT)
     self.player1 = LightBike([0, 0], [1, 0])
     self.player2 = LightBike([1, 1], [-1, 0])
     self.score = {'p1': 0, 'p2': 0}
     pygame.init()
     self.window = pygame.display.set_mode(self.SIZE)
     pygame.mouse.set_visible(False)
     self.image_dict = load_images()
     image_path = 'assets/backgrounds/Meteor_bkgrnd_10080-' + str(
         self.tile[0]) + '-' + str(self.tile[1]) + '.jpg'
     self.background = pygame.image.load(image_path).convert()
     self.backPos = pygame.Rect((0, 0), (0, 0))
     self.window.blit(self.background, self.backPos)
     self.p1_death_loc = [0, 0]
     self.p2_death_loc = [0, 0]
     self.p1_died = False
     self.p2_died = False
Code Example #6
def check_sanity():

    print("Sanity check")

    sanity_test_path = "{}/sanity_test".format(ROOT)
    if os.path.isdir(sanity_test_path):
        shutil.rmtree(sanity_test_path)
    os.mkdir(sanity_test_path)

    labels = helper.load_labels()

    test_labels = []
    for i in range(5):
        rand_elem = randrange(12000)
        print("Rand item:", rand_elem)
        print("Fit label:", labels[rand_elem])
        test_img = helper.load_images(RESULT, "{}.png".format(rand_elem))
        cv2.imwrite("{}/sanity_{}.png".format(sanity_test_path, i), test_img)
        test_labels.append(labels[rand_elem])
    with open("{}/sanity_labels.txt".format(sanity_test_path), "w") as w:
        for x in test_labels:
            w.write("{}\n".format(x))  # one label per line
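A quick way to eyeball the result of check_sanity is to read the label file back and pair each line with its image (a sketch reusing ROOT and cv2 from the surrounding module):

    sanity_test_path = "{}/sanity_test".format(ROOT)
    with open("{}/sanity_labels.txt".format(sanity_test_path)) as f:
        for i, label in enumerate(f):
            img = cv2.imread("{}/sanity_{}.png".format(sanity_test_path, i))
            print("sanity_{}: {} -> {}".format(i, label.strip(), img.shape))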
Code Example #7
File: pypet_launch.py Project: yunyouzh/hebbianCNN
    # 'conv_dMid'				: [+0.0, +0.1, +0.5],
    # 'conv_dLow'				: [-4.0, -2.0, -1.0]
    'feedf_dHigh': [+2.00, +6.00, +10.0],
    'feedf_dNeut': [-1.00, -0.50, -0.10],
    'feedf_dMid': [+0.00, +0.01, +0.10],
    'feedf_dLow': [-2.00, -1.00, -0.00]

    # 'conv_dMid'			: [-1.0, +0.0, +0.1],
    # 'conv_dLow'			: [-5.0, -2.0, -1.0, +0.0, +1.00],
}
""" load and pre-process images """
images_train, labels_train, images_test, labels_test = helper.load_images(
    # classes 		= np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int),
    classes=np.array([4, 7, 9], dtype=int),
    dataset_train='train',
    dataset_path='/Users/raphaelholca/Documents/data-sets/MNIST',
    pad_size=(parameter_dict['conv_filter_side'] - 1) / 2,
    load_test=True,
    cross_validate='search')
""" create directory to save data """
parameter_dict['pypet'] = True
parameter_dict['verbose'] = 0
parameter_dict['pypet_name'] = parameter_dict['name']
save_path = os.path.join('output', parameter_dict['name'])
pp.check_dir(save_path, overwrite=False)
print_dict = parameter_dict.copy()
print_dict.update(explore_dict)
""" create pypet environment """
env = pypet.Environment(trajectory='explore_perf',
                        log_stdout=False,
                        add_time=False,
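Note that pad_size=(parameter_dict['conv_filter_side'] - 1) / 2 relies on Python 2 integer division; under Python 3 it would need // to stay an integer. The expression also assumes conv_filter_side is odd, so the padding is symmetric.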
Code Example #8
def detection(model, config):
    # Tf Session
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    print("> Building Graph")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            # Define input and output tensors
            tensor_dict = model.get_tensordict([
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'],
                                             [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'],
                                             [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0],
                                             tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0],
                                           [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                           [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, config.HEIGHT,
                    config.WIDTH)
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name(
                    'Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name(
                    'Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name(
                    'Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name(
                    'Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand

            # Timeliner
            if config.WRITE_TIMELINE:
                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                timeliner = TimeLiner()
            else:
                options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
                run_metadata = False

            images = load_images(config.IMAGE_PATH, config.LIMIT_IMAGES)
            timer = Timer().start()
            print('> Starting Detection')
            for image in images:
                if config.SPLIT_MODEL:
                    # split model into separate GPU and CPU session runs
                    masks = None  # No Mask Detection possible yet
                    frame = cv2.resize(cv2.imread(image),
                                       (config.WIDTH, config.HEIGHT))
                    frame_expanded = np.expand_dims(cv2.cvtColor(
                        frame, cv2.COLOR_BGR2RGB),
                                                    axis=0)
                    timer.tic()
                    # GPU Session
                    score, expand = sess.run(
                        [score_out, expand_out],
                        feed_dict={image_tensor: frame_expanded},
                        options=options,
                        run_metadata=run_metadata)
                    timer.tictic()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(
                            run_metadata.step_stats,
                            'test_results/timeline_{}{}{}{}.json'.format(
                                config.OD_MODEL_NAME, '_SM1', config._DEV,
                                config._OPT))
                    timer.tic()
                    # CPU Session
                    boxes, scores, classes, num = sess.run(
                        [
                            tensor_dict['detection_boxes'],
                            tensor_dict['detection_scores'],
                            tensor_dict['detection_classes'],
                            tensor_dict['num_detections']
                        ],
                        feed_dict={
                            score_in: score,
                            expand_in: expand
                        },
                        options=options,
                        run_metadata=run_metadata)
                    timer.toc()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(
                            run_metadata.step_stats,
                            'test_results/timeline_{}{}{}{}.json'.format(
                                config.OD_MODEL_NAME, '_SM2', config._DEV,
                                config._OPT))
                else:
                    # default session
                    frame = cv2.resize(cv2.imread(image),
                                       (config.WIDTH, config.HEIGHT))
                    frame_expanded = np.expand_dims(cv2.cvtColor(
                        frame, cv2.COLOR_BGR2RGB),
                                                    axis=0)
                    timer.tic()
                    output_dict = sess.run(
                        tensor_dict,
                        feed_dict={image_tensor: frame_expanded},
                        options=options,
                        run_metadata=run_metadata)
                    timer.toc()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(
                            run_metadata.step_stats,
                            'test_results/timeline_{}{}{}.json'.format(
                                config.OD_MODEL_NAME, config._DEV,
                                config._OPT))
                    num = output_dict['num_detections'][0]
                    classes = output_dict['detection_classes'][0]
                    boxes = output_dict['detection_boxes'][0]
                    scores = output_dict['detection_scores'][0]
                    if 'detection_masks' in output_dict:
                        masks = output_dict['detection_masks'][0]
                    else:
                        masks = None

                # reformat detection
                num = int(num)
                boxes = np.squeeze(boxes)
                classes = np.squeeze(classes).astype(np.uint8)
                scores = np.squeeze(scores)

                # Visualization
                vis = visualize_objectdetection(
                    frame, boxes, classes, scores, masks, category_index,
                    timer.get_frame(), config.MAX_FRAMES, timer.get_fps(),
                    config.PRINT_INTERVAL, config.PRINT_TH,
                    config.OD_MODEL_NAME + config._DEV + config._OPT,
                    config.VISUALIZE)
                if not vis:
                    break

    cv2.destroyAllWindows()
    timer.stop()
Code Example #9
def segmentation(model, config):
    images = load_images(config.IMAGE_PATH, config.LIMIT_IMAGES)
    # Tf Session + Timeliner
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    if config.WRITE_TIMELINE:
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        timeliner = TimeLiner()
    else:
        options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        run_metadata = False
    timer = Timer().start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            for image in images:
                # input
                frame = cv2.imread(image)
                height, width, channels = frame.shape
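                # 513 below appears to be the model's fixed input resolution
                # (DeepLab exports commonly use 513x513)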
                resize_ratio = 1.0 * 513 / max(width, height)
                target_size = (int(resize_ratio * width),
                               int(resize_ratio * height))
                frame = cv2.resize(frame, target_size)
                timer.tic()
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={
                        'ImageTensor:0':
                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]
                    },
                    options=options,
                    run_metadata=run_metadata)
                timer.toc()
                if config.WRITE_TIMELINE:
                    timeliner.write_timeline(
                        run_metadata.step_stats,
                        'test_results/timeline_{}{}{}.json'.format(
                            config.OD_MODEL_NAME, config._DEV, config._OPT))
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        label = config.LABEL_NAMES[seg_map[tuple(
                            region.coords[0])]]
                        #boxes.append(box)
                        #labels.append(label)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame, box, label)

                vis = visualize_deeplab(
                    frame, seg_map, timer.get_frame(), config.MAX_FRAMES,
                    timer.get_fps(), config.PRINT_INTERVAL, config.PRINT_TH,
                    config.OD_MODEL_NAME + config._DEV + config._OPT,
                    config.VISUALIZE)
                if not vis:
                    break
        cv2.destroyAllWindows()
    timer.stop()
Code Example #10
File: sprites.py Project: jambonmcyeah/jetpack-game
class Zapper(MovingSprite, KillIfOutOfScreenSprite):
    """A class representing zapper sprites, inherits from MovingSprite, KillIfOutOfScreenSprite"""

    IMAGES: typing.Tuple[pygame.Surface, ...] = tuple(
        next(helper.load_images(os.path.join("assets", "sprites", "zapper"))))

    def __init__(self,
                 orientation: bool = True,
                 direction: bool = True,
                 *groups,
                 **kwargs):
        """
        Initializer for the Zapper class
        orientation: on if True, else False
        direction: horizontal if True, else vertical
        """
        super().__init__(image=self.IMAGES[0], *groups, **kwargs)

        self.orientation = orientation
        self.direction = direction

    @classmethod
    def random_spawn(cls, screen: pygame.Surface, *groups, **kwargs):
        """Randomly spawns a zapper in a random location and orientation"""
        instance = cls(screen=screen,
                       position=(0, 0),
                       direction=helper.chance(0.5),
                       *groups,
                       **kwargs)
        instance.position = (screen.get_size()[0] - 1,
                             random.randrange(
                                 0,
                                 screen.get_size()[1] - instance.size[1]))

        return instance

    @property
    def orientation(self) -> bool:
        """Getter for the orientation attribute of this Zapper"""
        return self.__orientation

    @orientation.setter
    def orientation(self, value: bool):
        """Setter for the orientation attribute of this Zapper"""
        self.__orientation: bool = value
        self.__update_image()

    @property
    def direction(self) -> bool:
        """Getter for the direction attribute of this Zapper"""
        return self.__direction

    @direction.setter
    def direction(self, value: bool):
        """Setter for the direction attribute of this Zapper"""
        self.__direction: bool = value
        self.__update_image()

    def __update_image(self):
        """update_image method for this Zapper"""
        try:
            if self.orientation:
                if self.direction:
                    self.image = self.IMAGES[1]
                else:
                    self.image = self.IMAGES[3]
            else:
                if self.direction:
                    self.image = self.IMAGES[0]
                else:
                    self.image = self.IMAGES[2]

        except AttributeError:
            pass
Code Example #11
File: main.py Project: jambonmcyeah/jetpack-game
def main():
    """This function defines the mainline logic for this program"""

    # D - Display
    screen: pygame.Surface = pygame.display.set_mode((1000, 480))
    pygame.display.set_caption("Jetpack Joyride")

    # E - Entities

    # Music
    pygame.mixer.music.load(os.path.join("assets", "audio", "music.wav"))
    pygame.mixer.music.play(-1)

    # Speed
    dx = 8

    # Background

    backgrounds = pygame.sprite.Group(
        sprites.BackgroundSprite(
            screen=screen, images=list(images), velocity=(0, 0))
        for images in helper.load_images(os.path.join("assets", "background")))

    # Players
    player = sprites.Player(screen=screen,
                            position=(round(screen.get_size()[0] * (1 / 8)),
                                      0))
    player.flying = False

    players = pygame.sprite.Group(player)

    # Zappers
    zapper_spacings = (300, 500)
    zappers = pygame.sprite.Group()

    # Scoreboards
    scoreboard = sprites.Scoreboard()
    scoreboards = pygame.sprite.Group(scoreboard)

    # Game Over
    game_over = sprites.TextSprite(position=(0, 0),
                                   font=helper.default_font(48, bold=True),
                                   text="GAME OVER!",
                                   color=pygame.Color(255, 255, 255, 255),
                                   antialias=True)

    game_over.horizontally_center(0, screen.get_size()[0])
    game_over.vertically_center(0, screen.get_size()[1])

    # Groups
    background_sprites = [backgrounds, zappers]
    foreground_sprites = [players, scoreboards]

    all_sprites = [background_sprites, foreground_sprites]
    game_sprites = pygame.sprite.LayeredUpdates(all_sprites)

    # A - Assign Variables
    next_zapper_spacing = random.randint(*zapper_spacings)
    zapper_distance = 0

    clock = pygame.time.Clock()
    keep_going = True

    # Hide the mouse pointer
    pygame.mouse.set_visible(False)

    while keep_going:

        # T - Time
        clock.tick(60)

        # E - Event Handling
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keep_going = False

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    player.flying = True
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_SPACE:
                    player.flying = False

        # Update speed of background sprites
        for group in background_sprites:
            for sprite in group:
                if isinstance(sprite, sprites.MovingSprite):
                    sprite.dx = -dx

        background_speed = -dx

        # Update speed of backgrounds
        for sprite in reversed(list(backgrounds)):
            background_speed *= (3 / 4)
            sprite.dx = round(background_speed)

        # Spawn Zapper
        if zapper_distance > next_zapper_spacing:
            zapper_distance = 0

            zappers.add(
                sprites.Zapper.random_spawn(
                    screen=screen,
                    velocity=(-dx, 0),
                ))

            next_zapper_spacing = random.randint(*zapper_spacings)

        zapper_distance += dx

        # Add new sprites
        game_sprites.add(all_sprites)

        # Check collisions
        # Optimize precise collisions by only checking them if rect collides
        for player, collided_zappers in pygame.sprite.groupcollide(
                players, zappers, False, False).items():
            for zapper in collided_zappers:
                if pygame.sprite.collide_mask(player, zapper):
                    if not player.dead:
                        player.dead = True

        # Check if all players are dead
        if all(map(lambda x: x.dead, players)):
            dx = 0
            pygame.mixer.music.stop()
            game_sprites.add(game_over)

        # Update Scoreboard
        scoreboard.pixels += dx

        # R - Refresh Screen
        game_sprites.update()
        game_sprites.draw(screen)

        pygame.display.flip()

    pygame.mouse.set_visible(True)
    pygame.quit()
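main() assumes the pygame modules are already initialized; a typical entry point for this file would look like the following sketch (not necessarily the project's own code):

    if __name__ == "__main__":
        pygame.init()  # initializes the display, mixer, and font modules
        main()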
Code Example #12
File: inference.py Project: fmigone/GAN-traffic-sign
import json

import config
import helper
from keras.models import model_from_json
from keras.optimizers import Adam
from numpy import genfromtxt

import csv
class_labels = {}
with open('signnames.csv', mode='r') as infile:
    reader = csv.reader(infile)
    class_labels = {key: val for key, val in reader}  # note: the CSV header row is captured too


DATA_SET = 'test'  # {'test', 'train', 'valid'}

# Load images and labels
#x, y = helper.load_data(DATA_SET)
x = helper.load_images(DATA_SET)
# Load model
model_file = config.MODEL_DEFINITION
with open(model_file, 'r') as jfile:
    model = model_from_json(json.loads(jfile.read()))

# Compile model and load weights
model.compile(optimizer=Adam(), loss='categorical_crossentropy',
              metrics=['accuracy'])
model.load_weights(config.MODEL_WEIGHTS)

# Evaluate model performace
print('Evaluating performance on %d samples' % x.shape[0])
#y_cat = np_utils.to_categorical(y, config.NUM_CLASSES)
scores = model.predict(x, verbose=0)
#names = model.metrics_names
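The snippet stops at raw class probabilities. A sketch of mapping them back to sign names through the class_labels dict built above (this assumes scores has shape (N, NUM_CLASSES) and that the CSV's first column holds stringified class ids, so .get() skips anything unexpected such as the header row):

    import numpy as np

    predicted = np.argmax(scores, axis=1)  # most likely class per sample
    for idx in range(min(10, len(predicted))):
        cls = str(predicted[idx])
        print('%d: %s' % (idx, class_labels.get(cls, 'unknown')))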