Example #1
0
def cacheImageCallBack():
    """Reload the APT image from the on-disk cache and refresh the preview."""
    logScreen("Cargando imagen desde caché.")
    decoded = apt.decode(data.filename, cache=True)
    data.matrix = decoded
    # Split the composite image into its two transmission channels.
    data.frameA = utils.get_frame(decoded, 'A')
    data.frameB = utils.get_frame(decoded, 'B')
    logScreen("Imagen cargada desde caché.")
    previewImageCallBack(1)
Example #2
0
def calibrateImageCallBack():
    """Calibrate the decoded APT matrix and refresh both frames and preview."""
    logScreen("Calibrando imagen...")
    # Either of the two telemetry frames can be used; frame "A" is chosen.
    calibrated = cal.calibrate(
        data.matrix, frame_name="A", debug=False, cache=False
    )
    data.matrix = calibrated
    data.frameA = utils.get_frame(calibrated, 'A')
    data.frameB = utils.get_frame(calibrated, 'B')
    data.image = Image.fromarray(calibrated)
    logScreen("Imagen calibrada correctamente.")
    previewImageCallBack(1)
Example #3
0
def decodeImageCallBack():
    """Prompt for an APT WAV recording, decode it and refresh the preview."""
    root.filename = ""
    chosen = tkinter.filedialog.askopenfilename(
        initialdir=dir,
        title="Select file",
        filetypes=(("wav files", "*.wav"), ("all files", "*.*")))
    root.filename = chosen
    logScreen("Decodificando imagen a partir de la señal APT " + chosen)
    decoded = apt.decode(chosen, cache=False)
    data.matrix = decoded
    data.frameA = utils.get_frame(decoded, 'A')
    data.frameB = utils.get_frame(decoded, 'B')
    logScreen("Imagen APT generada correctamente.")
    previewImageCallBack(1)
Example #4
0
def filterImageCallBack():
    """Apply a mean (disk) filter of user-chosen size to the current image.

    Prompts the user for the disk size (1-100), filters ``data.matrix``,
    regenerates both channel frames and refreshes the preview. Does nothing
    if the dialog is cancelled.
    """
    size = tkinter.simpledialog.askinteger("Input",
                                           "Disk size",
                                           parent=root,
                                           minvalue=1,
                                           maxvalue=100)
    # askinteger returns None when the user cancels; bail out instead of
    # crashing inside mean_filter with a None size.
    if size is None:
        return
    data.matrix = utils.mean_filter(data.matrix, size)
    data.frameA = utils.get_frame(data.matrix, 'A')
    data.frameB = utils.get_frame(data.matrix, 'B')
    img = Image.fromarray(data.matrix)
    data.image = img
    logScreen("Imagen filtrada.")
    previewImageCallBack(1)
Example #5
0
def test_env(args):
    """Render the FetchPickAndPlace scene and overlay object/goal pixels."""
    env = gym.make('FetchPickAndPlace-v1')
    env.seed(args.seed)

    # Projection matrix from world coordinates to camera pixels.
    M = np.load('tmp_data/proj.npy')
    obs = env.reset()

    # Ratio between the rendered 64-px frame and the 300-px camera plane.
    scale = 64.0 / 300.0

    # Homogeneous world coordinates (x, y, z, 1) as float32.
    obj_h = np.array([obs['achieved_goal'][0], obs['achieved_goal'][1],
                      obs['achieved_goal'][2], 1]).astype(np.float32)
    goal_h = np.array([obs['desired_goal'][0], obs['desired_goal'][1],
                       obs['desired_goal'][2], 1]).astype(np.float32)

    obj_px = M.dot(obj_h)[:2] * scale
    goal_px = M.dot(goal_h)[:2] * scale

    plt.imshow(utils.get_frame(env))
    plt.scatter(obj_px[0], obj_px[1], color='y')
    plt.scatter(goal_px[0], goal_px[1], color='g')
    plt.show()
Example #6
0
    def need_refuel(self):
        """Inspect the fuel-gauge region of the screen to decide if the ship
        needs refuelling.

        Returns:
            tuple: (need_refuel: bool, debug_image or None, bar_width: int) —
            need_refuel is True when the detected red bar is narrower than
            ``opt['min_refuel']``.
        """
        debug = self.debug
        opt = self.ship_config['refuel']

        # Crop the configured fuel-gauge region from a fresh screen grab.
        frame = get_frame()
        crop = frame[opt['need_refuel_crop_y1']:opt['need_refuel_crop_y2'],
                     opt['need_refuel_crop_x1']:opt['need_refuel_crop_x2']]

        # Zero the first two channels, keeping only the last one (red,
        # assuming BGR order — TODO confirm), then threshold for
        # near-saturated values.
        red = crop.copy()
        red[:, :, 0] = 0
        red[:, :, 1] = 0
        lower_mask = np.array([0, 0, 252])
        upper_mask = np.array([0, 0, 255])
        gray = cv2.inRange(red, lower_mask, upper_mask)

        # Erode to drop isolated noise pixels before contour detection.
        kernel = np.ones((4, 4), np.uint8)
        erosion = cv2.erode(gray, kernel, iterations=1)

        # NOTE(review): indexing [1] matches the OpenCV 3.x findContours
        # return signature; OpenCV 4.x returns (contours, hierarchy).
        contours = cv2.findContours(erosion, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[1]

        # Width of the largest contour's bounding box approximates the
        # remaining fuel level (drawContours also marks it on `crop`).
        w = 0
        contour = None
        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            cv2.drawContours(crop, [contour], 0, (255, 255, 0), 1)
            w = cv2.boundingRect(contour)[2]

        debug_image = None
        if debug:
            debug_image = join_images(
                crop, cv2.cvtColor(erosion, cv2.COLOR_GRAY2RGB))

        return opt['min_refuel'] > w, debug_image, w
Example #7
0
 def recv_frame(self, dt):
     """Fetch frame ``self.i_frame``, save and display it, then advance.

     Scheduled callback (``dt`` is the clock delta, unused). When the frame
     source is exhausted ``get_frame`` returns None and the counter wraps
     back to 0.
     """
     frame = utils.get_frame(self.i_frame)
     if (frame is not None):
         # Only touch frame attributes once we know a frame exists: the
         # original printed frame.shape before the None check, raising
         # AttributeError at end of stream.
         print(frame.shape)
         utils.save_image('foo.png', frame)
         self.ids.image_source.reload()
         self.i_frame += 1
     else:
         self.i_frame = 0
Example #8
0
def test_env(args):
    """Load a pickled environment snapshot and display its rendered frame.

    ``args`` is accepted for signature parity with the gym-based variant but
    is unused here.
    """

    # env = gym.make('FetchPickAndPlace-v1')
    # env.seed(args.seed)

    # NOTE(review): unpickling executes arbitrary code — only safe with a
    # trusted snapshot file. The file handle is also never explicitly closed.
    env = pickle.load(open('tmp_env_state/env.pickle', 'rb'))

    #env.reset()

    plt.imshow(utils.get_frame(env))
    plt.show()
Example #9
0
 def step(self, action):
     """Advance the episode by one action.

     Applies the pose change, renders from the new pose, carves the volume,
     and slides the new frame into the 3-channel rolling state.

     Returns:
         tuple: (state, reward, done, info_dict) in the gym step convention.
     """
     self.count_steps+=1
     self.change_pose(action)
     im = self.vscan.render(self.x, self.y, self.z, self.rx, self.ry, self.rz)
     self.space_carve(im)
     fr = ut.get_frame(im)
     # Slide the frame window: drop channel 0, keep channels 1 and 2, append
     # the new frame, then move channels back to the last axis.
     self.state=np.array([self.state[:,:,1],self.state[:,:,2], fr]).transpose(1,2,0)
     reward, done = self.get_reward()
     # Truncate the episode after max_steps regardless of task completion.
     if self.count_steps > self.max_steps:
         done = True
     return self.state, reward, done, {}
Example #10
0
    def test(self):
        """Run the trained policy in the environment indefinitely.

        Restores the model, switches it to validation mode, then repeatedly
        samples actions from the policy (or takes human input in HUMAN_MODE),
        steps the environment, and tracks a running discriminator accuracy.
        Runs forever; terminate externally.
        """

        with tf.Session() as sess:

            self.load(sess)

            sess.run(self.model.set_validation_mode)

            # tf.train.start_queue_runners(sess)

            while True:

                if self.reset:

                    self.init_env()

                self.env.render()

                # Frame buffer -> network input layout (batch-first).
                x = np.asarray(self.x_buffer).swapaxes(0, 1)

                a_logits, d_logits = sess.run([self.a_logits, self.d_logits],
                                              {self.x: x})

                a_logits = np.clip(a_logits, -MAX_A_LOGITS, MAX_A_LOGITS)

                a_t = utils.sample(a_logits[0], 10)

                if HUMAN_MODE:

                    a_t = self.human_input()

                # Snapshot the emulator state so create_adv can restore it
                # while exploring alternative actions.
                clone_s = self.env.env.clone_full_state()

                clone_a = a_t

                state = self.interact(a_t, SKIP_FRAME)

                # interact returns None at end of episode; reset next loop.
                if state is None:

                    self.reset = True

                    continue

                self.x_buffer.append(
                    utils.get_frame(state, C_IN, HEIGHT, WIDTH))

                adv_vec = self.create_adv(self.x_buffer, d_logits[0][0],
                                          clone_s, clone_a, sess, False)

                # Exponential moving average of discriminator accuracy.
                self.fake_acc = 0.9 * self.fake_acc + 0.1 * self.calc_accuracy(
                    d_logits, 1)

                print("acc: %f, adv_tilda: %s" % (self.fake_acc, adv_vec))
Example #11
0
    def init_env(self):
        """Reset the environment, warm it up with four fixed-action steps,
        and rebuild the frame buffer from the last observed state."""
        self.env.reset()

        # Step action 1 a few times so `state` holds a real observation
        # before the buffer is seeded.
        for _ in range(4):
            state, _, done, info = self.env.step(1)

        first_frame = utils.get_frame(state, C_IN, HEIGHT, WIDTH)
        self.x_buffer = utils.Buffer(N_FRAMES, first_frame)

        self.reset = False
Example #12
0
def filtrarMedianaCallback():
    """Run the median filter with user-chosen sensitivity and pass count.

    Prompts for the filter sensitivity (float, 0-255) and the number of
    passes (int, 1-10), filters ``data.matrix``, regenerates both channel
    frames and refreshes the preview. Does nothing if either dialog is
    cancelled.
    """
    n1 = tkinter.simpledialog.askfloat("Input",
                                       "Sensitividad",
                                       parent=root,
                                       minvalue=0.,
                                       maxvalue=255.)
    # Dialogs return None on cancel; previously this crashed downstream.
    if n1 is None:
        return
    # maxvalue was a float (10.) on an integer dialog; use an int.
    n2 = tkinter.simpledialog.askinteger("Input",
                                         "Número de pasadas",
                                         parent=root,
                                         minvalue=1,
                                         maxvalue=10)
    if n2 is None:
        return

    data.matrix = denoise.filtro_mediana(data.matrix,
                                         sensitivity=n1,
                                         pasadas=n2)
    data.frameA = utils.get_frame(data.matrix, 'A')
    data.frameB = utils.get_frame(data.matrix, 'B')
    img = Image.fromarray(data.matrix)
    data.image = img
    logScreen("Imagen filtrada correctamente.")
    previewImageCallBack(1)
Example #13
0
 def reset(self):
    """Start a new episode: rebuild the space carver, randomize the camera
    pose and return the initial stacked state.

    Returns:
        np.ndarray: H x W x 3 state — the first rendered frame repeated on
        all three channels.
    """
    del(self.sc)
    self.set_sc()
    self.count_steps = 0
    self.cd=5 #Compute the real d
    # Random spherical camera pose indices.
    self.i_theta = np.random.randint(0,self.N_theta)
    self.i_phi = np.random.randint(0,self.N_phi)
    self.update_pose()
    # Render from the new pose, carve the volume, extract the frame.
    im = self.vscan.render(self.x, self.y, self.z, self.rx, self.ry, self.rz)
    self.space_carve(im)
    fr = ut.get_frame(im)
    # Initial state stacks the same frame on all 3 channels.
    self.state = np.array([fr,fr,fr]).transpose(1,2,0)
    return self.state
Example #14
0
    def select_first_star(self, use_template=False):
        """Open the galaxy map and select the first star on the route.

        Args:
            use_template: when True, template-match the lock/unlock icon in
                the selection menu before confirming; otherwise confirm
                blindly after a short delay.

        Returns:
            tuple: (result: bool, debug_image or None) — result is True when
            a selection is believed to have been made.
        """
        debug = self.debug

        result = False

        # Open the map/menu, then navigate with D/S/W key taps.
        click_keys([Buttons.BUTTON_1], 0.1)
        time.sleep(1)

        click_keys([Buttons.D], 0.1)

        click_keys([Buttons.S], 0.1)
        click_keys([Buttons.W], 1)

        click_keys([Buttons.SPACE], 0.1)

        debug_image = None

        if use_template:
            opt = self.ship_config['menu']

            time.sleep(1)

            # Grab the selection-menu region and binarize it (Otsu).
            frame = get_frame()
            crop = frame[opt['select_menu_y1']:opt['select_menu_y2'],
                         opt['select_menu_x1']:opt['select_menu_x2']]

            gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
            gray = cv2.threshold(gray, 0, 255,
                                 cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

            # "unlock" needs one extra W press before confirming; "lock"
            # confirms directly.
            if match_template(gray, self.ship_dir + '/images/unlock.png',
                              0.75)[0]:
                click_keys([Buttons.W], 0.1)
                click_keys([Buttons.SPACE], 0.1)
                result = True
            elif match_template(gray, self.ship_dir + '/images/lock.png',
                                0.75)[0]:
                click_keys([Buttons.SPACE], 0.1)
                result = True

            if debug:
                debug_image = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
        else:
            time.sleep(0.1)
            click_keys([Buttons.SPACE], 0.1)
            result = True

        # Close the map/menu again.
        click_keys([Buttons.BUTTON_1], 0.1)
        return result, debug_image
Example #15
0
    def __getitem__(self, index):
        """Return the (frame, label) pair for flat dataset index ``index``.

        Each source image contributes ``self.frames_number`` frames, so the
        flat index decomposes into an image index and a frame index.
        """
        image_index, frame_index = divmod(index, self.frames_number)

        row = self.dataset.iloc[image_index]

        # Load as grayscale, resize, and add a trailing channel axis.
        grayscale = Image.open(row['image']).convert('L').resize(self.image_size)
        pixels = np.array(grayscale)[..., np.newaxis]

        frame = get_frame(pixels,
                          frame_size=self.frame_size,
                          overlay_size=self.overlay_size,
                          index=frame_index)

        if self.do_transform():
            frame = self.transform(frame)

        return (frame, row['label'])
Example #16
0
    def is_jumping(self):
        """Detect the radar circle that appears while the ship is jumping.

        Returns:
            tuple: (jumping: bool, debug_image or None)
        """
        debug = self.debug
        opt = self.ship_config['radar']

        # Crop the radar region from a fresh screen grab.
        frame = get_frame()
        crop = frame[opt['radar_crop_y1']:opt['radar_crop_y2'],
                     opt['radar_crop_x1']:opt['radar_crop_x2']]

        # Keep only near-white pixels, then dilate to close small gaps.
        lower_mask = np.array([200, 200, 200])
        upper_mask = np.array([255, 255, 255])
        mask = cv2.inRange(crop, lower_mask, upper_mask)

        kernel = np.ones((4, 4), np.uint8)
        dilation = cv2.dilate(mask, kernel, iterations=1)

        # Look for a circle of the radar's expected radius (26-29 px).
        rows = dilation.shape[0]
        circles = cv2.HoughCircles(dilation,
                                   cv2.HOUGH_GRADIENT,
                                   2,
                                   rows / 8,
                                   param1=20,
                                   param2=50,
                                   minRadius=26,
                                   maxRadius=29)

        # NOTE(review): HoughCircles returns shape (1, N, 3); this loop keeps
        # only c[0], i.e. the first circle — confirm that is intended.
        center = None
        radius = None
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for c in circles:
                circle = c[0]
                center = (circle[0], circle[1])
                radius = circle[2]

        debug_image = None
        if debug:
            debug_image = crop
            if center is not None and radius is not None:
                # Mark the detected circle on the crop (mutates it).
                cv2.circle(debug_image, center, radius, (0, 0, 255), 5)
            debug_image = join_images(
                crop, cv2.cvtColor(dilation, cv2.COLOR_GRAY2RGB))

        return circles is not None and len(circles) > 0, debug_image
Example #17
0
    def is_refueling(self):
        """Detect the bright refuelling indicator in the fuel-gauge region.

        Returns:
            tuple: (refueling: bool, debug_image or None)
        """
        debug = self.debug
        opt = self.ship_config['refuel']

        frame = get_frame()
        crop = frame[opt['need_refuel_crop_y1']:opt['need_refuel_crop_y2'],
                     opt['need_refuel_crop_x1']:opt['need_refuel_crop_x2']]

        # Near-white pixels indicate the active refuelling indicator.
        lower_mask = np.array([240, 240, 240])
        upper_mask = np.array([255, 255, 255])
        mask = cv2.inRange(crop, lower_mask, upper_mask)

        # NOTE(review): indexing [1] matches the OpenCV 3.x findContours
        # return signature; OpenCV 4.x returns (contours, hierarchy).
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[1]

        debug_image = None
        if debug:
            debug_image = join_images(crop,
                                      cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB))

        return len(contours) > 0, debug_image
Example #18
0
def getThermalImage(matrix, satellite_NOAA="19", frame_name="A"):
    """Build a thermal (temperature) matrix from a raw APT image.

    Args:
        matrix: decoded APT image matrix.
        satellite_NOAA: satellite id, one of "19", "18" or "15"; selects the
            calibration constants.
        frame_name: which channel frame to use ("A" or "B").

    Returns:
        The thermal matrix computed by ``get_temp_3A``.

    Raises:
        ValueError: if ``satellite_NOAA`` is not a supported satellite id.
    """
    if satellite_NOAA == "19":
        satellite = sat.NOAA_19()
    elif satellite_NOAA == "18":
        satellite = sat.NOAA_18()
    elif satellite_NOAA == "15":
        satellite = sat.NOAA_15()
    else:
        # Previously an unknown id fell through and crashed later with a
        # NameError on `satellite`; fail early with a clear message.
        raise ValueError("Unsupported NOAA satellite: %r" % (satellite_NOAA,))
    # Smooth, then calibrate against the telemetry frame.
    matrix = cal.calibrate(utils.mean_filter(matrix, 2), frame_name)
    frame = utils.get_frame(matrix, frame_name)
    mayor_frame = tlmtry.get_mayor_frame(matrix, frame_name)
    Cs = tlmtry.compute_CS(matrix, frame_name)
    print("Cs:", Cs)
    print("mayor frame:", mayor_frame)
    # Removed unused temp_min/temp_max and the Image round-trip whose
    # results (imgt, numrows, numcols) were never used.
    thermal_matrix = get_temp_3A(frame, mayor_frame, satellite, Cs)
    return thermal_matrix
Example #19
0
    def is_fuel_scoop_active(self):
        """Detect whether the fuel-scoop indicator is lit on screen.

        Returns:
            tuple: (is_active: bool, debug_image or None)
        """
        debug = self.debug
        opt = self.ship_config['refuel']

        is_active = False

        # Crop the configured fuel-scoop region from a fresh screen grab.
        frame = get_frame()
        crop = frame[opt['fuel_scoop_crop_y1']:opt['fuel_scoop_crop_y2'],
                     opt['fuel_scoop_crop_x1']:opt['fuel_scoop_crop_x2']]

        debug_image = None
        if debug:
            # Copy before the crop is mutated below.
            debug_image = crop.copy()

        # Zero the first two channels, keeping only the last one (red,
        # assuming BGR order — TODO confirm), then threshold for
        # near-saturated values.
        crop[:, :, 0] = 0
        crop[:, :, 1] = 0

        lower_mask = np.array([0, 0, 252])
        upper_mask = np.array([0, 0, 255])

        gray = cv2.inRange(crop, lower_mask, upper_mask)

        # Erode away single-pixel noise, then dilate to merge what remains.
        kernel = np.ones((2, 2), np.uint8)
        erosion = cv2.erode(gray, kernel, iterations=1)
        kernel = np.ones((2, 2), np.uint8)
        dilation = cv2.dilate(erosion, kernel, iterations=4)

        # NOTE(review): indexing [1] matches the OpenCV 3.x findContours
        # return signature; OpenCV 4.x returns (contours, hierarchy).
        contours = cv2.findContours(dilation, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[1]

        if len(contours) > 0:
            is_active = True

        if debug:
            debug_image = join_images(
                debug_image, cv2.cvtColor(dilation, cv2.COLOR_GRAY2RGB))

        return is_active, debug_image
Example #20
0
    def is_in_jump(self):
        """Detect the bright screen region visible while a jump is underway.

        Returns:
            tuple: (in_jump: bool, debug_image or None)
        """
        debug = self.debug
        opt = self.ship_config['jump']

        frame = get_frame()
        crop = frame[opt['in_jump_crop_y1']:opt['in_jump_crop_y2'],
                     opt['in_jump_crop_x1']:opt['in_jump_crop_x2']]

        # Keep only near-white pixels and dilate to consolidate them.
        lower_mask = np.array([250, 250, 250])
        upper_mask = np.array([255, 255, 255])
        mask = cv2.inRange(crop, lower_mask, upper_mask)

        kernel = np.ones((4, 4), np.uint8)
        dilation = cv2.dilate(mask, kernel, iterations=1)

        # NOTE(review): indexing [1] matches the OpenCV 3.x findContours
        # return signature; OpenCV 4.x returns (contours, hierarchy).
        contours = cv2.findContours(dilation, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[1]

        debug_image = None
        if debug:
            debug_image = cv2.cvtColor(dilation, cv2.COLOR_GRAY2RGB)

        return len(contours) > 0, debug_image
Example #21
0
    def explore_d_logits(self, x_buffer, state, action, sess, render=False):
        """Probe the discriminator logit obtained after each possible action.

        For every action index, steps the environment once, runs the
        resulting frame sequence through the network, and records the
        discriminator output; the saved snapshot is restored between probes
        so each action starts from the same state.

        Args:
            x_buffer: current frame buffer (shallow-copied, not mutated).
            state: emulator snapshot to restore after each probe.
            action: action replayed when restoring the snapshot.
            sess: active TF session.
            render: when True, render the environment around each probe.

        Returns:
            np.ndarray: one discriminator logit per action; 0. for actions
            that ended the episode.
        """

        d_expert = []

        x_tilda_buffer = copy.copy(x_buffer)

        for a_tilda in range(NUM_ACTIONS):

            if render:

                self.env.render()

            state_tilda = self.interact(a_tilda, SKIP_FRAME)

            if render:

                self.env.render()

            if state_tilda is None:

                # Episode ended: no frame to score, record a zero logit.
                d_expert.append(0.)

            else:

                x_tilda_buffer.append(
                    utils.get_frame(state_tilda, C_IN, HEIGHT, WIDTH))

                x_tilda = np.asarray(x_tilda_buffer).swapaxes(0, 1)

                a_logits_tilda, d_logits_tilda = sess.run(
                    [self.a_logits, self.d_logits], {self.x: x_tilda})

                d_expert.append(d_logits_tilda[0][0])

            # Rewind the environment before probing the next action.
            self.restore_state(state, action)

        return np.asarray(d_expert)
Example #22
0
    def is_in_route(self):
        """Check the galaxy map for the route icon via template matching.

        Returns:
            tuple: (in_route: bool, debug_image or None)
        """
        debug = self.debug

        # Open the map, give it time to draw, then grab the screen.
        click_keys([Buttons.BUTTON_1], 0.1)
        time.sleep(2)

        frame = get_frame()

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        result, loc = match_template(gray, self.ship_dir + '/images/route.png',
                                     0.8)

        # Close the map again.
        click_keys([Buttons.BUTTON_1], 0.1)

        debug_image = None
        if debug:
            debug_image = frame
            # Outline every template-match location on the frame.
            for pt in zip(*loc[::-1]):
                cv2.rectangle(frame, (pt[0] - 1, pt[1] - 1),
                              (pt[0] + 30, pt[1] + 28), (0, 0, 255), 1)
            debug_image = cv2.resize(debug_image, (0, 0), fx=0.5, fy=0.5)

        return result, debug_image
Example #23
0
 def __init__(self, fps=30, **kwargs):
     """Create the camera widget.

     Args:
         fps: target frames per second for the capture loop.
         **kwargs: forwarded to the base widget class.
     """
     super(KivyCamera, self).__init__(**kwargs)
     self.fps = fps
     # NOTE(review): `fold` is a module-level name not visible here —
     # presumably the frame source/folder; confirm against the module.
     self.test = utils.get_frame(fold)
Example #24
0
def evaluate_control_success(args):
    """Roll out an MPC controller on every recorded episode and report stats.

    For each ``.npz`` episode under ``args.data_dir``, the start and goal
    images are encoded to keypoints and an MPC controller drives a FetchReach
    environment toward the goal keypoints. An episode counts as a success
    when the gripper comes within 3 cm of the recorded goal position;
    successful rollouts are rendered to video.

    Args:
        args: namespace with data_dir, seed, horizon, max_episode_steps,
            is_train, train_dir, test_dir, vids_dir and vids_path fields.
    """
    files = glob.glob(os.path.join(args.data_dir, "*.npz"))

    model = load_model(args)
    # Projection matrix from world coordinates to camera pixels.
    M = np.load('tmp_data/proj.npy')

    count = 0      # number of successful episodes (was initialized twice)
    num_steps = 0  # total steps taken across successful episodes
    for i, f in enumerate(files):
        data = np.load(f, allow_pickle=True)

        img_seq = data['image']
        goal_pos_w = data['obs'][0]['desired_goal']

        print("To reach Distance:",
              np.linalg.norm(data['obs'][-1]['achieved_goal'] - goal_pos_w))

        goal_pos = convert_to_pixel(goal_pos_w, M)
        img_seq = convert_img_torch(img_seq)

        start_img = img_seq[0]
        goal_img = img_seq[-1]

        with torch.no_grad():
            start_keyp = model.img_to_keyp(
                start_img[None, None, Ellipsis])[0, 0, :, :2]  # num_keyp x 2
            goal_keyp = model.img_to_keyp(goal_img[None, None,
                                                   Ellipsis])[0, 0, :, :2]

            env = gym.make('FetchReach-v1')
            env.seed(args.seed)

            mpc = MPC(model, goal_keyp, H=args.horizon)

            env.reset()
            keyp = start_keyp
            frames = []
            reached = False
            for t in range(args.max_episode_steps):
                action = mpc.select_min_cost_action(keyp).cpu().numpy()
                x, _, done, _ = env.step(action)
                im = utils.get_frame(env)
                frames.append(im)
                keyp = mpc.get_keyp_state(im)

                # Success: gripper within 3 cm of the goal in world space.
                gripper_pos = x['achieved_goal']
                if np.linalg.norm(gripper_pos - goal_pos_w) <= 0.03:
                    num_steps += t
                    reached = True
                    break

            if reached:
                print("Reached")
                count += 1
                frames = np.stack(frames)

                l_dir = args.train_dir if args.is_train else args.test_dir
                save_dir = os.path.join(args.vids_dir, "control",
                                        args.vids_path)
                if not os.path.isdir(save_dir):
                    os.makedirs(save_dir)
                save_path = os.path.join(
                    save_dir, l_dir + "_{}_seed_{}.mp4".format(i, args.seed))
                viz_imgseq_goal(frames,
                                goal_pos,
                                unnormalize=False,
                                save_path=save_path)
            else:
                print("Did not reach")

    # Guard the summary divisions: previously this raised ZeroDivisionError
    # when no files were found or no episode succeeded.
    print("Success Rate: ", float(count) / len(files) if files else 0.0)
    if count:
        print("Average Num of steps: ", float(num_steps) / count)
    else:
        print("Average Num of steps: ", float('nan'))
Example #25
0
# 
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.

import sys
sys.path.insert(0, './extras/')
sys.path.insert(0, './')

import apt
import utils
import numpy as np
import scipy.signal

from PIL import Image

# Decode the AM-demodulated APT recording into an image matrix.
matrix = apt.decode('wav/am_demod/sample.wav')


utils.plot_histogram(matrix,'Imagen completa')
utils.plot_image(matrix,'Imagen completa')

# Flip the image 180 degrees because the satellite passed from South to North.
frameA = utils.flip(utils.get_frame(matrix,"A"))
frameB = utils.flip(utils.get_frame(matrix,"B"))

# Save per-channel histograms (frame B: infrared band, frame A: visible).
utils.plot_histogram(frameB,'Histograma Banda Infrarroja', save = True)
utils.plot_histogram(frameA,'Histograma Espectro visible', save = True)
Example #26
0
sys.path.insert(0, './')

import apt
import calibrate as cal
import utils
import numpy as np
import scipy.signal

from PIL import Image

# Decode the APT WAV recording; cache disabled to force a fresh decode.
matrix = apt.decode('wav/am_demod/2019.03.04.16.23.59.wav', cache = False)

'''
Calibrate image with Telemetry Frame
'''
# NOTE(review): this rebinds the `cal` module name to the calibrated matrix,
# shadowing the module for the rest of the script.
cal = cal.calibrate(matrix,frame_name="A", debug=True, cache=False) # The image can be calibrated with either of the two telemetry frames


utils.plot_image(utils.flip(cal), 'Imagen APT calibrada')
utils.save_image(utils.flip(cal), 'Imagen APT calibrada')

# Extract both channel frames from the calibrated image.
frameA_cal = utils.get_frame(cal, "A")
frameB_cal = utils.get_frame(cal, "B")


utils.plot_image(utils.flip(frameA_cal), 'Frame A calibrado')
utils.save_image(utils.flip(frameA_cal), 'Frame A calibrado')

utils.plot_image(utils.flip(frameB_cal), 'Frame B calibrado')
utils.save_image(utils.flip(frameB_cal), 'Frame B calibrado')
Example #27
0
"""Initialize and train the SVM"""
x, y = utils.get_data(hog_list)
# Free the raw HOG features once converted to training arrays.
del hog_list
# NOTE(review): cv2.SVM_* constants belong to the old OpenCV 2.4 API.
svm_par = dict(kernel_type=cv2.SVM_LINEAR, svm_type=cv2.SVM_C_SVC)
# svm_par = dict(kernel_type=cv2.SVM_LINEAR, svm_type=cv2.SVM_C_SVC, C=0.01)
svm_vec = utils.load_svm("svm.pickle", x, y, svm_par)
# svm_vec = cv2.HOGDescriptor_getDefaultPeopleDetector()
hog_obj.setSVMDetector(np.array(svm_vec))

"""Ground truth"""
true_detection_list = utils.get_truth('view8.json')
n_frame = len(true_detection_list)

"""Multi-scale detector on the video"""
hog_detection_list = []
# Only process as many frames as there is ground truth for.
frm_list = utils.get_frame(frm_path)
frm_list = frm_list[0:n_frame]
i = -1
for frm in frm_list:
    i += 1
    found_true = true_detection_list[i]
    found_filtered = []
    found, w = hog_obj.detectMultiScale(frm, **hog_par)
    # Keep only detections not nested inside another detection.
    # NOTE(review): if utils.is_inside(r, r) is True for identical boxes,
    # every detection would be dropped — confirm is_inside excludes self.
    for r in found:
        inside = False
        for q in found:
            if utils.is_inside(r, q):
                inside = True
                break
        if not inside:
            found_filtered.append(r)
Example #28
0
from PIL import Image


def indice_nubosidad(frame, x1, y1, x2, y2):
    """Compute a cloudiness index over the (x1, y1)-(x2, y2) region of
    ``frame``.

    Currently a stub ("Completar código aquí" in the original): it plots the
    selected region and returns 0.
    """
    # Bug fix: the region must come from the `frame` argument — the original
    # read the module-level `matrix` instead, ignoring the parameter.
    utils.plot_image(frame[y1:y2, x1:x2], 'Imagen APT ')
    index = 0

    return index


'''
Decodifico y calibro la señal
'''
# Decode the recording (cached) and calibrate using telemetry frame "A".
matrix = apt.decode('wav/am_demod/2019.03.04.19.30.49.wav', cache=True)
matrix = cal.calibrate(matrix, frame_name="A", debug=False, cache=True)
'''
Visualizamos la imagen APT para identificar el canal infrarrojo
'''

# Show the full APT image, then split it into its two channel frames.
utils.plot_image(matrix, 'Imagen APT ')
matrix_channel_A = utils.get_frame(matrix, "A")
matrix_channel_B = utils.get_frame(matrix, "B")
''''
Invocamos la función que calcula el índice de nubosidad
'''
# Compute the cloudiness index over a hand-picked region of channel A.
indice = indice_nubosidad(matrix_channel_A, x1=140, y1=612, x2=750, y2=790)
Example #29
0
        plt.plot(tlmtry.get_vector(telemetry_norm),
                 label='Normalized Telemetry Vector')
        plt.legend()
        plt.show()
    '''
    Histogram
    '''

    display = False
    if display == True:
        utils.plot_histogram(matrix, "Raw Histogram")
        utils.plot_histogram(img_filtered, "Raw Filtered")

    matrix = img_filtered

    frame_A = utils.get_frame(matrix, "A")
    frame_B = utils.get_frame(matrix, "B")

    ##SPACE AND TIME FRAME SYNC

    space_time_sync_frame = tlmtry.get_space_time_sync_frame(matrix, "B")
    space_time_sync_frame = Image.fromarray(space_time_sync_frame)

    display = True
    if display == True:
        plt.figure()
        plt.title('SPACE AND TIME FRAME SYNC')
        plt.imshow(space_time_sync_frame, cmap='gray')
    '''
    Get Thermal Image
    '''
Example #30
0
# Calibrate a mean-filtered copy of the raw matrix (telemetry frame "A").
# NOTE: Python 2 script (print statements below).
matrix_norm = cal.calibrate(utils.mean_filter(matrix, 2), frame_name="A")
'''
Display Telemetry Frames (comparing )
'''
frame = "A"
matrix_filtered = utils.mean_filter(matrix, 2)
# Telemetry vector from the calibrated image vs. the uncalibrated one.
telemetry_norm = tlmtry.get_frame(
    cal.calibrate(matrix_filtered, frame_name="A"), frame)
telemetry = tlmtry.get_frame(matrix_filtered, frame)

print "telemetry norm:", telemetry_norm
print "telemetry:", telemetry
tel_image = Image.fromarray(telemetry)
tel_image_norm = Image.fromarray(telemetry_norm)

frame_A = utils.get_frame(matrix_filtered, "A")
frame_B = utils.get_frame(matrix_filtered, "B")

##SPACE AND TIME FRAME SYNC
space_time_sync_frame = tlmtry.get_space_time_sync_frame(matrix, "B")
space_time_sync_frame = Image.fromarray(space_time_sync_frame)
'''
Get Thermal Image
'''
# NOTE(review): temp_min/temp_max are initialized but never used in this
# visible chunk — possibly consumed further down the file.
temp_min = 1000
temp_max = 0
# Major telemetry frame and CS value feed the temperature conversion.
mayor_frame = tlmtry.get_mayor_frame(matrix, "B")
Cs = tlmtry.compute_CS(matrix, "B")
print "Cs:", Cs
print "mayor frame:", mayor_frame
thermal_matrix = thermal.get_temp_3A(frame_B, mayor_frame, satellite, Cs)
Example #31
0
    def train(self):
        """Main adversarial training loop.

        Alternates between (1) collecting N_STEPS of policy rollouts —
        recording frames, actions, discriminator/policy logits and per-action
        advantage probes — and (2) running one combined gradient step, then
        tracks moving-average fake/expert discriminator accuracies and
        periodically saves and logs.
        """

        with tf.Session() as sess:

            # Fresh init when no checkpoint is configured, else restore.
            if MODEL is None:

                init_op = tf.global_variables_initializer()

                sess.run(init_op)

            else:

                self.load(sess)

            tf.train.start_queue_runners(sess)

            sess.run(self.model.set_training_mode)

            # self.init_agent()

            for ts in range(N_TRAIN_STEPS):

                # 1. interact with envs. for N_STEPS

                traj_x, traj_actions, traj_d_logits, traj_a_logits, traj_done, traj_adv = [], [], [], [], [], []

                for ns in range(N_STEPS):

                    if self.reset:

                        self.init_env()

                    # self.env.render()

                    # Frame buffer -> network input layout (batch-first).
                    x = np.asarray(self.x_buffer).swapaxes(0, 1)

                    a_logits, d_logits = sess.run(
                        [self.a_logits, self.d_logits], {self.x: x})

                    a_logits = np.clip(a_logits, -MAX_A_LOGITS, MAX_A_LOGITS)

                    a_t = utils.sample(a_logits[0], 10)

                    # Snapshot emulator state so create_adv can restore it
                    # while probing alternative actions.
                    clone_s = self.env.env.clone_full_state()

                    clone_a = a_t

                    state = self.interact(a_t, SKIP_FRAME)

                    # interact returns None at end of episode; mark the last
                    # recorded transition as terminal and reset next loop.
                    if state is None:

                        self.reset = True

                        if len(traj_done) > 0:

                            traj_done[-1] = True

                        continue

                    adv_vec = self.create_adv(self.x_buffer, d_logits[0][0],
                                              clone_s, clone_a, sess, False)

                    self.x_buffer.append(
                        utils.get_frame(state, C_IN, HEIGHT, WIDTH))

                    traj_x.append(list(self.x_buffer))

                    traj_actions.append(a_t)

                    traj_adv.append(adv_vec)

                    traj_d_logits.append(d_logits[0])

                    traj_a_logits.append(a_logits[0])

                    traj_done.append(False)

                # 2. define the advantage

                # adv = np.asarray(self.traj_d_logits)[1:, 0] - np.asarray(self.traj_d_logits)[:-1, 0]
                #
                # adv = adv * (~np.asarray(self.traj_done[:-1]))

                # # TODO: debug
                #
                # if ts < 200:
                #     adv *= 0.

                # Per-action visit counts and mean policy logits (for logs).
                action_counts = utils.one_hot(traj_actions,
                                              NUM_ACTIONS).sum(axis=0)

                mean_a_logits = np.asarray(traj_a_logits).mean(axis=0)

                # adv_vec = np.expand_dims(adv, 1) * utils.one_hot(self.traj_actions[:-1], NUM_ACTIONS)

                # adv_vec = np.clip(adv_vec, -MAX_ADV, MAX_ADV)

                # mean_adv = adv_vec.mean(axis=0)

                # min_adv = adv_vec.min(axis=0)

                # max_adv = adv_vec.max(axis=0)

                # a_idx = np.asarray(self.traj_actions[:-1])

                # 3. train

                # Drop the last (incomplete) transition and flatten frames.
                x = np.reshape(traj_x[:-1],
                               [-1, N_FRAMES * C_IN, HEIGHT, WIDTH])

                # Label 1 marks agent ("fake") sequences for the discriminator.
                y = np.ones(x.shape[0], np.int32)

                adv_logits = np.asarray(traj_adv[:-1])

                mean_adv_logits = adv_logits.mean(axis=0)

                _, d_fake, d_expert, g_norm, w_norm, expert_seq, pg_loss, entropy, d_loss,\
                    g_norm_d, g_norm_p, g_norm_ent = sess.run([self.grad_op,
                                                            self.d_logits,
                                                            self.d_logits_ex,
                                                            self.g_norm,
                                                            self.w_norm,
                                                            self.expert_sequence,
                                                            self.pg_loss,
                                                            self.entropy,
                                                            self.d_loss,
                                                            self.grad_norm_d,
                                                            self.grad_norm_p,
                                                            self.grad_norm_ent
                                                                            ],
                                                                           {self.x: x,
                                                                            self.y: y,
                                                                            self.adv_vec: adv_logits,
                                                                            # self.a_idx: a_idx,
                                                                            })

                # Running-average accuracies: agent scored as class 1,
                # expert scored as class 0.
                self.fake_acc = RA * self.fake_acc + (
                    1 - RA) * self.calc_accuracy(d_fake, 1)

                self.expert_acc = RA * self.expert_acc + (
                    1 - RA) * self.calc_accuracy(d_expert, 0)

                if ts % SAVE_INTRVL == 0 and MODE == "train":

                    self.save(sess, ts)

                if ts % PRINT_INTRVL == 0:

                    print(
                        "iter: %5d, fake acc: %.3f, expert acc: %.3f, action_count: %s, mean_a_logits %s, "
                        "mean_adv_logits: %s, "
                        "g_norm_d: %.3f, g_norm_p: %.3f, g_norm_ent: %.4f"
                        # "min_adv: %s, mean_adv: %s, max_adv: %s, w_norm: %f,"
                        # "g_norm: %f, pg_loss: %.6f, entropy: %.2f, d_loss: %.2f"
                        % (
                            ts, self.fake_acc, self.expert_acc, action_counts,
                            mean_a_logits, mean_adv_logits, g_norm_d, g_norm_p,
                            g_norm_ent
                            # min_adv, mean_adv, max_adv,
                            # w_norm, g_norm, pg_loss, entropy, d_loss
                        ))