Example #1
0
def main():
    """Entry point of the net_seg client.

    Parses the CLI, resolves the broker IP (explicit ``--ip`` or discovery
    via ``utils.find_master``), connects an MQTT client whose userdata
    carries the segmentation engines, and blocks in the MQTT network loop.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--name',
        '-n',
        type=str,
        default='net_seg',
        # Fixed: the original passed required=True together with a default,
        # which made the default dead code. The flag is now optional so the
        # default applies; callers that pass --name explicitly are unaffected.
        required=False,
        help=
        'Name this client/slave announces to the server. Needs to match on server side.'
    )
    parser.add_argument(
        '--ip',
        '-ip',
        type=str,
        default='',
        required=False,
        # Fixed: help text was copy-pasted from --name.
        help=
        'IP of the broker/server. If omitted, the master is discovered via the announced name.'
    )
    parser.add_argument('-viz',
                        action='store_true',
                        help="Activate this to enable visualization output")
    args = parser.parse_args()

    # Frame size (height, width) shared with the engines via broker userdata.
    size = (480, 640)
    logger.info("Starting engine")
    engine = create_engine()
    logger.info("Starting Latent Engine")
    latent_engine = create_latent_engine()
    debg = debugger()

    # Userdata package handed to every MQTT callback.
    broker_data = {
        "size": size,
        "engine": engine,
        "latent_engine": latent_engine,
        "viz": args.viz,
        "debugger": debg
    }
    # An explicit IP wins; otherwise discover the master by our announced name.
    if args.ip:
        config.BROKER_IP = args.ip
    else:
        config.BROKER_IP = utils.find_master(args.name)

    # Defined userdata pkg we give to MMQT
    logger.info("Broker: Connecting")
    client = mqtt.Client(userdata=broker_data)
    client.connect(config.BROKER_IP, config.BROKER_PORT, keepalive=60)

    logger.info("Initialize IMG Client Functions")
    init_img_topic(client)

    # Blocks until disconnect() is called from a callback or the connection
    # drops permanently; the disconnect below only runs after the loop exits.
    logger.info("Entering Forever Loop")
    client.loop_forever()

    client.disconnect()
Example #2
0
from debugger import debugger

# Attach to PID 2776, dump 8 bytes at address 1243624, then detach once the
# user presses Enter.
print("start debugger...")
proc_dbg = debugger()
proc_dbg.attach(2776)
# Disabled example write:
# proc_dbg.write_process_memory(1243628, b'\x00\x01\x02\x03')
memory_bytes = proc_dbg.read_process_memory(1243624, 8)
print(memory_bytes)
input()
proc_dbg.detach()
Example #3
0
def main():
    """Main control loop for the brick-sorting robot.

    Phases (as implemented below): parse CLI args, optionally broadcast the
    master announcement, connect the MQTT broker, start the EV3 robot,
    explore the environment collecting brick frames and box marker IDs, fit
    a Gaussian mixture over brick latents, then loop forever picking bricks
    and delivering each to the box matching its predicted cluster.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-viz',
                        action='store_true',
                        help="Activate this to enable visualization output")
    parser.add_argument('-b',
                        action='store_true',
                        help="Activate this to enable broadcasting")
    parser.add_argument(
        '-p_rec',
        type=str,
        help=
        'Path to store recorded images. If defined, it automatically actives saving.'
    )
    parser.add_argument(
        '--jetson',
        '-j',
        action='store_true',
        help=
        "This parameter should be set if run from a Jetson board with internal camera"
    )
    parser.add_argument(
        '--expected_slaves',
        '-es',
        required=True,
        nargs='+',
        type=str,
        help=
        "The (list) of clients which need to connect to the server before processing can start."
    )
    parser.add_argument('--interface_robot',
                        '-ir',
                        required=True,
                        type=str,
                        help="Interface to which ev3 is connect.")
    parser.add_argument('--interface_netseg',
                        '-in',
                        required=True,
                        type=str,
                        help="Interface to which ev3 is connect.")
    parser.add_argument('--angle',
                        '-a',
                        required=False,
                        type=int,
                        default=16,
                        help="Interface to which ev3 is connect.")
    args = parser.parse_args()

    # When True, segmentation and latents are computed remotely over MQTT;
    # the local engines below are then left as None.
    network_code = True

    if args.b:
        if not utils.master_announcement(args.expected_slaves,
                                         args.interface_netseg,
                                         message_delay=3,
                                         repeats=2):
            # NOTE(review): a failed announcement is silently ignored.
            pass
        if not utils.master_announcement(args.expected_slaves,
                                         args.interface_robot,
                                         message_delay=3,
                                         repeats=2):
            pass

    # else:
    config.BROKER_IP = utils.find_interface_ip(args.interface_netseg)

    # sleep shortly so every service can get ready...ye sucks
    time.sleep(2)

    # Start the MMQT Messaging system
    logger.info("MMQT Broker: Initalization")
    broker = Broker.Broker(config.BROKER_IP,
                           config.BROKER_PORT,
                           broker_data={"seg_q": seg_q})
    logger.info("Broker: Connecting")
    broker.connect()
    broker.init_img_topic()
    broker.start_listen()

    controller = Controller.RobotController(broker)
    robot = EV3Robot(controller, jetson_mode=args.jetson)

    logger.info("Initializing Robot")
    robot.startup()

    # Set up engines
    logger.info("Starting Segmentation Engine")
    #segment_engine = create_engine()
    segment_engine = None
    logger.info("Starting Latent Engine")
    #latent_engine = create_latent_engine()
    latent_engine = None
    logger.info("Setting up tracker Engine")
    marker_tracker = MarkerTracker.MarkerTracker()

    #######################################################
    # ALL THE OTHER CODE FOR EXPLORATION AND SHIT IN HERE #
    #######################################################
    robot.cam["ps3_one"].open()
    robot.cam["jetson_onboard"].open()
    # #### Get images
    logger.info("Starting Environment Exploration Phase")
    # path = '/home/nvidia/Documents/Ach8/Raw_ims/'

    # Exploration: rotate in place, keeping every other ps3 frame for latent
    # extraction and scanning the onboard camera for box markers.
    count = 1
    markerIDs = set()
    brick_exp_frames = []
    for round_num in range(args.angle):
        robot.controller.turn_in_place(+28, 1000)

        # start = time.time()
        # elapsed = time.time() - start

        for i in range(6):
            frame = robot.cam["ps3_one"].read()
            frame2 = robot.cam["jetson_onboard"].read()
            if i % 2 == 0:
                brick_exp_frames.append(frame)
                # brick_exp_frames.append(frame2)

        # Marker detection only uses the last onboard frame of the burst above.
        ids = marker_tracker.update(frame2)
        logger.debug("ids: " + str(ids))
        for id in ids:
            markerIDs.add(id[0])
        # name = str(count).zfill(4)
        # cv2.imwrite(path + name + '.png', frame)
        # count += 1

        # while elapsed < 0.5:
        #     for i in range(7):
        #         frame = robot.cam["ps3_one"].read()
        #         frame2 = robot.cam["jetson_onboard"].read()
        #     ids = marker_tracker.update(frame2)
        #     logger.debug("ids: " + str(ids))
        #     for id in ids:
        #         markerIDs.add(id[0])
        #     name = str(count).zfill(4)
        #     cv2.imwrite(path + name + '.png', frame)
        #     count += 1
        #     elapsed = time.time() - start

    robot.cam["jetson_onboard"].close()
    robot.cam["ps3_one"].close()

    ##### Set up engines
    if not network_code:
        logger.info("Starting Segmentation Engine")
        # segment_engine = create_small_engine()
        segment_engine = create_engine()
        logger.info("Starting Latent Engine")
        # latent_engine = create_small_latent_engine()
        latent_engine = create_latent_engine()
    else:
        segment_engine = None
        latent_engine = None

    ##### Get latents
    num_latent_var = 32
    # NOTE(review): np.empty leaves one uninitialized (garbage) row in
    # all_latents that is never removed before the GMM fit below — confirm.
    all_latents = np.empty([1, num_latent_var])
    # for j, im_filename in enumerate(sorted(os.listdir(path))):
    for j, frame in enumerate(brick_exp_frames):
        # logger.debug("Reading image from "+ path + im_filename)
        logger.debug("Computing latents for frame %d", j)
        # frame = cv2.imread(path + im_filename, cv2.IMREAD_COLOR)

        if network_code:
            reset_seg_q()
            robot.controller.broker.net_segment_image(frame,
                                                      compute_latent=True,
                                                      occlude=True)
            most_recent_segmentation = seg_q.get()
            latents = utils.decode_list_of_numpy_arrays(
                most_recent_segmentation["latents"])
            logger.debug(str(latents))
            coords = most_recent_segmentation["coords"]
        else:
            latents, coords, small_ims, mask, elapsed = image_to_latents(
                frame,
                segment_engine,
                latent_engine,
                scale=1,
                occlude=False,
                mask_image=False)

        for i, latent in enumerate(latents):
            all_latents = np.concatenate((all_latents, latents[i]), axis=0)
        #print("image " + str(j) + " / " + str(len(os.listdir(path))) + " elapsed: " + str(elapsed) + " num objects: " + str(len(latents)))
    latents = np.array(all_latents)
    logger.debug(str(latents.shape[0]) + " scans of bricks.")
    logger.debug(str(latents.shape) + "latent dim.")
    #np.save('/home/nvidia/Documents/Ach8/latents', latents)

    # Heuristic: want at least ~3 latent scans per detected box/cluster.
    if latents.shape[0] < 3 * len(markerIDs):
        logger.error(
            "WARNING NOT ENOUGH BRICK SCANS FOUND FOR GOOD ESTIMATION")

    # One GMM component per detected box marker.
    n_components = len(markerIDs)
    logger.debug("Found %d boxes.", n_components)
    # NOTE(review): dbg appears unused in the remainder of this function.
    dbg = debugger()
    gmm = mixture.GaussianMixture(n_components=n_components,
                                  covariance_type='full')
    gmm.fit(latents)

    # logger.info("All_analysis finished, elapsed: " + str(time.time() - tot_analysis_start))

    #######################################################
    # ACTUAL BRICK SORTING COMES NOW #
    #######################################################
    logger.info("Starting Brick Sorting Loop")
    while True:
        # Reset the per-run attributes stashed on the search function; the
        # bare except covers the first iteration where they don't exist yet.
        try:
            logger.debug("reseting Search-and-Reach Brick loop function")
            delattr(search_and_reach_brick, "track_count")
            delattr(search_and_reach_brick, "prev_x")
            delattr(search_and_reach_brick, "prev_y")
        except:
            pass

        logger.info("Starting Search-and-Reach Brick Phase")
        robot.cam["ps3_one"].open()
        picking_bricks = True
        robot.controller.lower_gripper()

        # Get the net segmenter ready for processing
        #
        # This list will keep images of the tracked brick which we will use later to compute the mean latent feature
        tracked_brick_small_ims = []

        # This list can be used to store all frames since the last tracking update through segmentation for playback
        frames_since_last_seg = []

        # Read first frame
        frame = robot.cam["ps3_one"].read()
        # send it to the net_seg
        logger.debug("SRB: Send initial image to network segmenter")
        reset_seg_q()
        robot.controller.broker.net_segment_image(frame,
                                                  compute_latent=False,
                                                  return_mask=True,
                                                  scale=0.5,
                                                  occlude=True)
        #  wait for results
        most_recent_segmentation = seg_q.get()
        logger.debug(most_recent_segmentation.keys())
        # and start tracker for closest brick
        logger.debug("SRB: Instantiate tracker for closest brick.")
        if most_recent_segmentation["coords"]:
            active_trackers, new_boxes = multitracking.instantiate_trackers(
                utils.decode_numpy_array(most_recent_segmentation["mask"]),
                frame,
                track_closest=True)
            tracked_brick_small_ims.append(
                multitracking.get_img_from_bbox(frame, new_boxes[0]))
            # Update tracker and send off next frame to keep the loop running! IMPORTANT!
        else:
            active_trackers = []
            new_boxes = []

        frame = robot.cam["ps3_one"].read()
        logger.debug(
            "SRB: Send second, loop starting image to network segmenter")
        robot.controller.broker.net_segment_image(frame,
                                                  compute_latent=False,
                                                  return_mask=True,
                                                  scale=0.5,
                                                  occlude=True)
        # active_trackers, new_boxes = multitracking.update_active_trackers(active_trackers, frame)
        # tracked_brick_small_ims.append(multitracking.get_img_from_bbox(frame, new_boxes[0]))

        # robot.controller.lift_gripper()
        while picking_bricks:
            running = True
            while running:
                logger.debug("SRB: starting main routine")
                tim = time.time()
                state = search_and_reach_brick(robot,
                                               segment_engine,
                                               latent_engine,
                                               config,
                                               active_trackers,
                                               new_boxes,
                                               tracked_brick_small_ims,
                                               frames_since_last_seg,
                                               visualize=args.viz,
                                               record=False)
                logger.info("SBR took: %f", time.time() - tim)
                if state == "lost":
                    logger.debug("Lost track of brick, restarting search")
                elif state == "reached":
                    running = False

            picking_bricks = False

            robot.controller.stop_main_motors()

            tbsi = len(tracked_brick_small_ims)
            logger.debug(
                "SRB: reached brick, computing latents from %d mini images",
                len(tracked_brick_small_ims))
            brick_latents = []
            for idx, im in enumerate(
                    tracked_brick_small_ims):  # TODO: RANDOM SAMPLE?
                logger.debug("SRB: reached brick, processing frame %d", idx)
                robot.controller.broker.net_segment_image(np.array(im),
                                                          compute_latent=True,
                                                          return_mask=False,
                                                          final_latent=True,
                                                          occlude=True)
                most_recent_segmentation = seg_q.get()
                if most_recent_segmentation["latents"]:
                    brick_latents.append(
                        utils.decode_numpy_array(
                            most_recent_segmentation["latents"][0]))

            # NOTE(review): idx enumerates the slice (0..8) but indexes the
            # FULL list, so this re-sends the first frames, not the last ten;
            # it also duplicates latents already gathered above — confirm.
            for idx, im in enumerate(tracked_brick_small_ims[-10:-1]):
                robot.controller.broker.net_segment_image(np.array(
                    tracked_brick_small_ims[idx]),
                                                          compute_latent=True,
                                                          return_mask=False,
                                                          final_latent=True,
                                                          occlude=True)
                most_recent_segmentation = seg_q.get()
                if most_recent_segmentation["latents"]:
                    brick_latents.append(
                        utils.decode_numpy_array(
                            most_recent_segmentation["latents"][0]))

            reset_seg_q()
            frame = robot.cam["ps3_one"].read()
            robot.controller.broker.net_segment_image(frame,
                                                      compute_latent=True,
                                                      return_mask=True,
                                                      scale=0.5,
                                                      occlude=True)
            #  wait for results
            most_recent_segmentation = seg_q.get()
            logger.debug(most_recent_segmentation.keys())
            # and start tracker for closest brick
            logger.debug("SRB: Instantiate tracker for closest brick.")
            if most_recent_segmentation["coords"]:
                active_trackers, new_boxes = multitracking.instantiate_trackers(
                    utils.decode_numpy_array(most_recent_segmentation["mask"]),
                    frame,
                    track_closest=True)
                tracked_brick_small_ims.append(
                    multitracking.get_img_from_bbox(frame, new_boxes[0]))
                robot.controller.broker.net_segment_image(
                    tracked_brick_small_ims[0],
                    compute_latent=True,
                    return_mask=True,
                    scale=0.5,
                    occlude=True)
                brick_latents.append(
                    utils.decode_numpy_array(
                        most_recent_segmentation["latents"][0]))

            # for _ in range(5):
            #     frame = robot.cam["ps3_one"].read()
            #     robot.controller.broker.net_segment_image(frame, compute_latent=True, return_mask=False)
            #     most_recent_segmentation = seg_q.get()
            #     if most_recent_segmentation["latents"]:
            #         brick_latents.append(utils.decode_numpy_array(most_recent_segmentation["latents"][0]))

            # delete the stuff
            del tracked_brick_small_ims[:]

            # ############################################################
            # COMPUTE LATENT MEAN HERE
            # DECIDE WHICH CLUSTER!
            ############################################################
            logger.debug(np.array(brick_latents).shape)
            bricks_latents_arry = np.array(brick_latents)
            print(np.array(brick_latents))
            # Majority vote over the GMM cluster assignments of all scans.
            cluster_labels_mix = gmm.predict(bricks_latents_arry[:, 0, :])
            logger.debug(bricks_latents_arry[:, 0, :].shape)
            logger.debug(cluster_labels_mix.shape)
            # nps is presumably scipy.stats (mode over axis 0) — TODO confirm.
            label, count = nps.mode(cluster_labels_mix, axis=0)
            label = label[0]
            logger.debug("desired label %s:", label)
            logger.debug(cluster_labels_mix)
            # label = int(np.mode(cluster_labels_mix))

            logger.info("Starting PickUp")
            # robot.controller.move(60, 60)
            # time.sleep(0.2)

            # Drain the camera buffer so `frame` is reasonably current.
            for _ in range(30):
                frame = robot.cam["ps3_one"].read()

            #picking_bricks = not is_brick_in_gripper(robot, frame, segment_engine)
            picking_bricks = False
            if picking_bricks:
                logger.info("Oh no! I lost the brick!")
                robot.controller.move(-45, -45)
                time.sleep(0.5)

                robot.controller.open_gripper()
                robot.controller.lower_gripper()

        robot.cam["ps3_one"].close()

        robot.controller.lower_gripper()
        robot.controller.close_gripper()
        robot.controller.lift_gripper()

        # Brick is picked up
        # Robot moves towards box
        # Map the winning cluster index to a physical box marker ID.
        markerIDs_list = list(markerIDs)
        markerID = markerIDs_list[label]

        robot.cam["jetson_onboard"].open()
        running = True
        logger.info("Start looking for box with ID " + str(markerID) + " now!")
        while running:
            running = search_box(robot, marker_tracker, markerID)
            logger.debug("Running.")

        robot.controller.stop_main_motors()
        logger.info("Found box.")
        # robot.cam["jetson_onboard"].close()

        # Now actually track it!
        running = True
        # robot.cam["jetson_onboard"].open()
        logger.info("Driving to box " + str(markerID) + " now!")
        state = "first_cam"
        switched = False

        while running:
            last_state = state
            state = drive_to_box(robot, marker_tracker, markerID, args.viz,
                                 state)
            logger.debug("state=%s, des_box=%d, all_box=%s, small_ims_used=%d",
                         state, markerID, markerIDs_list, tbsi)
            if state == "second_cam" and not switched:
                logger.debug("Box-Search: Switching to ps3 cam")
                robot.cam["jetson_onboard"].close()
                robot.cam["ps3_one"].open()
                robot.controller.move(65, 65)
                time.sleep(0.5)
                robot.controller.stop_main_motors()
                switched = True
            elif state == "lost":
                logger.debug("lost marker. backing up")
                robot.controller.move(-65, -65)
                time.sleep(0.5)
                robot.controller.stop_main_motors()
                time.sleep(0.5)
                running = True
                robot.cam["jetson_onboard"].open()
                robot.cam["ps3_one"].close()
                state = "first_cam"
                switched = False
            elif state == "inside_box":
                logger.debug("inside box")
                robot.cam["jetson_onboard"].close()
                robot.controller.move(60, 60)
                time.sleep(0.7)
                robot.controller.stop_main_motors()
                running = False
            #     searching = True
            #     while searching:
            #         searching = search_home_base(robot, tracker, config, args.viz)

        if state == "inside_box":
            robot.controller.stop_main_motors()
            robot.controller.open_gripper()
            robot.controller.stop_main_motors()
            robot.cam["jetson_onboard"].close()

        robot.controller.open_gripper()
        logger.info("Looking for new work opportunities")
        robot.controller.move(-65, -65)
        time.sleep(4)
        robot.controller.stop_main_motors()
        robot.controller.move(80, -80)
        time.sleep(8)
        robot.controller.stop_main_motors()

    print("shutting down...im so sleepy")
Example #4
0
import debugger

# Attach to a user-supplied PID and set a memory breakpoint on msvcrt printf.
# Python 2 script (print statement, raw_input).
debugger = debugger.debugger()  # NOTE(review): rebinding shadows the imported module

pid = raw_input("Enter the PID of the process to attach to: ")

debugger.attach(int(pid))

# Resolve printf inside msvcrt.dll and guard 10 bytes at its address.
printf = debugger.func_resolve("msvcrt.dll", "printf")
print "[*] Address of printf: 0x%08x" % printf
debugger.bp_set_mem(printf, 10)

# Resume the debuggee; the debug event loop runs until it exits.
debugger.run()
Example #5
0
import datetime
import os
from subprocess import check_output
from sys import exit

from colorama import init, Fore

import debugger
import utilities
import tools
from pydbg.defines import *

# Module-level initialisation: enable colorama auto-reset, record the load
# time, and grab the shared utilities singleton plus a debugger instance.
init(autoreset=True)
currentTime = datetime.datetime.now()
utils = utilities.Utilities.getInstance()
dbg = debugger.debugger()


class Parser:
    """Singleton holding the state of the command parser.

    Use ``Parser.getInstance()`` to obtain the shared instance; constructing
    ``Parser()`` directly also (re)binds the shared instance.
    """

    # The one shared Parser instance (None until first construction).
    __instance = None

    @staticmethod
    def getInstance():
        """Return the shared instance, creating it on first use."""
        if Parser.__instance is None:
            Parser()
        return Parser.__instance

    def __init__(self):
        Parser.__instance = self
        # Currently debugged process, 'pywindbg' to start out with.
        self.processName = "pywindbg"
Example #6
0
import debugger

# Load a packed binary together with the DLL that will be used to hook it.
unpacker = debugger.debugger()

# Alternative: ask interactively for the target instead of hard-coding it.
# packed_malware = input("Enter the path of the file to unpack: ")

path_to_dll = "C:\\Users\\rjimersonjr\\Documents\\GitHub\\malDLL\\malDLL\\Release\\dllToHook.dll"
packed_malware = "C:\\Users\\rjimersonjr\\Documents\\GitHub\\helloWorldBinary\\helloWorldBinary\\Release\\helloWorldBinary.exe"
unpacker.load(packed_malware, path_to_dll)
Example #7
0
import debugger

# Spawn calc.exe under debugger control.
calc_dbg = debugger.debugger()
calc_dbg.load("C:\\WINDOWS\\system32\\calc.exe")
Example #8
0
from vm import *
from debugger import debugger
# Disassemble the VM memory of the loaded challenge binary (Python 2 script).
v = Vm('challenge.bin')
d = debugger(v)

# Raw memory cells of the VM image.
m = v.memory.memory
for i in range(0, len(m)):
    try:
        print "%i: %s, %i" % (i, d.codes[m[i]], m[i])
    except KeyError:
        # Cell value has no opcode mapping in d.codes; print it as plain data.
        print "%i: %s" % (i, m[i])
Example #9
0
from debugger import debugger
# Attach to a user-given PID and dump the x86 register set of every thread.
dbg = debugger()
# dbg.load('C:\\Windows\\System32\\calc.exe')
pid = input("Enter the PID process program: ")
dbg.attach(int(pid))
threads = dbg.enumerate_threads()

# For each thread grab the value of each of the registers.
for tid in threads:
    thread_context = dbg.get_thread_context(tid)
    print("[*] Dumping registers for thread ID: 0x%08x " % (tid))
    print("[**] EIP: 0x%08x " % (thread_context.Eip))
    print("[**] ESP: 0x%08x " % (thread_context.Esp))
    print("[**] EBP: 0x%08x " % (thread_context.Ebp))
    print("[**] EAX: 0x%08x " % (thread_context.Eax))
    print("[**] EBX: 0x%08x " % (thread_context.Ebx))
    print("[**] ECX: 0x%08x " % (thread_context.Ecx))
    print("[**] EDX: 0x%08x " % (thread_context.Edx))
    print("[*] END DUMP")
dbg.detach()