def evaluate_pair(self, threshold, distance):  #, visualize):
        """Load the overlapping keypoints and count the
        number of matches using the adjacency matrix.
        """
        H = np.loadtxt(self.homography_path)
        # kp_qry, kp_trg and the image shapes are presumably attributes or
        # globals of the surrounding evaluation script (not shown)
        point_qry, point_trg, point_qry_proj_on_trg, point_trg_proj_on_qry = get_overlapping_kp(
            kp_qry, kp_trg, H, qry_img_shape, trg_img_shape)

        point_qry_len = len(point_qry)
        point_trg_len = len(point_trg)
        # Get the distance matrix
        if point_qry_len == 0 or point_trg_len == 0:
            dist_in_trg_img = np.array([])
            dist_in_qry_img = np.array([])
        else:
            dist_in_trg_img = get_dist(point_qry_proj_on_trg, point_trg,
                                       distance)
            dist_in_qry_img = get_dist(point_trg_proj_on_qry, point_qry,
                                       distance)
        eval_results = evaluate_matches(dist_in_trg_img, dist_in_qry_img,
                                        point_qry_len, point_trg_len,
                                        threshold)

        # cache_wg is presumably a module-level results cache (not shown)
        cache_wg.append((self.sequence, self.homography_name, distance,
                         threshold, eval_results))
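The helpers here (get_overlapping_kp, get_dist, evaluate_matches) come from the surrounding project and are not shown. A hedged sketch of what this get_dist variant plausibly computes, assuming (N, 2) keypoint arrays and a metric name as the third argument:

import numpy as np

def get_dist(points_a, points_b, metric="euclidean"):
    # Hypothetical sketch: (N, M) matrix of distances between every point
    # in points_a (N, 2) and every point in points_b (M, 2).
    diff = points_a[:, None, :] - points_b[None, :, :]
    if metric == "euclidean":
        return np.linalg.norm(diff, axis=-1)
    raise ValueError("unsupported metric: %s" % metric)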
Example #2
def get_local_max_grad_1d(grad_func, line, pt, resolution=0.1):
    x, y = pt
    a, b, c = line
    max_grad1, max_grad2 = grad_func(x, y), grad_func(x, y)
    target1, target2 = np.array((x, y)), np.array((x, y))
    drop_cnt1, drop_cnt2 = 0, 0
    center = False
    idx = 1
    while (drop_cnt1 < int(1/resolution)) or (drop_cnt2 < int(1/resolution)):
        if abs(a) <= abs(b): # shallow slope: step along x
            x1 = x - idx * resolution
            x2 = x + idx * resolution
            y1 = - (a * x1 + c) / b
            y2 = - (a * x2 + c) / b
        else: # steep slope: step along y
            y1 = y - idx * resolution
            y2 = y + idx * resolution
            x1 = - (b * y1 + c) / a
            x2 = - (b * y2 + c) / a
        grad1, grad2 = grad_func(x1, y1), grad_func(x2, y2)
        if (idx == 1) and (grad_func(x, y) > max(grad1, grad2)): center = True
        if drop_cnt1 < int(1/resolution):
            if (grad1 > max_grad1):
                max_grad1 = grad1
                target1 = np.array((x1, y1))
                drop_cnt1 = 0
            else:
                drop_cnt1 += 1
        if drop_cnt2 < int(1/resolution):
            if (grad2 > max_grad2):
                max_grad2 = grad2
                target2 = np.array((x2, y2))
                drop_cnt2 = 0
            else:
                drop_cnt2 += 1
        idx += 1
    dist1 = get_dist(target1, np.array((x, y)))
    dist2 = get_dist(target2, np.array((x, y)))
    if dist1 == dist2:
        if max_grad1 > max_grad2: return target1
        else: return target2
    elif dist1 > dist2:
        if dist2 > 0 or center: return target2
        else: return target1
    else:
        if dist1 > 0 or center: return target1
        else: return target2
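A usage sketch for the search above, assuming grad_func is a scalar gradient-magnitude lookup (Example #3 builds one via get_gradient_func, not shown) and get_dist(p, q) is plain two-point Euclidean distance:

import numpy as np

def get_dist(p, q):
    # assumed two-point Euclidean variant used inside get_local_max_grad_1d
    return float(np.linalg.norm(np.asarray(p) - np.asarray(q)))

def make_grad_func(img):
    # hypothetical stand-in for get_gradient_func: gradient magnitude with a
    # nearest-pixel lookup clamped to the image bounds
    gy, gx = np.gradient(img.astype(float))
    mag = np.hypot(gx, gy)
    h, w = mag.shape
    def grad_func(x, y):
        c = min(max(int(round(x)), 0), w - 1)
        r = min(max(int(round(y)), 0), h - 1)
        return mag[r, c]
    return grad_func

img = np.zeros((64, 64))
img[:, 32:] = 1.0                         # vertical step edge near x = 32
line = (0.0, 1.0, -30.0)                  # a*x + b*y + c = 0, i.e. y = 30
print(get_local_max_grad_1d(make_grad_func(img), line, (30.0, 30.0)))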
Example #3
def grad_improve(img, lines, terminals, num_dpts=8, line_thresh=-1, pt_thresh=-1):
    assert len(lines) == len(terminals), f"[grad_improve]: # of lines ({len(lines)}) != # of terminal pairs ({len(terminals)})!"
    grad_func = get_gradient_func(img)
    new_lines, new_terminals = list(), list()
    num_improved_lines = 0
    for line, terminal in zip(lines, terminals):
        new_pts = list()
        dpts, dls = get_dividing_pts_and_lines(line, terminal, num=num_dpts) # dividing points & lines
        for dpt, dl in zip(dpts, dls): # improve sampled points in the vertical direction
            new_pt = get_local_max_grad_1d(grad_func, dl, dpt) # improved point
            new_pts.append(new_pt)
        new_pts = np.vstack(new_pts) # improved points for the 'line'
        ## estimation of the white border width ##
        segment_length = get_dist(terminal[0, :], terminal[1, :])
        segment_width  = W2L * segment_length
        ## check point threshold: any new point should be bounded by the threshold, otherwise the original point is kept ##
        if (0 <= pt_thresh <= 1):
            outlier_idx = get_dist(dpts, new_pts) > (pt_thresh * segment_width)
            new_pts[outlier_idx, :] = np.vstack(dpts)[outlier_idx, :]
        ## check line threshold: number of improved points within threshold should be more than half ##
        if (0 <= line_thresh <= 1) and \
            (np.sum(get_dist(dpts, new_pts) <= (line_thresh * segment_width)) <= 0.5 * num_dpts): # fewer than half within threshold - abandon the improvement
            new_line, new_terminal = line.copy(), terminal.copy()
        else: # keep improvement
            [new_line], [new_terminal] = generate_lines(new_pts, 1, dist_thresh=0.5, correctness_thresh=2, kernel_size=2, faster=False, keep_trace=False)
            num_improved_lines += 1
        ## terminal adjustment ##
        if abs(new_line[0]) <= abs(new_line[1]): # shallow slope
            xmin = min(terminal[0, 0], terminal[1, 0], new_terminal[0, 0], new_terminal[1, 0])
            xmax = max(terminal[0, 0], terminal[1, 0], new_terminal[0, 0], new_terminal[1, 0])
            new_terminal = np.array([
                [xmin, - (new_line[0]*xmin + new_line[2]) / new_line[1]],
                [xmax, - (new_line[0]*xmax + new_line[2]) / new_line[1]]
            ])
        else: # steep slope
            ymin = min(terminal[0, 1], terminal[1, 1], new_terminal[0, 1], new_terminal[1, 1])
            ymax = max(terminal[0, 1], terminal[1, 1], new_terminal[0, 1], new_terminal[1, 1])
            new_terminal = np.array([
                [- (new_line[1]*ymin + new_line[2]) / new_line[0], ymin],
                [- (new_line[1]*ymax + new_line[2]) / new_line[0], ymax]
            ])
        new_lines.append(new_line)
        new_terminals.append(new_terminal)
    return new_lines, new_terminals, num_improved_lines
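grad_improve assumes a few more project helpers that are not shown: W2L (a width-to-length ratio constant), generate_lines, and get_dividing_pts_and_lines. A hedged sketch of the last one, assuming the (a, b, c) line convention a*x + b*y + c = 0 used throughout these examples:

import numpy as np

def get_dividing_pts_and_lines(line, terminal, num=8):
    # Hypothetical: sample `num` evenly spaced points strictly between the
    # two terminal points, and for each point build the line through it
    # perpendicular to `line` (swap the normal (a, b) into (b, -a)).
    a, b, c = line
    ts = np.linspace(0.0, 1.0, num + 2)[1:-1]           # skip the endpoints
    dpts = terminal[0, :] + ts[:, None] * (terminal[1, :] - terminal[0, :])
    dls = [(b, -a, -(b * x - a * y)) for x, y in dpts]  # perpendicular lines
    return dpts, dls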
Example #4
def eval_pairs(user_vecs, rel_mat, t, dist_func):
    mean, std, dist_max, dist_min = ut.get_stats(user_vecs, dist_func)
    pairs = find_pairs_above_threshold(rel_mat, t)
    num_pairs_found = 0
    dist_sum = 0
    for ui, uj in pairs:
        dist = ut.get_dist(user_vecs, ui, uj, dist_func)
        if dist is not None:
            num_pairs_found += 1
            norm_dist = np.divide(np.subtract(dist, mean), std)
            dist_sum += norm_dist
    if num_pairs_found == 0:  # avoid dividing by zero when no pair qualifies
        return 0.0
    return dist_sum / num_pairs_found
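eval_pairs averages z-score-normalised distances over the user pairs whose relevance clears the threshold t. find_pairs_above_threshold is not shown; a sketch assuming rel_mat is a square, symmetric relevance matrix:

import numpy as np

def find_pairs_above_threshold(rel_mat, t):
    # Hypothetical: return each unordered pair (i, j), i < j, whose
    # relevance score exceeds the threshold.
    rel_mat = np.asarray(rel_mat)
    ii, jj = np.triu_indices_from(rel_mat, k=1)
    keep = rel_mat[ii, jj] > t
    return list(zip(ii[keep], jj[keep]))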
Example #5
    def __init__(self,
                 useDevelopmentRelease=False,
                 useProposed=False,
                 forceLTS=False,
                 forceDownload=False):
        self._debug("MetaRelease.__init__() useDevel=%s useProposed=%s" %
                    (useDevelopmentRelease, useProposed))
        # force download instead of sending if-modified-since
        self.forceDownload = forceDownload
        # information about the available dists
        self.downloading = True
        self.new_dist = None
        self.current_dist_name = get_dist()
        self.no_longer_supported = None

        # default (if the conf file is missing)
        self.METARELEASE_URI = "http://changelogs.ubuntu.com/meta-release"
        self.METARELEASE_URI_LTS = "http://changelogs.ubuntu.com/meta-release-lts"
        self.METARELEASE_URI_UNSTABLE_POSTFIX = "-development"
        self.METARELEASE_URI_PROPOSED_POSTFIX = "-development"

        # check the meta-release config first
        parser = ConfigParser.ConfigParser()
        if os.path.exists(self.CONF_METARELEASE):
            try:
                parser.read(self.CONF_METARELEASE)
            except ConfigParser.Error, e:
                sys.stderr.write("ERROR: failed to read '%s':\n%s" %
                                 (self.CONF_METARELEASE, e))
                return
            # make changing the metarelease file and the location
            # for the files easy
            if parser.has_section("METARELEASE"):
                sec = "METARELEASE"
                for k in [
                        "URI", "URI_LTS", "URI_UNSTABLE_POSTFIX",
                        "URI_PROPOSED_POSTFIX"
                ]:
                    if parser.has_option(sec, k):
                        self._debug(
                            "%s: %s " %
                            (self.CONF_METARELEASE, parser.get(sec, k)))
                        setattr(self, "%s_%s" % (sec, k), parser.get(sec, k))
Example #6
def taxi():
    startloc = request.args.get('loc1')
    endloc = request.args.get('loc2')
    dist = utils.get_dist(startloc, endloc)
    price = "unknown price"
    print(dist)
    if dist is None:
        dist = "unknown miles"
    elif 'm' in dist:  # distance string reported in miles, e.g. "3.2 mi"
        index = dist.index('m') - 1
        dist = dist[:index]
        price = round(utils.findTaxiFare(float(dist)), 2)
    else:  # distance reported in feet, or unparsable
        dist = "unknown miles"
    return render_template('anal.html', dist=dist, price=price)
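utils.get_dist apparently returns a human-readable distance string, and utils.findTaxiFare converts miles to a fare. Neither is shown; a flat-plus-per-mile sketch, with rates that are purely illustrative:

def findTaxiFare(miles, base_fare=2.50, per_mile=2.00):
    # Hypothetical fare model: flat base charge plus a per-mile rate.
    return base_fare + per_mile * miles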
Example #8
def citire_fisier(filename):  # Romanian: "read file"
    with open(filename) as fp:
        lines = fp.readlines()
        lines = lines[3:]
        n = int(lines[0].split(" ")[2])
        lines = lines[3:-1]

        net = {}
        net['noNodes'] = n

        mat = []

        for i in range(n):
            mat.append([])
            for _ in range(n):
                mat[i].append(0)

        pos = []
        for line in lines:
            vect = line.split(" ")
            pos.append([int(vect[0]), int(vect[1]), int(vect[2])])

        for i in range(n):
            for j in range(n):
                mat[pos[i][0] - 1][pos[j][0] - 1] = int(
                    get_dist([pos[i][1], pos[i][2]], [pos[j][1], pos[j][2]]))
        net["mat"] = mat
        degrees = []
        noEdges = 0
        for i in range(n):
            d = 0
            for j in range(n):
                if (mat[i][j] == 1):
                    d += 1
                if (j > i):  # complete graph: every unordered pair counts as an edge
                    noEdges += 1
            degrees.append(d)
        net["noEdges"] = noEdges
        net["degrees"] = degrees
        return net
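The get_dist called above is presumably plain 2-D Euclidean distance between the [x, y] coordinate pairs read from the file; a minimal sketch:

import math

def get_dist(p1, p2):
    # assumed two-point Euclidean distance over [x, y] coordinate pairs
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])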
Example #9
    def __init__(self, new_dist, progress):
        self.new_dist = new_dist
        self.current_dist_name = get_dist()
        self._progress = progress
        # options to pass to the release upgrader when it is run
        self.run_options = []
Example #10
    def __init__(self):
        self.config = setting
        self.code = 1
        self.messages = []
        self.dist, self.version, self.release = health_check_utils.get_dist()
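Here get_dist reports the host operating system rather than a distance: a (distribution, version, release) triple. health_check_utils is not shown; a sketch that reads /etc/os-release on a modern Linux host:

def get_dist():
    # Hypothetical: parse distribution id, version and codename from
    # /etc/os-release (Linux only).
    info = {}
    with open("/etc/os-release") as fp:
        for line in fp:
            key, sep, value = line.strip().partition("=")
            if sep:
                info[key] = value.strip('"')
    return (info.get("ID", "unknown"),
            info.get("VERSION_ID", "unknown"),
            info.get("VERSION_CODENAME", "unknown"))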
Example #11
    def attack(self, inputs, targets):

        # GRADIENT ESTIMATION EVAL
        def get_grad_est(x, batch_lab, num_batches):
            losses = []
            grads = []
            for _ in range(num_batches):
                final_losses, grad_estimate = self.sess.run(
                    [self.final_losses, self.grad_estimate], {
                        self.img: x,
                        self.lab: batch_lab
                    })
                losses.append(final_losses)
                grads.append(grad_estimate)
            grads = np.array(grads)
            losses = np.array(losses)
            return losses.mean(), np.mean(grads, axis=0, keepdims=True)

        adv = np.array(inputs)
        query_images = np.zeros((len(inputs)))
        time_images = np.zeros((len(inputs)))

        succ = 0

        for i in range(len(inputs)):
            start = time.time()
            batch_data = inputs[i:i + 1]
            batch_lab = targets[i:i + 1]

            x = batch_data
            num_batches = 1
            max_lr = self.lr
            current_ep = self.epsilon * self.lambd
            num_queries = 0
            last_ls = []

            print('--------------------')
            for iteration in range(self.nb_iter):
                loss, pred, eval_adv, true_grad = self.sess.run(
                    [self.tloss, self.pred, self.eval_adv, self.gradients], {
                        self.img: x,
                        self.lab: batch_lab
                    })

                # Get zeroth-order gradient estimates
                l, grad = get_grad_est(x, batch_lab, num_batches)
                num_queries += num_batches * self.grad_est_batch_size * 2

                # LR Decaying
                current_lr = self.lr / (iteration + 1)**0.5

                grad_normalized = grad_normalization(grad, self.ord)
                #                 grad_normalized = grad_normalization(true_grad, self.ord)

                v = -current_ep * grad_normalized + batch_data
                d = v - x

                g = self.epsilon * np.sum(np.abs(true_grad)) - np.sum(
                    (batch_data - x) * true_grad)

                x = x + current_lr * d

                eta = x - batch_data
                x = batch_data + norm_ball_proj_inner(eta, self.ord,
                                                      self.epsilon)
                x = np.clip(x, self.clip_min, self.clip_max)

                succ += eval_adv

                if self.test:
                    if succ == 1:
                        print('succ, queries: ', num_queries)
                    print(g)
                    if g < 1:
                        break
                else:
                    if iteration % self.output_steps == 0:
                        dist = get_dist(x, batch_data, self.ord)
                        print(
                            "Iter: {}, Loss: {:0.5f}, Queries: {},  Dist: {:0.5f}, Eps:  {}, lr: {:.5f}, Pred: {},  Eval: {}, g: {}"
                            .format(iteration, l, num_queries, dist,
                                    current_ep, current_lr, pred, eval_adv, g))
                    if eval_adv:
                        break

            time_images[i] = time.time() - start
            query_images[i] = num_queries
            if eval_adv:
                adv[i] = x
            print('Succ ', succ, ' / ', (i + 1), ' rate: ', succ / (i + 1))
        print('Total Succ ', succ, ' / ', len(inputs), ' rate: ',
              succ / len(inputs))
        return adv, query_images, time_images, succ / len(inputs)
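Both attack examples (this one and the next) lean on two helpers that are not shown: grad_normalization, which turns a gradient into the steepest-ascent direction for the chosen norm, and norm_ball_proj_inner, which projects the perturbation back into the epsilon-ball. A sketch assuming ord is np.inf or 2 and batched arrays:

import numpy as np

def grad_normalization(grad, ord):
    # Hypothetical: steepest-ascent direction inside the unit ball of the norm.
    if ord == np.inf:
        return np.sign(grad)
    flat = grad.reshape(len(grad), -1)
    norms = np.linalg.norm(flat, axis=1).reshape(-1, *([1] * (grad.ndim - 1)))
    return grad / np.maximum(norms, 1e-12)

def norm_ball_proj_inner(eta, ord, eps):
    # Hypothetical: scale the perturbation eta back into the eps-ball.
    if ord == np.inf:
        return np.clip(eta, -eps, eps)
    flat = eta.reshape(len(eta), -1)
    norms = np.linalg.norm(flat, axis=1).reshape(-1, *([1] * (eta.ndim - 1)))
    return eta * np.minimum(1.0, eps / np.maximum(norms, 1e-12))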
Example #12
    def attack(self, inputs, targets):
        eps = eps_search(self.epsilon, self.ord)
        adv = np.array(inputs)
        time_images = np.zeros((len(inputs)))
        stop_iter = np.zeros((len(inputs)))

        index_set = np.arange(0, len(inputs))

        for ep in eps:
            print('Searching for ep @', ep)
            if len(index_set) != 0:
                preds = []
                adv_remain = []
                time_remain = []
                stop_iter_remain = []

                current_ep = ep * self.lambd

                for i in range(0, len(index_set), self.batch_size):
                    start_time = time.time()
                    start = i
                    end = min(i + self.batch_size, len(index_set))  # bound by the remaining set, not the full input
                    ind = index_set[start:end]
                    if len(ind) < self.batch_size:
                        ind = np.pad(ind, (0, self.batch_size - len(ind)), mode='constant', constant_values=0)

                    batch_data = inputs[ind]
                    batch_lab = targets[ind]

                    x = np.copy(batch_data)

                    last_ls = []
                    max_lr = self.lr

                    print('--------------------')
                    for iteration in range(self.nb_iter):
                        loss, pred, eval_adv, grad = self.sess.run([self.tloss, self.pred, self.eval_adv, self.gradients], {self.img: x, self.lab: batch_lab})

                        grad_normalized = grad_normalization(grad, self.ord)

                        v = - current_ep * grad_normalized + batch_data
                        d = v - x

                        g = np.sum(d * -grad)  # Frank-Wolfe-style duality gap, used as a stopping criterion

                        current_lr = max_lr

                        x = x + current_lr * d

                        eta = x - batch_data
                        x = batch_data + norm_ball_proj_inner(eta, self.ord, ep)
                        x = np.clip(x, self.clip_min, self.clip_max)

                        if self.test:
                            print(g)
                            if g < 1:
                                break
                        else:
                            if iteration % self.output_steps == 0:
                                dist = get_dist(x, batch_data, self.ord)
                                print('Iter: ', iteration, 'Loss: ', loss, ' Dist: ', dist, ' Pred: ', pred, ' Eval: ', eval_adv.all(), ' g: ', g)
                            if eval_adv.all():
                                break

                        last_ls.append(loss)
                        last_ls = last_ls[-5:]
                        if last_ls[-1] > 0.999 * last_ls[0] and len(last_ls) == 5:
                            print(last_ls)
                            print("Early stopping because there is no improvement")
                            break

                    preds.extend(eval_adv)
                    adv_remain.extend(x)
                    time_remain.extend(np.ones(len(batch_lab)) * (time.time() - start_time) / len(batch_lab))
                    stop_iter_remain.append(iteration)
                preds = np.array(preds)
                adv_remain = np.array(adv_remain)
                time_remain = np.array(time_remain)
                stop_iter_remain = np.array(stop_iter_remain)
                succ_ind = [j for j in range(len(index_set)) if preds[j]]
                if self.epsilon == 0.:
                    adv[index_set[succ_ind]] = adv_remain[succ_ind]
                    time_images[index_set[succ_ind]] = time_remain[succ_ind]
                    stop_iter[index_set[succ_ind]] = stop_iter_remain[succ_ind]
                else:
                    adv = adv_remain
                    time_images = time_remain
                    stop_iter = stop_iter_remain
                index_set = np.delete(index_set, succ_ind, 0)
                print('Remaining: ', len(index_set))
                print('Succ ', len(inputs) - len(index_set), ' / ', len(inputs), ' rate: ', 1 - len(index_set) / len(inputs))

        return adv, time_images, 1 - len(index_set) / len(inputs), stop_iter
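The get_dist used by both attacks measures the perturbation under the attack norm; a one-line sketch under the same ord convention:

import numpy as np

def get_dist(x, x0, ord):
    # assumed: norm of the flattened perturbation (ord = np.inf, 2, ...)
    return np.linalg.norm((np.asarray(x) - np.asarray(x0)).ravel(), ord)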
Example #13
def main():
    cfg = get_default_config()

    # init detector network
    print('init detector network...')
    odapi = DetectorAPI(path_to_ckpt=cfg.detector.load_weights)
    print("done!")

    # init intel camera
    print('init intel camera...')
    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    # Start streaming
    pipe.start(config)
    print("done!")

    # init banks
    print("init gallary...")
    body_bank = []  # gallery array of persons
    body_bank_dist = []  # last match distance per person in the gallery
    # body_bank_bb = []
    body_im_array = []
    print("done!")

    if cfg.visualisation.key:
        print("start visualisation mode...")
        plt.ion()
        print("done!")

    curTime = 0
    with tf.Session() as sess:
        args_save = True
        if args_save:
            video_writer = cv2.VideoWriter(cfg.video.path,
                                           cv2.VideoWriter_fourcc(*'XVID'), 6,
                                           (cfg.image.width, cfg.image.height))

        while True:
            curTime += 1
            #print(curTime)

            #-----------------------------------------

            # pipe to get image from intel cam
            frameset = pipe.wait_for_frames()
            color_frame = frameset.get_color_frame()
            color = np.asanyarray(color_frame.get_data()).copy()
            #print(np.shape(color), type(color))
            # rgb_frame = make_rgb(color)

            frameL = cv2.resize(color, (cfg.image.width, cfg.image.height))

            # -----------------------------------------
            # choose start frame point
            if curTime < 10:
                continue
            # choose fps
            if curTime % 1 != 0:
                continue
            # -----------------------------------------
            if frameL is None:
                break
            # -----------------------------------------

            # -----------------------------------------
            # get bbox of objects
            detections, scores, classes, num = odapi.processFrame(frameL)

            # get bbox of persons
            mask = np.array([
                a & b for a, b in zip(
                    np.array(classes) == 1,
                    np.array(scores) > cfg.detector.threshold)
            ])
            detections = np.array(detections)
            if np.sum(mask) != 0:
                detections = detections[mask]
            else:
                detections = []

            # apply IoU filter (suppress overlapping detections)
            detections_iou = []
            for slow_loop_idx in range(len(detections)):

                if detections[slow_loop_idx][0] == -1:
                    continue

                for fast_loop_idx in range(len(detections)):
                    if detections[fast_loop_idx][0] == -1:
                        continue

                    r = bb_intersection_over_union(detections[slow_loop_idx],
                                                   detections[fast_loop_idx])
                    if r > cfg.detector.iou_threshold and slow_loop_idx != fast_loop_idx:
                        detections[fast_loop_idx][0] = -1

            for box in detections:
                if box[0] == -1:
                    continue
                detections_iou.append(box)

            # save detections after filter
            detections = detections_iou

            # -----------------------------------------

            # start tracking part
            if len(detections) != 0:

                # init body_bank
                result_arr = []  # ids of detected persons
                result_features = []  # features of detected persons, indexed like result_arr
                result_dist_arr = []  # distances of detected persons, indexed like result_arr
                body_bank_tmp = body_bank.copy()  # gallery copy; matched persons are excluded as the search proceeds
                len_body_bank_tmp = len(body_bank_tmp)  # decreasing length of body_bank_tmp

                # search loop
                for i, body_detection_xy in enumerate(detections):
                    # if body bank (gallery) is not empty
                    if len(body_bank) != 0:

                        # if tmp body bank (gallery) is empty but we still have detected persons
                        if len_body_bank_tmp == 0:

                            # get coordinates of person
                            p = detections[i]
                            p = check_coords(p)

                            # do preparations for image to fit it into reid network
                            img = np.transpose(
                                frameL[int(p[0]):int(p[2]),
                                       int(p[1]):int(p[3])]).astype('f') / 255.
                            img = np.expand_dims(img, axis=0)

                            # extract features
                            features_img_query = F.normalize(
                                extract_features(torch.from_numpy(
                                    img).cuda())).cpu().numpy()[0]
                            # add features of the new person to the gallery

                            body_bank.append(features_img_query)
                            result = len(body_bank)
                            result_arr.append(result)
                            #body_bank_bb.append(detections[i])
                            # init distance of new person
                            body_bank_dist.append(0)
                            continue

                        # get distance of person to persons in gallery
                        distmat_arr, query_f = get_dist(
                            [np.array(body_detection_xy)], frameL,
                            body_bank_tmp)

                        # get id of person in gallery
                        result = np.argmin(distmat_arr, 1)[0]
                        #print(distmat_arr[0][result])

                        # if the minimum distance is below cfg.model.threshold it is the same person
                        if (distmat_arr[0][result] < cfg.model.threshold):
                            # add id to result array
                            result_arr.append(result)

                            # exclude person from search
                            body_bank_tmp[result] = []

                            # decrease length of tmp body bank
                            len_body_bank_tmp -= 1

                            # save features and distance
                            result_features.append(query_f[result])
                            result_dist_arr.append(np.min(distmat_arr, 1)[0])
                        else:
                            # save features and distance
                            body_bank.append(query_f[result])
                            body_bank_dist.append(0)

                    else:
                        print("add new user")
                        #result = len(body_bank) + 1

                        # get coordinates of person
                        p = detections[i]
                        p = check_coords(p)

                        # do preparations for image to fit it into reid network
                        img = np.transpose(
                            frameL[int(p[0]):int(p[2]),
                                   int(p[1]):int(p[3])]).astype('f') / 255.
                        img = np.expand_dims(img, axis=0)

                        # extract features
                        features_img_query = F.normalize(
                            extract_features(
                                torch.from_numpy(img).cuda())).cpu().numpy()[0]

                        # save features and distance
                        body_bank.append(features_img_query)
                        result = len(body_bank)
                        result_arr.append(result)
                        #body_bank_bb.append(detections[i])
                        body_bank_dist.append(0)

                # ax for visualisation of features
                ax = [0] * len(result_arr)
                if cfg.visualisation.key:
                    if len(body_bank) != 0:
                        fig, ax = plt.subplots(
                            nrows=len(body_bank),
                            ncols=3,
                            gridspec_kw={'width_ratios': [2, 10, 10]},
                            figsize=(10, 2))

                        if len(body_bank) == 1:
                            ax = [ax]

                for i, pack in enumerate(zip(result_arr, result_dist_arr, ax)):
                    id, result, row = pack
                    # add new human if % is too low

                    # refresh bank
                    if cfg.visualisation.key:
                        if len(body_bank) != 0:
                            for idx, col in enumerate(row):
                                if idx == 0:
                                    p = detections[i]
                                    p = check_coords(p)
                                    col.imshow(frameL[int(p[0]):int(p[2]),
                                                      int(p[1]):int(p[3])])
                                if idx == 1:
                                    col.plot(range(0,
                                                   len(body_bank[id]) + 1),
                                             list(body_bank[id]) + [0.40])
                                if idx == 2:
                                    print(
                                        int(
                                            "".join(
                                                str(int(x)) for x in [
                                                    x > 0.1 and y > 0.1
                                                    for x, y in zip(
                                                        body_bank[id],
                                                        result_features[i])
                                                ]), 2))

                    # refresh the gallery entry if the match distance is good
                    if result < cfg.model.threshold:
                        p = detections[i]
                        p = check_coords(p)
                        body_bank_dist[id] = result  # averaged update "(result + body_bank_dist[id]) / 2" is disabled
                        body_bank[id] = result_features[i]  # averaged update "(result_features[i] + body_bank[id]) / 2" is disabled

                    text = "_ID_{} <{}>".format(str(id), str(round(result, 3)))
                    cv2.putText(frameL, text, (int(detections[i][1]) - 10,
                                               int(detections[i][0]) - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

            if cfg.visualisation.key:
                plt.show()
                plt.pause(4)
                plt.close()

            frameL = frameL / 255
            for p in detections:
                cv2.rectangle(frameL, (p[1], p[0]), (p[3], p[2]),
                              (50, 50, 250), 2)

            cv2.imshow("L", frameL)
            #print("shape ", frameL.shape)

            if args_save:
                video_writer.write((frameL * 255).astype(np.uint8))
            cv2.waitKey(1)
Example #14
    def find_path(lines, paths, paths_color):
        x = 0
        y = 0
        way_points_draw = np.empty((0, 3))
        open_list = np.arange(0, len(lines) + len(paths))
        prev_color = None

        for i in range(len(lines) + len(paths)):
            min_dist = float('inf')
            arg_min_dist = open_list[0]
            in_same_direction = False
            is_spline = False
            tie_list = np.empty((0, 2))

            for j in open_list:
                is_j_spline = j >= len(lines)
                if is_j_spline:
                    dist_u = get_dist(x, y, *paths[j - len(lines)][0][:2])
                    dist_v = get_dist(x, y, *paths[j - len(lines)][-1][:2])
                else:
                    dist_u = get_dist(x, y, *lines[j].u)
                    dist_v = get_dist(x, y, *lines[j].v)

                if min_dist > dist_u:
                    min_dist = dist_u
                    arg_min_dist = j
                    in_same_direction = True
                    is_spline = is_j_spline

                    if tie_list.size > 0:
                        tie_list = np.empty((0, 2))
                elif min_dist == dist_u:
                    # keep track of ties
                    tie_list = np.vstack([tie_list, [j, True]])

                if min_dist > dist_v:
                    min_dist = dist_v
                    arg_min_dist = j
                    in_same_direction = False
                    is_spline = is_j_spline

                    if tie_list.size > 0:
                        tie_list = np.empty((0, 2))
                elif min_dist == dist_v:
                    # keep track of ties
                    tie_list = np.vstack([tie_list, [j, False]])

            if tie_list.size > 0:
                # break ties

                if prev_color is not None:
                    same_color_filter = []

                    for row_idx, tie_index in enumerate(tie_list[:, 0].astype(int)):
                        if tie_index >= len(lines):
                            curr_color = paths_color[tie_index - len(lines)]
                        else:
                            curr_color = lines[tie_index].color

                        if curr_color == prev_color:
                            same_color_filter.append(row_idx)  # store the row position, not the line index

                    if len(same_color_filter) > 0:
                        tie_list = tie_list[same_color_filter]

                arg_min_dist, in_same_direction = tie_list[0]
                arg_min_dist = int(arg_min_dist)
                in_same_direction = bool(in_same_direction)
                is_spline = arg_min_dist >= len(lines)

            if is_spline:
                arg_min_dist -= len(lines)

                if in_same_direction:
                    x, y = paths[arg_min_dist][-1][:2]
                else:
                    x, y = paths[arg_min_dist][0][:2]

                open_list = open_list[open_list != (arg_min_dist + len(lines))]
                prev_color = paths_color[arg_min_dist]
            else:
                if in_same_direction:
                    x, y = lines[arg_min_dist].v
                else:
                    x, y = lines[arg_min_dist].u

                open_list = open_list[open_list != arg_min_dist]
                prev_color = lines[arg_min_dist].color

            way_points_draw = np.vstack([
                way_points_draw, [arg_min_dist, in_same_direction, is_spline]
            ])

        return way_points_draw
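The four-argument get_dist used by find_path is presumably point-to-point Euclidean distance:

import math

def get_dist(x1, y1, x2, y2):
    # assumed four-argument Euclidean distance between (x1, y1) and (x2, y2)
    return math.hypot(x2 - x1, y2 - y1)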
Example #15
def get_sim(a, b, alist=None):
    if alist is None:
        alist = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.]
    s = [get_dist(a, b, i) for i in alist]
    return s
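A usage sketch, assuming the three-argument get_dist(a, b, i) blends two distance measures with mixing weight i (the real implementation is not shown):

def get_dist(a, b, w):
    # Hypothetical blend: weight w interpolates between two distance measures.
    return (1.0 - w) * abs(a - b) + w * (a - b) ** 2

print(get_sim(2.0, 5.0, None))  # distances for w = 0.0, 0.1, ..., 1.0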
Example #16
def read(humfile, sonpath, cs2cs_args, c, draft, doplot, t, bedpick, flip_lr,
         model, calc_bearing, filt_bearing, chunk):  #cog = 1,
    '''
    Read a .DAT and associated set of .SON files recorded by a Humminbird(R)
    instrument.

    Parse the data into a set of memory mapped files that will
    subsequently be used by the other functions of the PyHum module.

    Export time-series data and metadata in other formats.

    Create a kml file for visualising boat track

    Syntax
    ----------
    [] = PyHum.read(humfile, sonpath, cs2cs_args, c, draft, doplot, t, bedpick, flip_lr, model, calc_bearing, filt_bearing, chunk)

    Parameters
    ------------
    humfile : str
       path to the .DAT file
    sonpath : str
       path where the *.SON files are
    cs2cs_args : int, *optional* [Default="epsg:26949"]
       arguments to create coordinates in a projected coordinate system
       this argument gets given to pyproj to turn wgs84 (lat/lon) coordinates
       into any projection supported by the proj.4 libraries
    c : float, *optional* [Default=1450.0]
       speed of sound in water (m/s). Defaults to a value of freshwater
    draft : float, *optional* [Default=0.3]
       draft from water surface to transducer face (m)
    doplot : float, *optional* [Default=1]
       if 1, plots will be made
    t : float, *optional* [Default=0.108]
       length of transducer array (m).
       Default value is that of the 998 series Humminbird(R)
    bedpick : int, *optional* [Default=1]
       if 1, bedpicking will be carried out automatically
       if 0, user will be prompted to pick the bed location on screen
    flip_lr : int, *optional* [Default=0]
       if 1, port and starboard scans will be flipped
       (for situations where the transducer is flipped 180 degrees)
    model: int, *optional* [Default=998]
       A 3 or 4 number code indicating the model number
       Examples: 998, 997, 1198, 1199
    calc_bearing : float, *optional* [Default=0]
       if 1, bearing will be calculated from coordinates
    filt_bearing : float, *optional* [Default=0]
       if 1, bearing will be filtered
    chunk : str, *optional* [Default='d100' (distance, 100 m)]
       letter, followed by a number.
       There are the following letter options:
       'd' - parse chunks based on distance, then number which is distance in m
       'p' - parse chunks based on number of pings, then number which is number of pings
       'h' - parse chunks based on change in heading, then number which is the change in heading in degrees
       '1' - process just 1 chunk

    Returns
    ---------
    sonpath+base+'_data_port.dat': memory-mapped file
        contains the raw echogram from the port side
        sidescan sonar (where present)

    sonpath+base+'_data_star.dat': memory-mapped file
        contains the raw echogram from the starboard side
        sidescan sonar (where present)

    sonpath+base+'_data_dwnhi.dat': memory-mapped file
        contains the raw echogram from the high-frequency
        echosounder (where present)

    sonpath+base+'_data_dwnlow.dat': memory-mapped file
        contains the raw echogram from the low-frequency
        echosounder (where present)

    sonpath+base+"trackline.kml": google-earth kml file
        contains the trackline of the vessel during data
        acquisition

    sonpath+base+'rawdat.csv': comma separated value file
        contains time-series data. columns corresponding to
        longitude
        latitude
        easting (m)
        northing (m)
        depth to bed (m)
        alongtrack cumulative distance (m)
        vessel heading (deg.)

    sonpath+base+'meta.mat': .mat file
        matlab format file containing a dictionary object
        holding metadata information. Fields are:
        e : ndarray, easting (m)
        n : ndarray, northing (m)
        es : ndarray, low-pass filtered easting (m)
        ns : ndarray, low-pass filtered northing (m)
        lat : ndarray, latitude
        lon : ndarray, longitude
        shape_port : tuple, shape of port scans in memory mapped file
        shape_star : tuple, shape of starboard scans in memory mapped file
        shape_hi : tuple, shape of high-freq. scans in memory mapped file
        shape_low : tuple, shape of low-freq. scans in memory mapped file
        dep_m : ndarray, depth to bed (m)
        dist_m : ndarray, distance along track (m)
        heading : ndarray, heading of vessel (deg. N)
        pix_m: float, size of 1 pixel in across-track dimension (m)
        bed : ndarray, depth to bed (m)
        c : float, speed of sound in water (m/s)
        t : float, length of sidescan transducer array (m)
        spd : ndarray, vessel speed (m/s)
        time_s : ndarray, time elapsed (s)
        caltime : ndarray, unix epoch time (s)
    '''
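    # A representative call, using the sample values from the debugging notes
    # further down (test.DAT / test_data / epsg:26949 / chunk='d100') and the
    # documented defaults for the remaining parameters:
    #   PyHum.read('test.DAT', 'test_data', cs2cs_args='epsg:26949', c=1450.0,
    #              draft=0.3, doplot=1, t=0.108, bedpick=1, flip_lr=0,
    #              model=998, calc_bearing=0, filt_bearing=0, chunk='d100')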

    # prompt user to supply file if no input file given
    if not humfile:
        print('An input file is required!!!!!!')
        Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
        humfile = askopenfilename(filetypes=[("DAT files", "*.DAT")])

    # prompt user to supply directory if no input sonpath is given
    if not sonpath:
        print('A *.SON directory is required!!!!!!')
        Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
        sonpath = askdirectory()

    # print given arguments to screen and convert data type where necessary
    if humfile:
        print('Input file is %s' % (humfile))

    if sonpath:
        print('Son files are in %s' % (sonpath))

    if cs2cs_args:
        print('cs2cs arguments are %s' % (cs2cs_args))

    if draft:
        draft = float(draft)
        print('Draft: %s' % (str(draft)))

    if c:
        c = float(c)
        print('Celerity of sound: %s m/s' % (str(c)))

    if doplot:
        doplot = int(doplot)
        if doplot == 0:
            print("Plots will not be made")

    if flip_lr:
        flip_lr = int(flip_lr)
        if flip_lr == 1:
            print("Port and starboard will be flipped")

    if t:
        t = np.asarray(t, float)
        print('Transducer length is %s m' % (str(t)))

    if bedpick:
        bedpick = np.asarray(bedpick, int)
        if bedpick == 1:
            print('Bed picking is auto')
        elif bedpick == 0:
            print('Bed picking is manual')
        else:
            print('User will be prompted per chunk about bed picking method')

    if chunk:
        chunk = str(chunk)
        if chunk[0] == 'd':
            chunkmode = 1
            chunkval = int(chunk[1:])
            print('Chunks based on distance of %s m' % (str(chunkval)))
        elif chunk[0] == 'p':
            chunkmode = 2
            chunkval = int(chunk[1:])
            print('Chunks based on %s pings' % (str(chunkval)))
        elif chunk[0] == 'h':
            chunkmode = 3
            chunkval = int(chunk[1:])
            print('Chunks based on heading deviation of %s degrees' %
                  (str(chunkval)))
        elif chunk[0] == '1':
            chunkmode = 4
            chunkval = 1
            print('Only 1 chunk will be produced')
        else:
            print(
                "Chunk mode not understood - should be 'd', 'p', or 'h' - using defaults"
            )
            chunkmode = 1
            chunkval = 100
            print('Chunks based on distance of %s m' % (str(chunkval)))

    if model:
        try:
            model = int(model)
            print("Data is from the %s series" % (str(model)))
        except:
            if model == 'onix':
                model = 0
                print("Data is from the ONIX series")
            elif model == 'helix':
                model = 1
                print("Data is from the HELIX series")
            elif model == 'mega':
                model = 2
                print("Data is from the MEGA series")
#    if cog:
#       cog = int(cog)
#       if cog==1:
#          print "Heading based on course-over-ground"

    if calc_bearing:
        calc_bearing = int(calc_bearing)
        if calc_bearing == 1:
            print("Bearing will be calculated from coordinates")

    if filt_bearing:
        filt_bearing = int(filt_bearing)
        if filt_bearing == 1:
            print("Bearing will be filtered")

    ## for debugging
    #humfile = r"test.DAT"; sonpath = "test_data"
    #cs2cs_args = "epsg:26949"; doplot = 1; draft = 0
    #c=1450; bedpick=1; fliplr=1; chunk = 'd100'
    #model=998; cog=1; calc_bearing=0; filt_bearing=0

    #if model==2:
    #   f = 1000
    #else:
    f = 455

    try:
        print(
            "Checking the epsg code you have chosen for compatibility with Basemap ... "
        )
        from mpl_toolkits.basemap import Basemap
        m = Basemap(projection='merc',
                    epsg=cs2cs_args.split(':')[1],
                    resolution='i',
                    llcrnrlon=10,
                    llcrnrlat=10,
                    urcrnrlon=30,
                    urcrnrlat=30)
        del m
        print("... epsg code compatible")
    except ValueError:
        print(
            "Error: the epsg code you have chosen is not compatible with Basemap"
        )
        print(
            "please choose a different epsg code (http://spatialreference.org/)"
        )
        print("program will now close")
        sys.exit()

    # start timer
    if os.name == 'posix':  # true if linux/mac or cygwin on windows
        start = time.time()
    else:  # windows
        start = time.clock()

    # if son path name supplied has no separator at end, put one on
    if sonpath[-1] != os.sep:
        sonpath = sonpath + os.sep

    # get the SON files from this directory
    sonfiles = glob.glob(sonpath + '*.SON')
    if not sonfiles:
        sonfiles = glob.glob(os.getcwd() + os.sep + sonpath + '*.SON')

    base = humfile.split('.DAT')  # get base of file name for output
    base = base[0].split(os.sep)[-1]

    # remove underscores, negatives and spaces from basename
    base = humutils.strip_base(base)

    print("WARNING: Because files have to be read in byte by byte,")
    print("this could take a very long time ...")

    #reading each sonfile in parallel should be faster ...
    try:
        o = Parallel(n_jobs=np.min([len(sonfiles), cpu_count()]), verbose=0)(
            delayed(getscans)(sonfiles[k], humfile, c, model, cs2cs_args)
            for k in range(len(sonfiles)))
        X, Y, A, B = zip(*o)

        for k in range(len(Y)):
            if Y[k] == 'sidescan_port':
                dat = A[k]  #data.gethumdat()
                metadat = B[k]  #data.getmetadata()
                if flip_lr == 0:
                    data_port = X[k].astype('int16')
                else:
                    data_star = X[k].astype('int16')

            elif Y[k] == 'sidescan_starboard':
                if flip_lr == 0:
                    data_star = X[k].astype('int16')
                else:
                    data_port = X[k].astype('int16')

            elif Y[k] == 'down_lowfreq':
                data_dwnlow = X[k].astype('int16')

            elif Y[k] == 'down_highfreq':
                data_dwnhi = X[k].astype('int16')

            elif Y[k] == 'down_vhighfreq':  #hopefully this only applies to mega systems
                data_dwnhi = X[k].astype('int16')

        del X, Y, A, B, o
        old_pyread = 0

        if 'data_port' not in locals():
            data_port = ''
            print("portside scan not available")

        if 'data_star' not in locals():
            data_star = ''
            print("starboardside scan not available")

        if 'data_dwnhi' not in locals():
            data_dwnhi = ''
            print("high-frq. downward scan not available")

        if 'data_dwnlow' not in locals():
            data_dwnlow = ''
            print("low-frq. downward scan not available")

    except:  # fall back to the older pyread if the parallelised version fails

        print(
            "something went wrong with the parallelised version of pyread ...")

        try:
            import pyread
        except:
            from . import pyread

        data = pyread.pyread(sonfiles, humfile, c, model, cs2cs_args)

        dat = data.gethumdat()

        metadat = data.getmetadata()

        old_pyread = 1

    nrec = len(metadat['n'])

    metadat['instr_heading'] = metadat['heading'][:nrec]

    #metadat['heading'] = humutils.get_bearing(calc_bearing, filt_bearing, cog, metadat['lat'], metadat['lon'], metadat['instr_heading'])

    try:
        # integer window length (the original Python 2 "/" was floor division)
        es = humutils.runningMeanFast(metadat['e'][:nrec],
                                      len(metadat['e'][:nrec]) // 100)
        ns = humutils.runningMeanFast(metadat['n'][:nrec],
                                      len(metadat['n'][:nrec]) // 100)
    except:
        es = metadat['e'][:nrec]
        ns = metadat['n'][:nrec]

    metadat['es'] = es
    metadat['ns'] = ns

    try:
        trans = pyproj.Proj(init=cs2cs_args)
    except:
        trans = pyproj.Proj(cs2cs_args.lstrip(), inverse=True)

    lon, lat = trans(es, ns, inverse=True)
    metadat['lon'] = lon
    metadat['lat'] = lat

    metadat['heading'] = humutils.get_bearing(calc_bearing, filt_bearing,
                                              metadat['lat'], metadat['lon'],
                                              metadat['instr_heading'])  #cog

    dist_m = humutils.get_dist(lat, lon)
    metadat['dist_m'] = dist_m

    if calc_bearing == 1:  # recalculate speed, m/s
        ds = np.gradient(np.squeeze(metadat['time_s']))
        dx = np.gradient(np.squeeze(metadat['dist_m']))
        metadat['spd'] = dx[:nrec] / ds[:nrec]

    # theta at 3dB in the horizontal
    theta3dB = np.arcsin(c / (t * (f * 1000)))
    #resolution of 1 sidescan pixel to nadir
    ft = (np.pi / 2) * (1 / theta3dB)  #/ (f/455)

    dep_m = humutils.get_depth(metadat['dep_m'][:nrec])

    if old_pyread == 1:  #older pyread version

        # port scan
        try:
            if flip_lr == 0:
                data_port = data.getportscans().astype('int16')
            else:
                data_port = data.getstarscans().astype('int16')
        except:
            data_port = ''
            print("portside scan not available")

    if data_port != '':

        Zt, ind_port = makechunks_scan(chunkmode, chunkval, metadat, data_port,
                                       0)
        del data_port

        ## create memory mapped file for Z
        shape_port = io.set_mmap_data(sonpath, base, '_data_port.dat', 'int16',
                                      Zt)

        ##we are only going to access the portion of memory required
        port_fp = io.get_mmap_data(sonpath, base, '_data_port.dat', 'int16',
                                   shape_port)

    if old_pyread == 1:  #older pyread version
        # starboard scan
        try:
            if flip_lr == 0:
                data_star = data.getstarscans().astype('int16')
            else:
                data_star = data.getportscans().astype('int16')
        except:
            data_star = ''
            print("starboardside scan not available")

    if data_star != '':

        Zt, ind_star = makechunks_scan(chunkmode, chunkval, metadat, data_star,
                                       1)
        del data_star

        # create memory mapped file for Z
        shape_star = io.set_mmap_data(sonpath, base, '_data_star.dat', 'int16',
                                      Zt)

        star_fp = io.get_mmap_data(sonpath, base, '_data_star.dat', 'int16',
                                   shape_star)

    if 'star_fp' in locals() and 'port_fp' in locals():
        # check that port and starboard are same size
        # and trim if not
        if np.shape(star_fp) != np.shape(port_fp):
            print(
                "port and starboard scans are different sizes ... rectifying")
            if np.shape(port_fp[0])[1] > np.shape(star_fp[0])[1]:
                tmp = port_fp.copy()
                tmp2 = np.empty_like(star_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(star_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_port = io.set_mmap_data(sonpath, base, '_data_port2.dat',
                                              'int16', tmp2)
                #shape_star = shape_port.copy()
                shape_star = tuple(np.asarray(shape_port).copy())

                ##we are only going to access the portion of memory required
                port_fp = io.get_mmap_data(sonpath, base, '_data_port2.dat',
                                           'int16', shape_port)

                ind_port = list(ind_port)
                ind_port[-1] = np.shape(star_fp[0])[1]
                ind_port = tuple(ind_port)

            elif np.shape(port_fp[0])[1] < np.shape(star_fp[0])[1]:
                tmp = star_fp.copy()
                tmp2 = np.empty_like(port_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(port_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_port = io.set_mmap_data(sonpath, base, '_data_star2.dat',
                                              'int16', tmp2)
                #shape_star = shape_port.copy()
                shape_star = tuple(np.asarray(shape_port).copy())

                #we are only going to access the portion of memory required
                star_fp = io.get_mmap_data(sonpath, base, '_data_star2.dat',
                                           'int16', shape_star)

                ind_star = list(ind_star)
                ind_star[-1] = np.shape(port_fp[0])[1]
                ind_star = tuple(ind_star)

    if old_pyread == 1:  #older pyread version
        # low-freq. sonar
        try:
            data_dwnlow = data.getlowscans().astype('int16')
        except:
            data_dwnlow = ''
            print("low-freq. scan not available")

    if data_dwnlow != '':

        Zt, ind_low = makechunks_scan(chunkmode, chunkval, metadat,
                                      data_dwnlow, 2)
        del data_dwnlow

        # create memory mapped file for Z
        shape_low = io.set_mmap_data(sonpath, base, '_data_dwnlow.dat',
                                     'int16', Zt)

        ##we are only going to access the portion of memory required
        dwnlow_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat',
                                     'int16', shape_low)

    if old_pyread == 1:  #older pyread version
        # hi-freq. sonar
        try:
            data_dwnhi = data.gethiscans().astype('int16')
        except:
            data_dwnhi = ''
            print("high-freq. scan not available")

    if data_dwnhi != '':

        Zt, ind_hi = makechunks_scan(chunkmode, chunkval, metadat, data_dwnhi,
                                     3)
        del data_dwnhi

        # create memory mapped file for Z
        shape_hi = io.set_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16',
                                    Zt)

        dwnhi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16',
                                    shape_hi)

    if 'dwnhi_fp' in locals() and 'dwnlow_fp' in locals():
        # check that low and high are same size
        # and trim if not
        if (np.shape(dwnhi_fp) != np.shape(dwnlow_fp)) and (chunkmode != 4):
            print("dwnhi and dwnlow are different sizes ... rectifying")
            if np.shape(dwnhi_fp[0])[1] > np.shape(dwnlow_fp[0])[1]:
                tmp = dwnhi_fp.copy()
                tmp2 = np.empty_like(dwnlow_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(dwnlow_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_low = io.set_mmap_data(sonpath, base, '_data_dwnhi2.dat',
                                             'int16', tmp2)
                #shape_hi = shape_low.copy()
                shape_hi = tuple(np.asarray(shape_low).copy())

                ##we are only going to access the portion of memory required
                dwnhi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi2.dat',
                                            'int16', shape_hi)

                ind_hi = list(ind_hi)
                ind_hi[-1] = np.shape(dwnlow_fp[0])[1]
                ind_hi = tuple(ind_hi)

            elif np.shape(dwnhi_fp[0])[1] < np.shape(dwnlow_fp[0])[1]:
                tmp = dwnlow_fp.copy()
                tmp2 = np.empty_like(dwnhi_fp)
                for k in range(len(tmp)):
                    tmp2[k] = tmp[k][:, :np.shape(dwnhi_fp[k])[1]]
                del tmp

                # create memory mapped file for Z
                shape_low = io.set_mmap_data(sonpath, base,
                                             '_data_dwnlow2.dat', 'int16',
                                             tmp2)
                shape_hi = tuple(np.asarray(shape_low).copy())

                ##we are only going to access the portion of memory required
                dwnlow_fp = io.get_mmap_data(sonpath, base,
                                             '_data_dwnlow2.dat', 'int16',
                                             shape_low)

                ind_low = list(ind_low)
                ind_low[-1] = np.shape(dwnhi_fp[0])[1]
                ind_low = tuple(ind_low)
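    # both trim branches do the same thing symmetrically: the wider channel is
    # cropped chunk-by-chunk to the narrower one's ping count, i.e. effectively
    #   tmp2[k] = tmp[k][:, :min_width]
    # and ind_hi[-1] / ind_low[-1] are updated to record the common chunk width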

    if old_pyread == 1:  #older pyread version
        del data

    if ('shape_port' in locals()) and (chunkmode != 4):
        metadat['shape_port'] = shape_port
        nrec = metadat['shape_port'][0] * metadat['shape_port'][2]
    elif ('shape_port' in locals()) and (chunkmode == 4):
        metadat['shape_port'] = shape_port
        nrec = metadat['shape_port'][1]
    else:
        metadat['shape_port'] = ''

    if ('shape_star' in locals()) and (chunkmode != 4):
        metadat['shape_star'] = shape_star
        nrec = metadat['shape_star'][0] * metadat['shape_star'][2]
    elif ('shape_star' in locals()) and (chunkmode == 4):
        metadat['shape_star'] = shape_star
        nrec = metadat['shape_star'][1]
    else:
        metadat['shape_star'] = ''

    if ('shape_hi' in locals()) and (chunkmode != 4):
        metadat['shape_hi'] = shape_hi
        #nrec = metadat['shape_hi'][0] * metadat['shape_hi'][2] * 2
    elif ('shape_hi' in locals()) and (chunkmode == 4):
        metadat['shape_hi'] = shape_hi
    else:
        metadat['shape_hi'] = ''

    if ('shape_low' in locals()) and (chunkmode != 4):
        metadat['shape_low'] = shape_low
        #nrec = metadat['shape_low'][0] * metadat['shape_low'][2] * 2
    elif ('shape_low' in locals()) and (chunkmode == 4):
        metadat['shape_low'] = shape_low
    else:
        metadat['shape_low'] = ''
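    # nrec is the total ping (record) count: with chunking the memmap shape is
    # (nchunks, nrows, npings_per_chunk), so nrec = shape[0] * shape[2], while
    # chunkmode 4 keeps one block of shape (nrows, npings), so nrec = shape[1];
    # e.g. shape_port = (10, 512, 1000) gives nrec = 10 * 1000 = 10000 pings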

    #make kml boat trackline
    humutils.make_trackline(lon, lat, sonpath, base)

    if 'port_fp' in locals() and 'star_fp' in locals():

        # the meta.mat existence check below is disabled, so the bed pick is always recomputed
        #if not os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'meta.mat'))):
        if True:
            if bedpick == 1:  # auto

                x, bed = humutils.auto_bedpick(ft, dep_m, chunkmode, port_fp,
                                               c)

                if len(dist_m) < len(bed):
                    dist_m = np.append(
                        dist_m, dist_m[-1] * np.ones(len(bed) - len(dist_m)))

                if doplot == 1:
                    if chunkmode != 4:
                        for k in range(len(star_fp)):
                            plot_2bedpicks(
                                port_fp[k], star_fp[k],
                                bed[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                                dist_m[ind_port[-1] * k:ind_port[-1] *
                                       (k + 1)],
                                x[ind_port[-1] * k:ind_port[-1] * (k + 1)], ft,
                                shape_port, sonpath, k, chunkmode)
                    else:
                        plot_2bedpicks(port_fp, star_fp, bed, dist_m, x, ft,
                                       shape_port, sonpath, 0, chunkmode)

                # 'real' bed is estimated to be the minimum of the two
                bed = np.min(np.vstack((bed[:nrec], np.squeeze(x[:nrec]))),
                             axis=0)
                bed = humutils.runningMeanFast(bed, 3)
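            # runningMeanFast is assumed to be a simple moving average, roughly
            #   np.convolve(bed, np.ones(3) / 3.0, mode='same')
            # so a 3-point window smooths single-ping spikes out of the bed pick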

            elif bedpick > 1:  # user prompt

                x, bed = humutils.auto_bedpick(ft, dep_m, chunkmode, port_fp,
                                               c)

                if len(dist_m) < len(bed):
                    dist_m = np.append(
                        dist_m, dist_m[-1] * np.ones(len(bed) - len(dist_m)))

                # 'real' bed is estimated to be the minimum of the two
                bed = np.min(np.vstack((bed[:nrec], np.squeeze(x[:nrec]))),
                             axis=0)
                bed = humutils.runningMeanFast(bed, 3)

                # manually intervene
                fig = plt.figure()
                ax = plt.gca()
                if chunkmode != 4:
                    im = ax.imshow(np.hstack(port_fp),
                                   cmap='gray',
                                   origin='upper')
                else:
                    im = ax.imshow(port_fp, cmap='gray', origin='upper')
                plt.plot(bed, 'r')
                plt.axis('tight')

                pts1 = plt.ginput(
                    n=300,
                    timeout=30)  # wait for up to 300 clicks or 30 seconds
                x1 = [p[0] for p in pts1]  # x coordinates of the clicks
                y1 = [p[1] for p in pts1]  # y coordinates of the clicks
                plt.close()
                del fig

                if len(x1) > 0:  # only correct the pick where points were clicked
                    tree = KDTree(list(zip(np.arange(len(bed)), bed)))
                    try:
                        dist, inds = tree.query(list(zip(x1, y1)),
                                                k=100,
                                                eps=5,
                                                n_jobs=-1)
                    except TypeError:  # older scipy without the n_jobs keyword
                        dist, inds = tree.query(list(zip(x1, y1)), k=100, eps=5)

                    # np.interp expects increasing x, so sort the clicked points
                    order = np.argsort(x1)
                    b = np.interp(inds, np.asarray(x1)[order],
                                  np.asarray(y1)[order])
                    bed2 = bed.copy()
                    bed2[inds] = b
                    bed = bed2
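                    # only the k=100 bed indices nearest each click are replaced
                    # with values interpolated from the clicked points; the rest
                    # of the automatic pick is left untouched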

                if doplot == 1:
                    if chunkmode != 4:
                        for k in range(len(star_fp)):
                            plot_2bedpicks(
                                port_fp[k], star_fp[k],
                                bed[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                                dist_m[ind_port[-1] * k:ind_port[-1] *
                                       (k + 1)],
                                x[ind_port[-1] * k:ind_port[-1] * (k + 1)], ft,
                                shape_port, sonpath, k, chunkmode)
                    else:
                        plot_2bedpicks(port_fp, star_fp, bed, dist_m, x, ft,
                                       shape_port, sonpath, 0, chunkmode)

            else:  #manual

                beds = []

                if chunkmode != 4:
                    for k in range(len(port_fp)):
                        input(
                            "Bed picking " + str(k + 1) + " of " +
                            str(len(port_fp)) +
                            ", are you ready? You have 30 seconds. Press Enter to continue..."
                        )
                        fig = plt.figure()
                        ax = plt.gca()
                        im = ax.imshow(port_fp[k], cmap='gray', origin='upper')
                        pts1 = plt.ginput(
                            n=300, timeout=30
                        )  # wait for up to 300 clicks or 30 seconds
                        x1 = [p[0] for p in pts1]  # x coordinates of the clicks
                        y1 = [p[1] for p in pts1]  # y coordinates of the clicks
                        # np.interp expects increasing x, so sort the clicks
                        order = np.argsort(x1)
                        bed = np.interp(np.r_[:ind_port[-1]],
                                        np.asarray(x1)[order],
                                        np.asarray(y1)[order])
                        plt.close()
                        del fig
                        beds.append(bed)
                        extent = np.shape(port_fp[k])[0]
                    bed = np.asarray(np.hstack(beds), 'float')
                else:
                    input(
                        "Bed picking - are you ready? You have 30 seconds. Press Enter to continue..."
                    )
                    fig = plt.figure()
                    ax = plt.gca()
                    im = ax.imshow(port_fp, cmap='gray', origin='upper')
                    pts1 = plt.ginput(
                        n=300, timeout=30
                    )  # wait for up to 300 clicks or 30 seconds
                    x1 = [p[0] for p in pts1]  # x coordinates of the clicks
                    y1 = [p[1] for p in pts1]  # y coordinates of the clicks
                    # np.interp expects increasing x, so sort the clicks
                    order = np.argsort(x1)
                    bed = np.interp(np.r_[:ind_port[-1]],
                                    np.asarray(x1)[order],
                                    np.asarray(y1)[order])
                    plt.close()
                    del fig
                    beds.append(bed)
                    extent = np.shape(port_fp)[1]
                    bed = np.asarray(np.hstack(beds), 'float')

            # now revise the depth in metres
            dep_m = (1 / ft) * bed
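            # ft is in pixels per metre (hence metadat['pix_m'] = 1 / ft below),
            # so this converts the pick from image rows to metres, e.g.
            # ft = 25 px/m and a pick at row 100 gives dep_m = 100 / 25 = 4 m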

            if doplot == 1:
                if chunkmode != 4:
                    for k in range(len(star_fp)):
                        plot_bedpick(
                            port_fp[k], star_fp[k], (1 / ft) *
                            bed[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                            dist_m[ind_port[-1] * k:ind_port[-1] * (k + 1)],
                            ft, shape_port, sonpath, k, chunkmode)
                else:
                    plot_bedpick(port_fp, star_fp, (1 / ft) * bed, dist_m, ft,
                                 shape_port, sonpath, 0, chunkmode)

            metadat['bed'] = bed[:nrec]

    else:
        metadat['bed'] = dep_m[:nrec] * ft

    metadat['heading'] = metadat['heading'][:nrec]
    metadat['lon'] = lon[:nrec]
    metadat['lat'] = lat[:nrec]
    metadat['dist_m'] = dist_m[:nrec]
    metadat['dep_m'] = dep_m[:nrec]
    metadat['pix_m'] = 1 / ft
    metadat['bed'] = metadat['bed'][:nrec]
    metadat['c'] = c
    metadat['t'] = t
    if model == 2:
        metadat['f'] = f * 2
    else:
        metadat['f'] = f

    metadat['spd'] = metadat['spd'][:nrec]
    metadat['time_s'] = metadat['time_s'][:nrec]
    metadat['e'] = metadat['e'][:nrec]
    metadat['n'] = metadat['n'][:nrec]
    metadat['es'] = metadat['es'][:nrec]
    metadat['ns'] = metadat['ns'][:nrec]
    try:
        metadat['caltime'] = metadat['caltime'][:nrec]
    except Exception:
        pass  # leave caltime as-is if it cannot be sliced

    savemat(os.path.normpath(os.path.join(sonpath, base + 'meta.mat')),
            metadat,
            oned_as='row')
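    # the metadata can be reloaded later with scipy, e.g.:
    #   from scipy.io import loadmat
    #   meta = loadmat(os.path.normpath(os.path.join(sonpath, base + 'meta.mat')))
    #   meta['dep_m']   # depth per ping, in metres
    # (savemat here is assumed to be scipy.io.savemat, matching the oned_as keyword)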

    # newline='' stops the csv module writing blank lines on Windows, and a
    # distinct name avoids shadowing the frequency variable f above
    with open(os.path.normpath(os.path.join(sonpath, base + 'rawdat.csv')),
              'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(
            ('longitude', 'latitude', 'easting', 'northing', 'depth (m)',
             'distance (m)', 'instr. heading (deg)', 'heading (deg.)'))
        for i in range(0, nrec):
            writer.writerow(
                (float(lon[i]), float(lat[i]), float(es[i]), float(ns[i]),
                 float(dep_m[i]), float(dist_m[i]),
                 float(metadat['instr_heading'][i]),
                 float(metadat['heading'][i])))
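    # the CSV can be read back with the standard library, e.g.:
    #   with open(os.path.normpath(os.path.join(sonpath, base + 'rawdat.csv'))) as fin:
    #       rows = list(csv.DictReader(fin))
    #   float(rows[0]['depth (m)'])   # first ping's depth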

    del lat, lon, dep_m  #, dist_m

    if doplot == 1:

        plot_pos(sonpath, metadat, es, ns)

        if 'dwnlow_fp' in locals():

            plot_dwnlow(dwnlow_fp, chunkmode, sonpath)

        if 'dwnhi_fp' in locals():

            plot_dwnhi(dwnhi_fp, chunkmode, sonpath)

    # time.clock() was removed in Python 3.8, so time.time() is used on all platforms
    elapsed = (time.time() - start)
    print("Processing took " + str(elapsed) + " seconds")

    print("Done!")
    print("===================================================")