import numpy as np


def find_peak(th_row, th_col):
    # start = time.time()
    rows = th_row.shape[0]
    check_width = 30
    p_row = np.array([], np.int16)
    p_column = np.array([], np.int16)
    i = check_width
    end_flag = False
    first_time = True
    while i < rows - check_width:
        # candidate peak: local minimum of the row coordinate
        if th_row[i] <= th_row[i - 1] and th_row[i] <= th_row[i + 1]:
            peak_flag = True

            # confirm it is the minimum within +/- check_width samples
            for p in range(1, check_width):
                if th_row[i] > th_row[i - p] or th_row[i] > th_row[i + p]:
                    peak_flag = False
                    break

            if peak_flag:
                if first_time:
                    first_time = False
                elif p_row[-1] - th_row[i] > 50:
                    # stop once the row value falls more than 50 below the previous peak
                    end_flag = True
                    break
                p_row = np.append(p_row, th_row[i])
                p_column = np.append(p_column, th_col[i])
                # jump ahead so the same peak is not detected twice
                i += check_width - 5

        if end_flag:
            break
        i += 1
    # stop = time.time() - start
    # print(stop)
    return p_row, p_column
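A minimal usage sketch; the synthetic arrays below are made up for illustration, assuming th_row holds the row coordinates of thresholded points and th_col the matching column coordinates:

import numpy as np

# a single valley around index 39/40, deep enough to satisfy check_width = 30
th_row = np.concatenate([np.linspace(300, 100, 40),
                         np.linspace(100, 300, 40)]).astype(np.int16)
th_col = np.arange(th_row.size, dtype=np.int16)

peak_rows, peak_cols = find_peak(th_row, th_col)
print(peak_rows, peak_cols)   # one detected peak row and its column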
Example #2
import numpy as np
import pandas as pd


def split(vector, param):
    """
    Splits a scan vector into two equal halves, the forward (positive) and
    backward (negative) scans, based on the step size and potential limits.
    The output can then be used to ease the implementation of peak detection
    and baseline finding.

    Parameters
    ----------
    vector : pandas.Series
             Any data that can be turned into a numpy array; normally a
             pandas DataFrame column.
    param : dict
            Dictionary of parameters governing the CV run.

    Returns
    -------
    forward : array
              Array containing the values of the forward scan.
    backward : array
               Array containing the values of the backward scan.
    """
    assert isinstance(vector, pd.core.series.Series),\
        "Input should be pandas series"
    scan = int(
        abs(param['vlimit_1(V)'] - param['vinit(V)']) / param['step_size(V)'])
    if param['vinit(V)'] > param['vlimit_1(V)']:
        backward = np.array(vector[:scan])
        forward = np.array(vector[scan:])
        # vector_p = vector_p.reset_index(drop=True)
    else:
        forward = np.array(vector[:scan])
        backward = np.array(vector[scan:])
        # vector_n = vector_n.reset_index(drop=True)
    return forward, backward
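A minimal usage sketch; the parameter keys match the ones the function reads, while the scan data and voltage limits below are made up for illustration:

import numpy as np
import pandas as pd

param = {'vinit(V)': 0.0, 'vlimit_1(V)': 1.0, 'step_size(V)': 0.01}
n = int(abs(param['vlimit_1(V)'] - param['vinit(V)']) / param['step_size(V)'])
potential = pd.Series(np.concatenate([np.linspace(0.0, 1.0, n),
                                      np.linspace(1.0, 0.0, n)]))

forward, backward = split(potential, param)
print(forward.shape, backward.shape)   # (100,), (100,)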
Example #3
 def predict(self, image, threshold=0.5):
     '''
     Args:
         image (str/np.ndarray): path of an image, or an np.ndarray read by cv2
         threshold (float): score threshold for predicted boxes
     Returns:
         results (dict): contains 'boxes', an np.ndarray of shape [N, 6] where N is
                         the number of boxes and each row is
                         [class, score, x_min, y_min, x_max, y_max].
                         MaskRCNN results also contain 'masks', an np.ndarray of
                         shape [N, class_num, mask_resolution, mask_resolution].
     '''
     inputs, im_info = self.preprocess(image)
     np_boxes, np_masks = None, None
     if self.config.use_python_inference:
         outs = self.executor.run(self.program,
                                  feed=inputs,
                                  fetch_list=self.fetch_targets,
                                  return_numpy=False)
         np_boxes = np.array(outs[0])
         if self.config.mask_resolution is not None:
             np_masks = np.array(outs[1])
     else:
         input_names = self.predictor.get_input_names()
         for i in range(len(inputs)):
             input_tensor = self.predictor.get_input_tensor(input_names[i])
             input_tensor.copy_from_cpu(inputs[input_names[i]])
         self.predictor.zero_copy_run()
         output_names = self.predictor.get_output_names()
         boxes_tensor = self.predictor.get_output_tensor(output_names[0])
         np_boxes = boxes_tensor.copy_to_cpu()
         if self.config.mask_resolution is not None:
             masks_tensor = self.predictor.get_output_tensor(
                 output_names[1])
             np_masks = masks_tensor.copy_to_cpu()
     results = self.postprocess(np_boxes,
                                np_masks,
                                im_info,
                                threshold=threshold)
     return results
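A hedged usage sketch: the Detector class, its constructor argument, and the model directory below are assumptions about the surrounding deployment code and are not defined in this snippet.

import cv2

detector = Detector(model_dir='output/inference_model')   # hypothetical constructor
image = cv2.imread('demo.jpg')
results = detector.predict(image, threshold=0.5)

# each row of 'boxes' is [class, score, x_min, y_min, x_max, y_max]
boxes = results['boxes']
print(boxes[boxes[:, 1] >= 0.5])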
Example #4
 def generate_time_grid(self):
     start = self.pricing_date
     end = self.final_date
     # pandas date_range function
     # freq = e.g. 'B' for Business Day,
     # 'w' for Weekly, 'M' for Monthly
     time_grid = pd.date_range(start=start, end=end,
                               freq=self.frequency).to_pydatetime()
     time_grid = list(time_grid)
     # enhance time_grid by start, end, and special_dates
     if start not in time_grid:
         time_grid.insert(0, start)
         # insert start date if not in list
     if end not in time_grid:
         time_grid.append(end)
         # insert end date if not in list
     if len(self.special_dates) > 0:
         # add all special dates
         time_grid.extend(self.special_dates)
         # delete duplicates
         time_grid = list(set(time_grid))
         # sort list
         time_grid.sort()
     self.time_grid = np.array(time_grid)
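A standalone sketch of the same grid construction with made-up dates; it spells out the attributes the method expects on self (pricing_date, final_date, frequency, special_dates):

import datetime as dt
import numpy as np
import pandas as pd

pricing_date = dt.datetime(2024, 1, 1)
final_date = dt.datetime(2024, 12, 31)
frequency = 'B'                              # business-day grid
special_dates = [dt.datetime(2024, 6, 14)]

grid = list(pd.date_range(start=pricing_date, end=final_date,
                          freq=frequency).to_pydatetime())
if pricing_date not in grid:
    grid.insert(0, pricing_date)
if final_date not in grid:
    grid.append(final_date)
grid.extend(special_dates)
time_grid = np.array(sorted(set(grid)))      # deduplicated, sorted grid
print(time_grid[:3], time_grid[-1])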
Example #5
basicEESimulation.py

#this is a basic simulation of a portfolio based on a single homeowner

#import statements

import numpy as np 

#simulation parameters

np.random.seed(150000)
T = 60 #timesteps in months


portfolioInvestment = 5000

#expected savings path
portfolioSavings = np.array([])

#monthly hazard
monthlyHazard = np.array([])
#Determine optimal contract length

#shared model


#determine risk weighted return based on contract length
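The skeleton above only declares its arrays; one way to populate them (the uniform hazard draws and level savings path below are placeholder assumptions, not the intended EE model) could be:

import numpy as np

np.random.seed(150000)
T = 60
portfolioInvestment = 5000

monthlyHazard = np.random.uniform(0.005, 0.015, T)    # placeholder hazard draws
portfolioSavings = np.cumsum(np.full(T, portfolioInvestment / T))

# survival-weighted (risk-weighted) savings path over the contract
survival = np.cumprod(1.0 - monthlyHazard)
riskWeightedSavings = portfolioSavings * survival
print(riskWeightedSavings[-1])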

Example #6
def main(score_th=0.25):
    N_CLASSES = len(CLASS_NAMES_Vin)
    torch.backends.cudnn.benchmark = True
    #classification pre-trained model
    CKPT_PATH = '/data/pycode/CXRAD/ckpt/SANet.pkl'
    cls_model = SANet(num_classes=N_CLASSES)
    if os.path.exists(CKPT_PATH):
        checkpoint = torch.load(CKPT_PATH)
        cls_model.load_state_dict(checkpoint)  #strict=False
        print(
            "=> Loaded well-trained SANet model checkpoint of Vin-CXR dataset: "
            + CKPT_PATH)
    cls_model = cls_model.cuda()
    cls_model.eval()
    #detection pre-trained model
    od_model = torchvision.models.detection.maskrcnn_resnet50_fpn(
        pretrained=True)
    in_features = od_model.roi_heads.box_predictor.cls_score.in_features
    od_model.roi_heads.box_predictor = FastRCNNPredictor(
        in_features, N_CLASSES)
    in_features_mask = od_model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    od_model.roi_heads.mask_predictor = MaskRCNNPredictor(
        in_features_mask, hidden_layer, N_CLASSES)  #
    CKPT_PATH = '/data/pycode/CXRAD/ckpt/Maskrcnn.pkl'
    if os.path.exists(CKPT_PATH):
        checkpoint = torch.load(CKPT_PATH)
        od_model.load_state_dict(checkpoint)  #strict=False
        print(
            "=> Loaded well-trained Maskrcnn model checkpoint of Vin-CXR dataset: "
            + CKPT_PATH)
    od_model = od_model.cuda()
    od_model.eval()

    #CVTE-CXR dataset
    cvte_csv_file = '/data/pycode/CXRAD/dataset/cvte_test.txt'  #testing file path
    cvte_image_dir = '/data/fjsdata/CVTEDR/images/'  #image path
    # test images and show the results
    images = pd.read_csv(cvte_csv_file, sep=',', header=None).values
    gt, pred, box = [], [], []
    for image in images:
        gt.append(image[1])
        img = cvte_image_dir + image[0]
        image = Image.open(img).convert('RGB')
        image = torch.unsqueeze(transform_seq(image), 0)
        var_image = torch.autograd.Variable(image).cuda()
        #generate classification result
        var_output = cls_model(var_image)  #forward
        prob_cls = 1 - var_output[0].data.cpu()[0].numpy()
        #generate detection result
        var_output = od_model(var_image)  #dict
        boxes = var_output[0]['boxes'].data.cpu().numpy()
        scores = var_output[0]['scores'].data.cpu().numpy()
        if len(scores) > 0:
            ind = np.argmax(scores)
            pred.append(max([prob_cls, scores[ind]]))
            box.append(boxes[ind])
        else:
            pred.append(prob_cls)
            box.append([0, 0, 1, 1])

        sys.stdout.write('\r image process: {}'.format(len(pred)))
        sys.stdout.flush()
    #evaluation
    gt_np = np.array(gt)
    pred_np = np.array(pred)
    box = np.array(box)
    assert gt_np.shape == pred_np.shape
    #AUROCS
    AUROCs = roc_auc_score(gt_np, pred_np)
    print('AUROC = {:.4f}'.format(AUROCs))
    #sensitivity and specificity
    pred_np = np.where(pred_np > score_th, 1, 0)
    tn, fp, fn, tp = confusion_matrix(gt_np, pred_np).ravel()
    sen = tp / (tp + fn)
    spe = tn / (tn + fp)
    print('\r\rSen = {:.4f} and Spe = {:.4f}'.format(sen, spe))
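For reference, the evaluation block above reduces to two scikit-learn calls; a toy run on synthetic labels and scores (not results from the CVTE-CXR data above):

import numpy as np
from sklearn.metrics import roc_auc_score, confusion_matrix

gt_np = np.array([0, 0, 1, 1, 1, 0])
pred_np = np.array([0.10, 0.40, 0.80, 0.30, 0.90, 0.20])

print('AUROC = {:.4f}'.format(roc_auc_score(gt_np, pred_np)))
pred_bin = np.where(pred_np > 0.25, 1, 0)
tn, fp, fn, tp = confusion_matrix(gt_np, pred_bin).ravel()
print('Sen = {:.4f} and Spe = {:.4f}'.format(tp / (tp + fn), tn / (tn + fp)))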