def search_by_thoroughfare(thoroughfare):
    """
    This GET request function queries the database for companies whose address
    contains the given thoroughfare. For more info, see the
    'search_field_in_db_by_value' function in the 'utilities' module.

    :param thoroughfare: string containing the street address where the
                         companies have their offices
    """
    backbone = Backbone()

    return backbone.search_field_in_db_by_value_and_return_serialized_result("thoroughfare", thoroughfare)
def search_by_legal_name(legal_name):
    """
    This GET request function queries the database for companies whose legal
    name contains the given string. For more info, see the
    'search_field_in_db_by_value' function in the 'utilities' module.

    :param legal_name: string containing the name of the company we are
                       searching for
    """
    
    backbone = Backbone()

    return backbone.search_field_in_db_by_value_and_return_serialized_result("legal_name", legal_name)
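Both handlers return the serialized search result directly, so they can be mounted as GET routes. A minimal wiring sketch, assuming a Flask application object; the framework and URL paths are assumptions and not part of the original snippets:

# Hypothetical wiring; Flask and the URL paths below are assumptions.
from flask import Flask

app = Flask(__name__)

app.add_url_rule("/companies/by-thoroughfare/<thoroughfare>",
                 view_func=search_by_thoroughfare, methods=["GET"])
app.add_url_rule("/companies/by-legal-name/<legal_name>",
                 view_func=search_by_legal_name, methods=["GET"])

if __name__ == "__main__":
    app.run(debug=True)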
Example #4
def get_embeddings(data_root,
                   model_root,
                   input_size=[112, 112],
                   embedding_size=512):

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # check data and model paths
    assert os.path.exists(data_root)
    assert os.path.exists(model_root)
    print(f"Data root: {data_root}")

    # define image preprocessing
    transform = transforms.Compose(
        [
            transforms.Resize([
                int(128 * input_size[0] / 112),
                int(128 * input_size[0] / 112)
            ], ),  # smaller side resized
            transforms.CenterCrop([input_size[0], input_size[1]]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ], )

    # define data loader
    dataset = datasets.ImageFolder(data_root, transform)
    loader = data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        pin_memory=True,
        num_workers=0,
    )
    print(f"Number of classes: {len(loader.dataset.classes)}")

    # load backbone weights from a checkpoint
    backbone = Backbone(input_size)
    backbone.load_state_dict(
        torch.load(model_root, map_location=torch.device("cpu")))
    backbone.to(device)
    backbone.eval()

    # get embedding for each face
    embeddings = np.zeros([len(loader.dataset), embedding_size])
    with torch.no_grad():
        for idx, (image, _) in enumerate(
                tqdm(loader,
                     desc="Create embeddings matrix",
                     total=len(loader)), ):
            embeddings[idx, :] = F.normalize(backbone(image.to(device))).cpu()

    # get all original images
    images = []
    for img_path, _ in dataset.samples:
        img = cv2.imread(img_path)
        images.append(img)

    return images, embeddings
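Because get_embeddings L2-normalizes every row, the dot product between two rows is their cosine similarity. A minimal usage sketch; the data and checkpoint paths are placeholders:

images, embeddings = get_embeddings("data/aligned_faces", "checkpoints/backbone_ir50.pth")

similarity = embeddings @ embeddings.T      # (N, N) cosine similarities
np.fill_diagonal(similarity, -np.inf)       # ignore self matches
nearest = similarity.argmax(axis=1)         # closest other face for each image
print(f"Face 0 is closest to face {nearest[0]}")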
Example #5
def main(input_str):
    """
    Main function takes string input and returns the best results depending
    on scoring. Single result include sh-miR sequence,
    score and link to 2D structure from mfold program
    """
    sequence = check_input(input_str)
    seq1, seq2, shift_left, shift_right = sequence
    if not seq2:
        seq2 = reverse_complement(seq1)
    all_frames = get_all()
    if 'error' in all_frames:  # database error handler
        return all_frames

    frames = get_frames(seq1, seq2, shift_left, shift_right, all_frames)
    original_frames = [Backbone(**elem) for elem in all_frames]

    frames_with_score = []
    for frame_tuple, original in zip(frames, original_frames):
        score = 0
        frame, insert1, insert2 = frame_tuple
        mfold_data = mfold(frame.template(insert1, insert2))
        if 'error' in mfold_data:
            return mfold_data
        pdf, ss = mfold_data[0], mfold_data[1]
        score += score_frame(frame_tuple, ss, original)
        score += score_homogeneity(original)
        score += two_same_strands_score(seq1, original)
        frames_with_score.append(
            (score, frame.template(insert1, insert2), frame.name, pdf))

    sorted_frames = [elem for elem in sorted(frames_with_score,
                                             key=lambda x: x[0], reverse=True)
                     if elem[0] > 60]
    return {'result': sorted_frames[:3]}
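A minimal call sketch; the 21-nt strand below is only an illustrative placeholder, since the exact input formats accepted by check_input are not shown here:

result = main("ACGGCUUUGACGAGCUAUAUU")  # hypothetical siRNA strand
for score, shmir_sequence, frame_name, structure_pdf in result.get('result', []):
    print(frame_name, score)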
Example #6
def eval(checkpoint, data_path, params):
    # data
    files, label, boxes = load_annotation(data_path, 'test')
    eval_set = YoloDataset(paths=files,
                           bboxes=boxes,
                           labels=label,
                           params=params,
                           train=False)
    eval_loader = DataLoader(eval_set,
                             batch_size=params.batch_size,
                             num_workers=params.num_gpus * 8,
                             shuffle=False)
    # model
    state_dict = torch.load(checkpoint)
    model = Backbone()
    model.load_state_dict(state_dict)
    model = model.cuda()
    # loss
    criterion = SumSquareError()

    model.eval()
    total_loss = 0
    with torch.no_grad():
        for iter, (img, annotation) in enumerate(eval_loader):
            img = img.cuda()
            annotation = annotation.cuda()
            output = model(img)
            loss = criterion(output, annotation).item()
            total_loss += loss * len(img)

        print(f'evaluate loss: {total_loss / len(eval_set)}')
Example #7
    def __init__(self, args, mri_data, num_class):
        self.args = args
        self.num_class = num_class
        self.mri_data = mri_data

        model = Backbone(num_class, args.num_slices)

        self.model = model

        # Using cuda
        if args.cuda:
            self.model = DataParallel(self.model).cuda()

        # Resuming checkpoint
        if not os.path.isfile(args.resume):
            raise RuntimeError("=> no checkpoint found at '{}'".format(
                args.resume))
        checkpoint = torch.load(args.resume)

        if args.cuda:
            self.model.module.load_state_dict(checkpoint['state_dict'])
        else:
            self.model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(args.resume))
def create_uncertain_pairs_file():
    """
    This function is called when the user wants to create a training file on the
    client side of the application. It assumes all the necessary files were
    previously uploaded by the user. If needed, it creates the 2nd input dataset
    and then executes the first 20 Jupyter notebook cells, which are the ones
    required to create the uncertain pairs file. That file will contain pairs of
    examples (from the two input datasets) that Dedupe is unsure about.

    The first 20 cells handle module imports, reading the configuration file,
    reading the input datasets and creating the uncertain pairs file (if the
    configuration file specifies that the user wants to create the training
    file).
    """

    backbone = Backbone()

    if backbone.is_tmp_file_used():
        backbone.extract_data_from_db_and_create_second_input_dataset()

    backbone.execute_jupyter_notebook_cells(idx_first_cell=0, idx_last_cell=20)

    return "Uncertain pairs file created successfully"
        line = str(user) + '::'  + str(item) + '::' + str(timestamp) + '\n'
        lines.append(line)
    with file(filename, 'a') as outfile:
        outfile.writelines(lines)
    
def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii = False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)
        
def get_since_id(path):
    since_id  = 0
    for infile in glob.glob( os.path.join(path, 'tweets_*.json') ):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern,re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":        
    b = Backbone()

    datasetpath = 'datasets/YouTube'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets('I liked a @YouTube video', since_id)
    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/likes.dat')
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
Example #10
    def __init__(self, pretrained=False):
        super(Detector, self).__init__()

        # ---------------------------
        # TODO: Param
        self.view_size = 1025 # odd
        self.classes = 80
        self.nms_th = 0.05
        self.nms_iou = 0.6
        self.max_detections = 3000
        self.tlbr_max_minmax = [[5, 64], [64, 128], [128, 256], [256, 512], [512, 1024]]
        self.phpw = [[129, 129], [65, 65], [33, 33], [17, 17], [9, 9]]
        self.r = [12, 24, 48, 96, 192]
        # ---------------------------

        # fpn =======================================================
        self.backbone = Backbone(pretrained=pretrained)
        self.relu = nn.ReLU(inplace=True)
        self.conv_out6 = nn.Conv2d(2048, 256, kernel_size=3, padding=1, stride=2)
        self.conv_out7 = nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=2)
        self.prj_5 = nn.Conv2d(2048, 256, kernel_size=1)
        self.prj_4 = nn.Conv2d(1024, 256, kernel_size=1)
        self.prj_3 = nn.Conv2d(512, 256, kernel_size=1)
        self.conv_5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)

        # head =======================================================
        self.conv_cls = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, self.classes, kernel_size=3, padding=1))
        self.conv_reg = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 4, kernel_size=3, padding=1))

        # reinit head =======================================================
        init_layers = [self.conv_cls, self.conv_reg]
        for init_layer in init_layers:
            for item in init_layer.children():
                if isinstance(item, nn.Conv2d) or isinstance(item, nn.ConvTranspose2d):
                    nn.init.constant_(item.bias, 0)
                    nn.init.normal_(item.weight, mean=0, std=0.01)
        pi = 0.01
        _bias = -math.log((1.0-pi)/pi)
        nn.init.constant_(self.conv_cls[-1].bias, _bias)

        # learnable parameter for scales =====================================
        self.scale_param = nn.Parameter(torch.FloatTensor([32, 64, 128, 256, 512]))
        self.scale_param.requires_grad = False
Example #11

def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii=False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)


def get_since_id(path):
    since_id = 0
    for infile in glob.glob(os.path.join(path, 'tweets_*.json')):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern, re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id


if __name__ == "__main__":
    b = Backbone()

    datasetpath = 'datasets/Lastfm'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id = b.searchTweets("I'm listening to via @lastfm",
                                          since_id)
    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/listens.dat')
    writeTweets(tweets, datasetpath + '/tweets_' + str(new_since_id) + '.json')
def run_algorithm():
    """
    This function implements the main algorithm of the service. It assumes all the
    necessary files were uploaded by the user.
    The execution flow is as follows:
    1) Create a Backbone object, which will create the configuration file for Dedupe
    2) If the user has not provided the 2nd input dataset, use the Backbone object to
       create a temporary file containing rows extracted by 'jurisdiction' from the
       database
    3) Execute all the cells in the Jupyter notebook
    4) If a temporary file was created, update the given cluster_ids (read more about
       this in a comment below)
    5) Insert the new cluster_ids into the 'backbone_index' table
    6) Create table(s) in the database and insert the dataset(s) produced by the
       Dedupe algorithm
    7) Remove all the files that were used in the process, except for the configuration
       file provided by the user. That file is kept because the user would have to
       provide it again (the system needs the database configuration data) in order to
       see results stored in the database, so we leave it there for convenience
    """

    # Backbone object that will do all the work
    backbone = Backbone()

    # run the backbone script file
    if backbone.is_tmp_file_used():
        backbone.extract_data_from_db_and_create_second_input_dataset()

    # execute all the cells in the Jupyter notebook
    backbone.execute_jupyter_notebook_cells(idx_first_cell=0)

    # if the 2nd dataset contained rows from the database, the examples from the 2nd dataset
    # already had a cluster_id (backbone index) assigned, but when Dedupe created new clusters
    # made of one example from the 1st dataset and one from the 2nd dataset, it gave each such
    # cluster a new unique cluster_id (one that did not exist in the backbone_index table).
    # So we have to update the cluster_ids of those clusters with the cluster_ids that the
    # examples from the 2nd dataset originally had.
    if backbone.is_tmp_file_used():
        utilities.update_cluster_ids_of_output_file_1(
            backbone.output_file_1,
            backbone.output_file_2,
            backbone.input_file_2
        )

    # insert the new cluster_ids created by Dedupe into the backbone_index table
    if backbone.is_tmp_file_used():
        utilities.insert_new_cluster_ids_into_backbone_index_table(
            backbone.data_from_config_file['database_config'],
            backbone.output_file_1,
            output_file_2=None,
            last_cluster_id=backbone.last_cluster_id_in_db)
    else:
        utilities.insert_new_cluster_ids_into_backbone_index_table(
            backbone.data_from_config_file['database_config'],
            backbone.output_file_1,
            backbone.output_file_2,
            backbone.last_cluster_id_in_db)

    # create a new table with a FK on cluster_id (referencing the PK 'idx' of the backbone_index table)
    # and insert the dataset produced by Dedupe into it;
    # the resulting dataset is the input dataset plus 2 new columns: 'cluster_id' and 'link_score'
    utilities.create_table_and_insert_dataset_resulted_from_dedupe(
        backbone.data_from_config_file['database_config'],
        backbone.data_from_config_file['provider_1_name'],
        backbone.output_file_1)

    # if we were provided with a 2nd input dataset, insert it in the DB also
    if not backbone.is_tmp_file_used():
        utilities.create_table_and_insert_dataset_resulted_from_dedupe(
            backbone.data_from_config_file['database_config'],
            backbone.data_from_config_file['provider_2_name'],
            backbone.output_file_2)

    os.remove(backbone.input_file_1)
    os.remove(backbone.input_file_2)
    os.remove(backbone.training_file_name)
    os.remove(backbone.output_file_1)
    os.remove(backbone.output_file_2)
    os.remove(backbone.configuration_file_name_for_dedupe)

    if backbone.settings_file_name:
        os.remove(backbone.settings_file_name)
    else:
        os.remove("settings_file")

    return "Algorithm ran successfully"
class DogEyesVerifier(object):
    def __init__(self, input_size=112, model_path=None, pos_thr=None, neg_thr=None, drop_ratio=0.6,
                 net_depth=50, use_random_crop=False,
                 use_gray=False, use_center_crop=False, center_crop_ratio=0.8, device='cpu', use_onnx=False):
        assert model_path is not None

        self.pos_thr = pos_thr
        self.neg_thr = neg_thr
        self.input_size = input_size
        self.transforms = get_test_transforms(input_size, use_random_crop=use_random_crop, use_gray=use_gray,
                                              use_center_crop=use_center_crop, center_crop_ratio=center_crop_ratio)

        self.use_onnx = use_onnx
        if use_onnx:
            import onnxruntime

            self.model = onnxruntime.InferenceSession(model_path)
        else:
            self.model = Backbone(net_depth, drop_ratio, 'ir_se').to(device)
            self.model.eval()
            if device == 'cpu':
                self.model.load_state_dict(torch.load(model_path, map_location=device), strict=True)
            else:
                self.model.load_state_dict(torch.load(model_path), strict=True)

        self.use_cuda = device == "cuda"

    def is_same(self, img1, img2, is_same_side, pos_thr=None, neg_thr=None, use_pos_low_thr=False):
        if pos_thr is None:
            pos_thr = self.pos_thr
        if neg_thr is None:
            neg_thr = self.neg_thr

        if pos_thr is None and neg_thr is None:
            raise ValueError("pos_thr and neg_thr are None.")

        if isinstance(img1, str):
            img1 = cv2.imread(img1, cv2.IMREAD_COLOR)
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        elif isinstance(img1, JpegImageFile):
            if img1.mode != "RGB":
                img1 = img1.convert("RGB")
            img1 = np.array(img1)

        if isinstance(img2, str):
            img2 = cv2.imread(img2, cv2.IMREAD_COLOR)
            img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
        elif isinstance(img2, JpegImageFile):
            if img2.mode != "RGB":
                img2 = img2.convert("RGB")
            img2 = np.array(img2)

        img1 = self.transforms(image=img1)['image'].unsqueeze(0)
        img2 = self.transforms(image=img2)['image'].unsqueeze(0)

        if self.use_cuda:
            img1 = img1.cuda()
            img2 = img2.cuda()

        if self.use_onnx:
            input1 = {self.model.get_inputs()[0].name: to_numpy(img1)}
            input2 = {self.model.get_inputs()[0].name: to_numpy(img2)}
            embedding1 = self.model.run(None, input1)[0]
            embedding2 = self.model.run(None, input2)[0]
        else:
            with torch.set_grad_enabled(False):
                embedding1 = self.model(img1).cpu().data.numpy()
                embedding2 = self.model(img2).cpu().data.numpy()
        dist = np.sum(np.square(np.subtract(embedding1, embedding2)), 1)[0]

        if is_same_side:
            return dist < pos_thr
        else:
            if use_pos_low_thr:
                return dist < neg_thr
            else:
                return False
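A minimal usage sketch for the verifier above; the checkpoint path, image paths and thresholds are placeholders that depend on the trained model:

verifier = DogEyesVerifier(model_path="checkpoints/dog_eye_ir_se50.pth",
                           pos_thr=1.0, neg_thr=1.2, device="cpu")
same = verifier.is_same("eye_dog_a.jpg", "eye_dog_b.jpg", is_same_side=True)
print("same dog" if same else "different dog")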
Example #14
def get_frames(seq1, seq2, shift_left, shift_right, all_frames):
    """Take output of check_input function and insert into flanking sequences.
    take from database all miRNA results and check if ends of input is suitable
    for flanking sequences.
    If first value == and miRNA_end_5 second value == miRNA_end_3 then simply
    concatenate
    sequences flanks5_s + first_sequence + loop_s + second_sequence + flanks3_s.
    If any end is different function has to modify end of the insert:
    Right end:
    if miRNA_end_5 < first_end
    add to right site of second sequence additional nucleotides
    (as many as |miRNA_end_5 - first_end|)
    like (dots are nucleotides to add, big letter are flanking sequences, small are input):

    AAAGGGGCTTTTagtcttaga
    TTTCCCCGAA....agaatct

    if miRNA_end_5 > first_end
    cut nucleotides from rigth site of flanks3_s and/or from right site of
    second sequence

    before cut:
    AAAGGGGCTTTTagtcttaga
    TTTCCCCGAAAATTcctcagaatct (-2, +2)

    After
    AAAGGGGCTTTTagtcttaga
    TTTCCCCGAAAAtcagaatct

    Returns list of tuples (frame, sequence_1 sequence_2)

    input: string, string, int, int, pri-miRNA objects
    output: List of list of Backbone object, 1st strand 2nd strand   """
    frames = []
    for elem in all_frames:
        frame = Backbone(**elem)
        if shift_left == frame.miRNA_end_5 and shift_right == frame.miRNA_end_3:
            frames.append([frame, seq1, seq2])
        else:
            _seq1 = seq1[:]
            _seq2 = seq2[:]
            #miRNA 5 end (left)
            if frame.miRNA_end_5 < shift_left:
                if frame.miRNA_end_5 < 0 and shift_left < 0:
                    _seq2 += reverse_complement(
                        frame.flanks5_s[frame.miRNA_end_5:shift_left])
                elif frame.miRNA_end_5 < 0 and shift_left > 0:
                    frame.flanks5_s = frame.flanks5_s[:frame.miRNA_end_5]
                    _seq2 += reverse_complement(_seq1[:shift_left])
                elif shift_left == 0:
                    _seq2 += reverse_complement(
                        frame.flanks5_s[:frame.miRNA_end_5])
                elif frame.miRNA_end_5 == 0:
                    _seq2 += reverse_complement(_seq1[:frame.miRNA_end_5])
                else:
                    _seq2 += reverse_complement(
                        _seq1[frame.miRNA_end_5:shift_left])
            elif frame.miRNA_end_5 > shift_left:
                if frame.miRNA_end_5 > 0 and shift_left < 0:
                    frame.flanks5_s += reverse_complement(
                        _seq2[frame.miRNA_end_5:])
                    frame.flanks3_s = frame.flanks3_s[frame.miRNA_end_5:]
                elif frame.miRNA_end_5 > 0 and shift_left > 0:
                    frame.flanks5_s += reverse_complement(
                        frame.flanks3_s[shift_left:frame.miRNA_end_5])
                elif shift_left == 0:
                    frame.flanks5_s += reverse_complement(
                        frame.flanks3_s[:frame.miRNA_end_5])
                elif frame.miRNA_end_5 == 0:
                    frame.flanks5_s += reverse_complement(_seq2[shift_left:])
                else:
                    frame.flanks5_s += reverse_complement(
                        _seq2[shift_left:frame.miRNA_end_5])

            #miRNA 3 end (right)
            if frame.miRNA_end_3 < shift_right:
                if frame.miRNA_end_3 < 0 and shift_right > 0:
                    frame.loop_s = frame.loop_s[-frame.miRNA_end_3:]
                    frame.loop_s += reverse_complement(
                        _seq1[-shift_right:])
                elif frame.miRNA_end_3 > 0 and shift_right > 0:
                    frame.loop_s += reverse_complement(
                        _seq1[-shift_right:-frame.miRNA_end_3])
                elif frame.miRNA_end_3 == 0:
                    frame.loop_s += reverse_complement(_seq1[-shift_right:])
                elif shift_right == 0:
                    frame.loop_s += reverse_complement(
                        frame.loop_s[:-frame.miRNA_end_3])
                else:
                    frame.loop_s += reverse_complement(
                        frame.loop_s[-shift_right:-frame.miRNA_end_3])
            elif frame.miRNA_end_3 > shift_right:
                if frame.miRNA_end_3 > 0 and shift_right < 0:
                    _seq1 += reverse_complement(
                        _seq2[:-shift_right])
                    frame.loop_s = frame.loop_s[:-frame.miRNA_end_3]
                elif frame.miRNA_end_3 > 0 and shift_right > 0:
                    _seq1 += reverse_complement(
                        frame.loop_s[-frame.miRNA_end_3:-shift_right])
                elif shift_right == 0:
                    _seq1 += reverse_complement(
                        frame.loop_s[:frame.miRNA_end_3])
                elif frame.miRNA_end_3 == 0:
                    _seq1 += reverse_complement(_seq2[:-shift_right])
                else:
                    _seq1 += reverse_complement(
                        _seq2[-frame.miRNA_end_3:-shift_right])

            frames.append([frame, _seq1, _seq2])
    return frames
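get_frames relies on a reverse_complement helper that is not included in this snippet; a minimal sketch of what it is assumed to do for DNA-style sequences (the real project may also handle RNA with U instead of T):

_COMPLEMENT = str.maketrans("ACGTacgt", "TGCAtgca")

def reverse_complement(sequence):
    # Complement each base, then reverse the whole strand.
    return sequence.translate(_COMPLEMENT)[::-1]

print(reverse_complement("AAAGGGG"))  # -> CCCCTTT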
Example #15
class CellDETR(nn.Module):
    """
    This class implements a DETR (Facebook AI) like instance segmentation model.
    """

    def __init__(self,
                 num_classes: int = 3,
                 number_of_query_positions: int = 12,
                 hidden_features=128,
                 backbone_channels: Tuple[Tuple[int, int], ...] = (
                         (1, 64), (64, 128), (128, 256), (256, 256)),
                 backbone_block: Type = ResNetBlock, backbone_convolution: Type = ModulatedDeformConvPack,
                 backbone_normalization: Type = nn.BatchNorm2d, backbone_activation: Type = PAU,
                 backbone_pooling: Type = nn.AvgPool2d,
                 bounding_box_head_features: Tuple[Tuple[int, int], ...] = ((128, 64), (64, 16), (16, 4)),
                 bounding_box_head_activation: Type = PAU,
                 classification_head_activation: Type = PAU,
                 num_encoder_layers: int = 3,
                 num_decoder_layers: int = 2,
                 dropout: float = 0.0,
                 transformer_attention_heads: int = 8,
                 transformer_activation: Type = PAU,
                 segmentation_attention_heads: int = 8,
                 segmentation_head_channels: Tuple[Tuple[int, int], ...] = (
                         (128 + 8, 128), (128, 64), (64, 32)),
                 segmentation_head_feature_channels: Tuple[int, ...] = (256, 128, 64),
                 segmentation_head_block: Type = ResPACFeaturePyramidBlock,
                 segmentation_head_convolution: Type = ModulatedDeformConvPack,
                 segmentation_head_normalization: Type = nn.InstanceNorm2d,
                 segmentation_head_activation: Type = PAU,
                 segmentation_head_final_activation: Type = nn.Sigmoid) -> None:
        """
        Constructor method
        :param num_classes: (int) Number of classes in the dataset
        :param number_of_query_positions: (int) Number of query positions
        :param hidden_features: (int) Number of hidden features in the transformer module
        :param backbone_channels: (Tuple[Tuple[int, int], ...]) In and output channels of each block in the backbone
        :param backbone_block: (Type) Type of block to be utilized in backbone
        :param backbone_convolution: (Type) Type of convolution to be utilized in the backbone
        :param backbone_normalization: (Type) Type of normalization to be used in the backbone
        :param backbone_activation: (Type) Type of activation function used in the backbone
        :param backbone_pooling: (Type) Type of pooling operation utilized in the backbone
        :param bounding_box_head_features: (Tuple[Tuple[int, int], ...]) In and output features of each layer in BB head
        :param bounding_box_head_activation: (Type) Type of activation function utilized in BB head
        :param classification_head_activation: (Type) Type of activation function utilized in classification head
        :param num_encoder_layers: (int) Number of layers in encoder part of the transformer module
        :param num_decoder_layers: (int) Number of layers in decoder part of the transformer module
        :param dropout: (float) Dropout factor used in transformer module and segmentation head
        :param transformer_attention_heads: (int) Number of attention heads in the transformer module
        :param transformer_activation: (Type) Type of activation function to be utilized in the transformer module
        :param segmentation_attention_heads: (int) Number of attention heads in the 2d multi head attention module
        :param segmentation_head_channels: (Tuple[Tuple[int, int], ...]) Number of in and output channels in seg. head
        :param segmentation_head_feature_channels: (Tuple[int, ...]) Backbone feature channels used in seg. head
        :param segmentation_head_block: (Type) Type of block to be utilized in segmentation head
        :param segmentation_head_convolution: (Type) Type of convolution utilized in segmentation head
        :param segmentation_head_normalization: (Type) Type of normalization used in segmentation head
        :param segmentation_head_activation: (Type) Type of activation used in segmentation head
        :param segmentation_head_final_activation: (Type) Type of activation function to be applied to the output pred
        """
        # Call super constructor
        super(CellDETR, self).__init__()
        # Init backbone
        self.backbone = Backbone(channels=backbone_channels, block=backbone_block, convolution=backbone_convolution,
                                 normalization=backbone_normalization, activation=backbone_activation,
                                 pooling=backbone_pooling)
        # Init convolution mapping to match transformer dims
        self.convolution_mapping = nn.Conv2d(in_channels=backbone_channels[-1][-1], out_channels=hidden_features,
                                             kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        # Init query positions
        self.query_positions = nn.Parameter(
            data=torch.randn(number_of_query_positions, hidden_features, dtype=torch.float),
            requires_grad=True)
        # Init embeddings
        self.row_embedding = nn.Parameter(data=torch.randn(50, hidden_features // 2, dtype=torch.float),
                                          requires_grad=True)
        self.column_embedding = nn.Parameter(data=torch.randn(50, hidden_features // 2, dtype=torch.float),
                                             requires_grad=True)
        # Init transformer
        self.transformer = Transformer(d_model=hidden_features, nhead=transformer_attention_heads,
                                       num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers,
                                       dropout=dropout, dim_feedforward=4 * hidden_features,
                                       activation=transformer_activation)
        # Init bounding box head
        self.bounding_box_head = BoundingBoxHead(features=bounding_box_head_features,
                                                 activation=bounding_box_head_activation)
        # Init class head
        self.class_head = nn.Sequential(
            nn.Linear(in_features=hidden_features, out_features=hidden_features // 2, bias=True),
            classification_head_activation(),
            nn.Linear(in_features=hidden_features // 2, out_features=num_classes + 1, bias=True))
        # Init segmentation attention head
        self.segmentation_attention_head = MultiHeadAttention(query_dimension=hidden_features,
                                                              hidden_features=hidden_features,
                                                              number_of_heads=segmentation_attention_heads,
                                                              dropout=dropout)
        # Init segmentation head
        self.segmentation_head = SegmentationHead(channels=segmentation_head_channels,
                                                  feature_channels=segmentation_head_feature_channels,
                                                  convolution=segmentation_head_convolution,
                                                  normalization=segmentation_head_normalization,
                                                  activation=segmentation_head_activation,
                                                  block=segmentation_head_block,
                                                  number_of_query_positions=number_of_query_positions,
                                                  softmax=isinstance(segmentation_head_final_activation(), nn.Softmax))
        # Init final segmentation activation
        self.segmentation_final_activation = segmentation_head_final_activation(dim=1) if isinstance(
            segmentation_head_final_activation(), nn.Softmax) else segmentation_head_final_activation()

    def get_parameters(self, lr_main: float = 1e-04, lr_backbone: float = 1e-05) -> Iterable:
        """
        Method returns all parameters of the model with different learning rates
        :param lr_main: (float) Learning rate of all parameters which are not included in the backbone
        :param lr_backbone: (float) Learning rate of the backbone parameters
        :return: (Iterable) Iterable object including the main parameters of the generator network
        """
        return [{'params': self.backbone.parameters(), 'lr': lr_backbone},
                {'params': self.convolution_mapping.parameters(), 'lr': lr_main},
                {'params': [self.row_embedding], 'lr': lr_main},
                {'params': [self.column_embedding], 'lr': lr_main},
                {'params': self.transformer.parameters(), 'lr': lr_main},
                {'params': self.bounding_box_head.parameters(), 'lr': lr_main},
                {'params': self.class_head.parameters(), 'lr': lr_main},
                {'params': self.segmentation_attention_head.parameters(), 'lr': lr_main},
                {'params': self.segmentation_head.parameters(), 'lr': lr_main}]

    def get_segmentation_head_parameters(self, lr: float = 1e-05) -> Iterable:
        """
        Method returns all parameters of the segmentation head and the 2d multi head attention module
        :param lr: (float) Learning rate to be utilized
        :return: (Iterable) Iterable object including the parameters of the segmentation head
        """
        return [{'params': self.segmentation_attention_head.parameters(), 'lr': lr},
                {'params': self.segmentation_head.parameters(), 'lr': lr}]

    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Forward pass
        :param input: (torch.Tensor) Input image of shape (batch size, channels, height, width)
        :return: (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]) Class prediction, bounding box predictions and
        segmentation maps
        """
        # Get features from backbone
        features, feature_list = self.backbone(input)
        # Map features to the desired shape
        features = self.convolution_mapping(features)
        # Get height and width of features
        height, width = features.shape[2:]
        # Get batch size
        batch_size = features.shape[0]
        # Make positional embeddings
        positional_embeddings = torch.cat([self.column_embedding[:height].unsqueeze(dim=0).repeat(height, 1, 1),
                                           self.row_embedding[:width].unsqueeze(dim=1).repeat(1, width, 1)],
                                          dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(batch_size, 1, 1, 1)
        latent_tensor, features_encoded = self.transformer(features, None, self.query_positions, positional_embeddings)
        latent_tensor = latent_tensor.permute(2, 0, 1)
        # Get class prediction
        class_prediction = F.softmax(self.class_head(latent_tensor), dim=2).clone()
        # Get bounding boxes
        bounding_box_prediction = self.bounding_box_head(latent_tensor)
        # Get bounding box attention masks for segmentation
        bounding_box_attention_masks = self.segmentation_attention_head(
            latent_tensor, features_encoded.contiguous())
        # Get instance segmentation prediction
        instance_segmentation_prediction = self.segmentation_head(features.contiguous(),
                                                                  bounding_box_attention_masks.contiguous(),
                                                                  feature_list[-2::-1])
        return class_prediction, \
               bounding_box_prediction.sigmoid().clone(), \
               self.segmentation_final_activation(instance_segmentation_prediction).clone()
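A minimal forward-pass sketch for CellDETR, assuming the custom building blocks used as defaults (PAU, ModulatedDeformConvPack, ResNetBlock, ...) are importable from the surrounding project; the batch size and resolution are arbitrary:

model = CellDETR(num_classes=3)
images = torch.randn(2, 1, 128, 128)  # single-channel input, matching backbone_channels[0][0]

class_pred, bbox_pred, seg_pred = model(images)
for name, tensor in [("classes", class_pred), ("boxes", bbox_pred), ("masks", seg_pred)]:
    print(name, tuple(tensor.shape))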
Example #16
def train(params, _run=None):
    params = Params(params)

    set_random_seeds(params.seed)

    time_now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    params.save_root = params.save_root + f'/{params.project_name}_{time_now}_{params.version}'
    os.makedirs(params.save_root, exist_ok=True)

    logging.basicConfig(filename=f'{params.save_root}/{params.project_name}_{time_now}_{params.version}.log',
                        filemode='a', format='%(asctime)s - %(levelname)s: %(message)s')

    if params.num_gpus == 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    logging.info(f'Available GPUs: {torch.cuda.device_count()}')

    train2007, train_label_2007, train_bb_2007 = load_annotation(os.path.join(params.data_root, 'VOC2007'), 'trainval')
    test2007, test_label_2007, test_bb_2007 = load_annotation(os.path.join(params.data_root, 'VOC2007'), 'test')
    train2012, train_label_2012, train_bb_2012 = load_annotation(os.path.join(params.data_root, 'VOC2012'), 'trainval')
    test2012, test_label_2012, test_bb_2012 = load_annotation(os.path.join(params.data_root, 'VOC2012'), 'test')
    train_data = train2007+test2007+train2012
    train_label = train_label_2007+test_label_2007+train_label_2012
    train_bb = train_bb_2007 + test_bb_2007 + train_bb_2012
    test_data = test2012
    test_label = test_label_2012
    test_bb = test_bb_2012

    train_dataset = YoloDataset(train_data, train_bb, train_label, params, train=True)
    eval_dataset = YoloDataset(test_data, test_bb, test_label, params, train=False)
    train_loader = DataLoader(dataset=train_dataset, num_workers=params.num_gpus*8, batch_size=params.batch_size,
                              shuffle=True, drop_last=True, pin_memory=True)
    eval_loader = DataLoader(dataset=eval_dataset, num_workers=1, batch_size=1,
                             shuffle=False, pin_memory=True)

    model = Backbone()
    last_step = 0
    last_epoch = 0

    if params.num_gpus > 0:
        model = model.cuda()
        if params.num_gpus > 1:
            model = nn.DataParallel(model)

    if params.optim == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=params.learning_rate, momentum=0.9, nesterov=True, weight_decay=0.0005)

    criterion = SumSquareError()
    schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=0.5, verbose=True, patience=10)

    epoch = 0
    begin_epoch = max(0, last_epoch)
    step = max(0, last_step)
    best_loss = 1e6
    logging.info('Begin to train...')
    model.train()
    import cv2 as cv
    try:
        for epoch in range(begin_epoch, params.epoch):
            for iter, (img, annotation) in enumerate(train_loader):
                output = model(img.cuda())
                loss = criterion(output, annotation.cuda())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                if iter % params.save_interval == 0:
                    logging.info(f'{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")} '
                                 f'Train Epoch: {epoch} iter: {iter} loss: {loss.item()}')
                step += 1
            if epoch % params.eval_interval == 0:
                model.eval()
                epoch_loss = 0
                with torch.no_grad():
                    for iter, (img, annotation) in enumerate(eval_loader):
                        output = model(img.cuda())
                        loss = criterion(output, annotation.cuda()).item()
                        epoch_loss += loss * len(img)
                    loss = epoch_loss / len(eval_dataset)
                    logging.info(f'{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")} '
                                 f'Eval Epoch: {epoch} loss: {loss}')
                    schedule.step(loss)
                    if loss < best_loss:
                        best_loss = loss
                        save_checkpoint(model, f'{params.save_root}/{epoch}_{step}.pth')
                model.train()

    except KeyboardInterrupt:
        save_checkpoint(model, f'{params.save_root}/Interrupt_{epoch}_{step}.pth')
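train() reads its configuration through a Params wrapper; a minimal sketch of the fields it actually accesses (the values are placeholders, and how Params consumes the dict is an assumption):

config = {
    "project_name": "yolo_voc",
    "version": "v1",
    "seed": 42,
    "save_root": "checkpoints",
    "data_root": "data/VOCdevkit",
    "num_gpus": 1,
    "batch_size": 16,
    "optim": "SGD",           # 'Adam' selects Adam, anything else falls back to SGD
    "learning_rate": 1e-3,
    "epoch": 100,
    "save_interval": 50,      # log every N training iterations
    "eval_interval": 1,       # run evaluation every N epochs
}
train(config)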
Example #17
        line = line.encode('UTF-8')
        lines.append(line)
    with file(filename, 'a') as outfile:
        outfile.writelines(lines)
    
def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii = False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)
        
def get_since_id(path):
    since_id  = 0
    for infile in glob.glob( os.path.join(path, 'tweets_*.json') ):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern,re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":        
    b = Backbone()

    datasetpath = 'datasets/IMDb'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets('I rated #IMDB', since_id)
    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/ratings.dat')
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
Example #18
            lines.append(line)
    with file(path + '/ratings.dat', 'a') as outfile:
        outfile.writelines(lines)

    
def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii = False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)
        
def get_since_id(path):
    since_id  = 0
    for infile in glob.glob( os.path.join(path, 'tweets_*.json') ):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern,re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":    
    b = Backbone()

    datasetpath = 'datasets/goodreads'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets('of 5 stars to +goodreads', since_id)
    ratings, items, users = extractDataset(tweets)
    writeDataset(ratings, items, users, datasetpath)
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
Example #19
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
        ])
    else:
        augmentation = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
        ])

    # configuration
    currentTime = datetime.datetime.now()
    currentTime = currentTime.strftime('%m%d%H%M%S')
    writer = SummaryWriter()
    model = Backbone()
    model = model.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.01,
                                momentum=0.9,
                                weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 40, gamma=0.1)
    if not args.eval:
        train_dataset = cifar10(transform=augmentation, eta=args.eta)
        train_loader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.workers,
                                  pin_memory=True)
    else:
        test_dataset = cifar10(transform=augmentation, if_test=True)
Example #20

def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii=False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)


def get_since_id(path):
    since_id = 0
    for infile in glob.glob(os.path.join(path, 'tweets_*.json')):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern, re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id


if __name__ == "__main__":
    b = Backbone()

    datasetpath = 'datasets/Pandora'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id = b.searchTweets(
        'I am listening to by on Pandora #pandora', since_id)
    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/listens.dat')
    writeTweets(tweets, datasetpath + '/tweets_' + str(new_since_id) + '.json')
Example #21
    print "wrote it!"

def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii = False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)

def get_since_id(path):
    since_id  = 0
    for infile in glob.glob( os.path.join(path, 'tweets_*.json') ):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern,re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":

    b = Backbone()

    datasetpath = 'dataset/shelfari'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets('rated it stars +shelfari', since_id)

    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/ratings.dat')
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
Example #22
            kd_spatial_loss += torch.dist(
                t_spatial_pool,
                self.spatial_wise_adaptation[_i](s_spatial_pool)) * 4e-3 * 6

        losses.update({'kd_feat_loss': kd_feat_loss})
        losses.update({'kd_channel_loss': kd_channel_loss})
        losses.update({'kd_spatial_loss': kd_spatial_loss})

        kd_nonlocal_loss = 0
        for _i, (t, s) in enumerate(zip(t_feats, s_feats)):
            s_relation = self.student_non_local[_i](s)
            t_relation = self.teacher_non_local[_i](t)
            kd_nonlocal_loss += torch.dist(
                self.non_local_adaptation[_i](s_relation), t_relation, p=2)
        losses.update(kd_nonlocal_loss=kd_nonlocal_loss * 7e-5 * 6)
        return losses


student = Backbone(32)
teacher = Backbone(256)
dist_head = DistHead(32, 256)
x = torch.randn(4, 3, 640, 640)

s_feats = student(x)
with torch.no_grad():
    t_feats = teacher(x)

losses = dist_head(s_feats, t_feats)

for loss_name, value in losses.items():
    print(loss_name, value)
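A minimal continuation sketch: sum the distillation terms and update the student (the optimizer settings are placeholders; the teacher was run under no_grad, so gradients flow only into the student and the adaptation layers):

optimizer = torch.optim.SGD(
    list(student.parameters()) + list(dist_head.parameters()), lr=0.01)

total_loss = sum(losses.values())
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
print("total distillation loss:", total_loss.item())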
def main(args):
    model = Backbone()
    if args.use_gpu:
        model.load_state_dict(torch.load(args.model_path), strict=True)
    else:
        model.load_state_dict(torch.load(args.model_path, map_location='cpu'),
                              strict=True)

    device = 'cuda' if args.use_gpu else 'cpu'
    model = model.to(device)
    # print(model)

    model.eval()
    batch_size = 1
    x = torch.randn(batch_size,
                    3,
                    args.input_size,
                    args.input_size,
                    requires_grad=True).to(device)
    torch_out = model(x)

    print("파이토치 모델 실행시간 측정")
    test_cnt = 5
    total_time = 0.
    for i in range(test_cnt):
        x = torch.randn(batch_size,
                        3,
                        args.input_size,
                        args.input_size,
                        requires_grad=True).to(device)
        start = time.time()
        torch_out = model(x)
        total_time += time.time() - start
    pytorch_exec_time = total_time / test_cnt
    print("파이토치 모델 실행시간 측정 완료")

    os.makedirs(os.path.dirname(args.onnx_output_path), exist_ok=True)
    # convert the model to ONNX
    torch.onnx.export(
        model,  # model to be exported
        x,  # model input (a tuple or multiple inputs are also possible)
        args.onnx_output_path,  # where to save the model (a file or file-like object)
        export_params=True,  # store the trained weights inside the model file
        # opset_version=11,          # ONNX opset version to use for the export
        do_constant_folding=True,  # whether to apply constant folding for optimization
        input_names=['input'],  # name of the model's input
        output_names=['output'],  # name of the model's output
        # )
        # dynamic_axes={'input': {2: 'height', 3: 'width'}}
    )

    print("onnx 모델 컨버팅 완료")

    print("onnx 모델 로딩")
    ort_session = onnxruntime.InferenceSession(args.onnx_output_path)
    print("onnx 모델 로딩 완료")
    ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
    ort_outs = ort_session.run(None, ort_inputs)

    print("onnx 모델 실행시간 측정")
    total_time = 0.
    for i in range(test_cnt):
        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}

        start = time.time()
        ort_outs = ort_session.run(None, ort_inputs)
        total_time += time.time() - start
    onnx_exec_time = total_time / test_cnt
    print("파이토치 모델 실행시간 측정 완료")

    np.testing.assert_allclose(to_numpy(torch_out),
                               ort_outs[0],
                               rtol=1e-03,
                               atol=1e-05)
    print(
        "Exported model has been tested with ONNXRuntime, and the result looks good!"
    )

    print("onnx 평균시간", onnx_exec_time, "파이토치 평균시간", pytorch_exec_time)
Example #24
    def __init__(self, pretrained=False):
        super(Detector, self).__init__()

        # ---------------------------
        # TODO: Param
        self.regions = [0, 64, 128, 256, 512, 9999]
        self.first_stride = 8
        self.view_size = 1025
        self.classes = 80
        self.nms_th = 0.05
        self.nms_iou = 0.6
        self.max_detections = 3000
        self.center_offset_ratio = 1.5
        # ---------------------------

        # fpn =======================================================
        self.backbone = Backbone(pretrained=pretrained)
        self.relu = nn.ReLU(inplace=True)
        self.conv_out6 = nn.Conv2d(2048,
                                   256,
                                   kernel_size=3,
                                   padding=1,
                                   stride=2)
        self.conv_out7 = nn.Conv2d(256,
                                   256,
                                   kernel_size=3,
                                   padding=1,
                                   stride=2)
        self.prj_5 = nn.Conv2d(2048, 256, kernel_size=1)
        self.prj_4 = nn.Conv2d(1024, 256, kernel_size=1)
        self.prj_3 = nn.Conv2d(512, 256, kernel_size=1)
        self.conv_5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)

        # head =======================================================
        self.conv_cls = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True), nn.Conv2d(256,
                                             256,
                                             kernel_size=3,
                                             padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True), nn.Conv2d(256,
                                             256,
                                             kernel_size=3,
                                             padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(256, self.classes, kernel_size=3, padding=1))
        self.conv_reg = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True), nn.Conv2d(256,
                                             256,
                                             kernel_size=3,
                                             padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True), nn.Conv2d(256,
                                             256,
                                             kernel_size=3,
                                             padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(256, 4, kernel_size=3, padding=1))

        # reinit head =======================================================
        for layer in self.conv_cls.children():
            if isinstance(layer, nn.Conv2d):
                nn.init.constant_(layer.bias, 0)
                nn.init.normal_(layer.weight, mean=0, std=0.01)
        for layer in self.conv_reg.children():
            if isinstance(layer, nn.Conv2d):
                nn.init.constant_(layer.bias, 0)
                nn.init.normal_(layer.weight, mean=0, std=0.01)
        pi = 0.01
        _bias = -math.log((1.0 - pi) / pi)
        nn.init.constant_(self.conv_cls[-1].bias, _bias)

        # learnable parameter for scales =====================================
        self.scale_div = nn.Parameter(torch.ones(len(self.regions) - 1))

        # generate anchors ===================================================
        self._a_center_yx, self._a_tlbr_max_minmax, self._a_center_offset_max = \
            gen_anchors(self.view_size, self.first_stride, self.regions, self.center_offset_ratio)
        self.a_shw = self._a_center_yx.shape[0]
        self.register_buffer('a_center_yx', self._a_center_yx)
        self.register_buffer('a_tlbr_max_minmax', self._a_tlbr_max_minmax)
        self.register_buffer('a_center_offset_max', self._a_center_offset_max)
Example #25
        outfile.writelines(lines)


def writeTweets(tweets, filename):
    line = json.dumps(tweets, ensure_ascii=False).encode('UTF-8')
    with file(filename, 'w') as outfile:
        outfile.writelines(line)


def get_since_id(path):
    since_id = 0
    for infile in glob.glob(os.path.join(path, 'tweets_*.json')):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern, re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        #keep maximum id
        since_id = max(id, since_id)
    return since_id


if __name__ == "__main__":
    b = Backbone()

    datasetpath = 'datasets/Goodreads'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id = b.searchTweets('of 5 stars to', since_id)
    ratings, items, users = extractDataset(tweets)
    writeDataset(ratings, items, users, datasetpath)
    writeTweets(tweets, datasetpath + '/tweets_' + str(new_since_id) + '.json')
Example #26
def get_frames(seq1, seq2, shift_left, shift_right, all_frames):
    """Take output of check_input function and insert into flanking sequences.
    take from database all miRNA results and check if ends of input is suitable
    for flanking sequences.
    If first value == and miRNA_end_5 second value == miRNA_end_3 then simply
    concatenate
    sequences flanks5_s + first_sequence + loop_s + second_sequence + flanks3_s.
    If any end is different function has to modify end of the insert:
    Right end:
    if miRNA_end_5 < first_end
    add to right site of second sequence additional nucleotides
    (as many as |miRNA_end_5 - first_end|)
    like (dots are nucleotides to add, big letter are flanking sequences, small are input):

    AAAGGGGCTTTTagtcttaga
    TTTCCCCGAA....agaatct

    if miRNA_end_5 > first_end
    cut nucleotides from rigth site of flanks3_s and/or from right site of
    second sequence

    before cut:
    AAAGGGGCTTTTagtcttaga
    TTTCCCCGAAAATTcctcagaatct (-2, +2)

    After
    AAAGGGGCTTTTagtcttaga
    TTTCCCCGAAAAtcagaatct

    Returns list of tuples (frame, sequence_1 sequence_2)

    input: string, string, int, int, pri-miRNA objects
    output: List of list of Backbone object, 1st strand 2nd strand   """
    frames = []
    for elem in all_frames:
        frame = Backbone(**elem)
        if shift_left == frame.miRNA_end_5 and shift_right == frame.miRNA_end_3:
            frames.append([frame, seq1, seq2])
        else:
            _seq1 = seq1[:]
            _seq2 = seq2[:]
            # miRNA 5' end (left)
            if frame.miRNA_end_5 < shift_left:
                if frame.miRNA_end_5 < 0 and shift_left < 0:
                    _seq2 += reverse_complement(
                        frame.flanks5_s[frame.miRNA_end_5:shift_left])
                elif frame.miRNA_end_5 < 0 and shift_left > 0:
                    frame.flanks5_s = frame.flanks5_s[:frame.miRNA_end_5]
                    _seq2 += reverse_complement(_seq1[:shift_left])
                elif shift_left == 0:
                    _seq2 += reverse_complement(
                        frame.flanks5_s[:frame.miRNA_end_5])
                elif frame.miRNA_end_5 == 0:
                    _seq2 += reverse_complement(_seq1[:frame.miRNA_end_5])
                else:
                    _seq2 += reverse_complement(
                        _seq1[frame.miRNA_end_5:shift_left])
            elif frame.miRNA_end_5 > shift_left:
                if frame.miRNA_end_5 > 0 and shift_left < 0:
                    frame.flanks5_s += reverse_complement(
                        _seq2[frame.miRNA_end_5:])
                    frame.flanks3_s = frame.flanks3_s[frame.miRNA_end_5:]
                elif frame.miRNA_end_5 > 0 and shift_left > 0:
                    frame.flanks5_s += reverse_complement(
                        frame.flanks3_s[shift_left:frame.miRNA_end_5])
                elif shift_left == 0:
                    frame.flanks5_s += reverse_complement(
                        frame.flanks3_s[:frame.miRNA_end_5])
                elif frame.miRNA_end_5 == 0:
                    frame.flanks5_s += reverse_complement(_seq2[shift_left:])
                else:
                    frame.flanks5_s += reverse_complement(
                        _seq2[shift_left:frame.miRNA_end_5])

            # miRNA 3' end (right)
            if frame.miRNA_end_3 < shift_right:
                if frame.miRNA_end_3 < 0 and shift_right > 0:
                    frame.loop_s = frame.loop_s[-frame.miRNA_end_3:]
                    frame.loop_s += reverse_complement(_seq1[-shift_right:])
                elif frame.miRNA_end_3 > 0 and shift_right > 0:
                    frame.loop_s += reverse_complement(
                        _seq1[-shift_right:-frame.miRNA_end_3])
                elif frame.miRNA_end_3 == 0:
                    frame.loop_s += reverse_complement(_seq1[-shift_right:])
                elif shift_right == 0:
                    frame.loop_s += reverse_complement(
                        frame.loop_s[:-frame.miRNA_end_3])
                else:
                    frame.loop_s += reverse_complement(
                        frame.loop_s[-shift_right:-frame.miRNA_end_3])
            elif frame.miRNA_end_3 > shift_right:
                if frame.miRNA_end_3 > 0 and shift_right < 0:
                    _seq1 += reverse_complement(_seq2[:-shift_right])
                    frame.loop_s = frame.loop_s[:-frame.miRNA_end_3]
                elif frame.miRNA_end_3 > 0 and shift_right > 0:
                    _seq1 += reverse_complement(
                        frame.loop_s[-frame.miRNA_end_3:-shift_right])
                elif shift_right == 0:
                    _seq1 += reverse_complement(
                        frame.loop_s[:frame.miRNA_end_3])
                elif frame.miRNA_end_3 == 0:
                    _seq1 += reverse_complement(_seq2[:-shift_right])
                else:
                    _seq1 += reverse_complement(
                        _seq2[-frame.miRNA_end_3:-shift_right])

            frames.append([frame, _seq1, _seq2])
    return frames
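
# get_frames() leans on a reverse_complement() helper imported elsewhere in
# this module. A minimal sketch of such a helper for DNA-alphabet strings
# (an assumption -- the real project may also handle RNA or ambiguity codes),
# preserving the upper/lower case used in the docstring examples:

def reverse_complement(sequence):
    # Watson-Crick complement of each base, read in reverse order
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
                  'a': 't', 't': 'a', 'g': 'c', 'c': 'g'}
    return ''.join(complement[base] for base in reversed(sequence))

# e.g. reverse_complement('AAAGG') == 'CCTTT'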
Exemple #27
0
        line = line.encode('UTF-8')
        lines.append(line)
    # open() replaces the removed Python 2 file() builtin; binary mode for bytes
    with open(filename, 'ab') as outfile:
        outfile.writelines(lines)


def writeTweets(tweets, filename):
    data = json.dumps(tweets, ensure_ascii=False).encode('UTF-8')
    with open(filename, 'wb') as outfile:
        outfile.write(data)


def get_since_id(path):
    since_id = 0
    for infile in glob.glob(os.path.join(path, 'tweets_*.json')):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern, re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        # keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":        
    b = Backbone()

    datasetpath = 'datasets/Lastfm'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets("I'm listening to via @lastfm", since_id)
    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/listens.dat')
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
Exemple #28
0
        line = line.encode('UTF-8')
        lines.append(line)
    # open() replaces the removed Python 2 file() builtin; binary mode for bytes
    with open(filename, 'ab') as outfile:
        outfile.writelines(lines)


def writeTweets(tweets, filename):
    data = json.dumps(tweets, ensure_ascii=False).encode('UTF-8')
    with open(filename, 'wb') as outfile:
        outfile.write(data)


def get_since_id(path):
    since_id = 0
    for infile in glob.glob(os.path.join(path, 'tweets_*.json')):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern, re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        # keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":        
    b = Backbone()

    datasetpath = 'datasets/Pandora'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets('I am listening to by on Pandora #pandora', since_id)
    dataset = extractDataset(tweets)
    writeDataset(dataset, datasetpath + '/listens.dat')
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
Exemple #29
0
 def __init__(self,
              num_classes: int = 3,
              number_of_query_positions: int = 12,
              hidden_features=128,
              backbone_channels: Tuple[Tuple[int, int], ...] = (
                      (1, 64), (64, 128), (128, 256), (256, 256)),
              backbone_block: Type = ResNetBlock, backbone_convolution: Type = ModulatedDeformConvPack,
              backbone_normalization: Type = nn.BatchNorm2d, backbone_activation: Type = PAU,
              backbone_pooling: Type = nn.AvgPool2d,
              bounding_box_head_features: Tuple[Tuple[int, int], ...] = ((128, 64), (64, 16), (16, 4)),
              bounding_box_head_activation: Type = PAU,
              classification_head_activation: Type = PAU,
              num_encoder_layers: int = 3,
              num_decoder_layers: int = 2,
              dropout: float = 0.0,
              transformer_attention_heads: int = 8,
              transformer_activation: Type = PAU,
              segmentation_attention_heads: int = 8,
              segmentation_head_channels: Tuple[Tuple[int, int], ...] = (
                      (128 + 8, 128), (128, 64), (64, 32)),
              segmentation_head_feature_channels: Tuple[int, ...] = (256, 128, 64),
              segmentation_head_block: Type = ResPACFeaturePyramidBlock,
              segmentation_head_convolution: Type = ModulatedDeformConvPack,
              segmentation_head_normalization: Type = nn.InstanceNorm2d,
              segmentation_head_activation: Type = PAU,
              segmentation_head_final_activation: Type = nn.Sigmoid) -> None:
     """
     Constructor method
     :param num_classes: (int) Number of classes in the dataset
     :param number_of_query_positions: (int) Number of query positions
     :param hidden_features: (int) Number of hidden features in the transformer module
     :param backbone_channels: (Tuple[Tuple[int, int], ...]) In and output channels of each block in the backbone
     :param backbone_block: (Type) Type of block to be utilized in backbone
     :param backbone_convolution: (Type) Type of convolution to be utilized in the backbone
     :param backbone_normalization: (Type) Type of normalization to be used in the backbone
     :param backbone_activation: (Type) Type of activation function used in the backbone
     :param backbone_pooling: (Type) Type of pooling operation utilized in the backbone
     :param bounding_box_head_features: (Tuple[Tuple[int, int], ...]) In and output features of each layer in BB head
     :param bounding_box_head_activation: (Type) Type of activation function utilized in BB head
     :param classification_head_activation: (Type) Type of activation function utilized in classification head
     :param num_encoder_layers: (int) Number of layers in encoder part of the transformer module
     :param num_decoder_layers: (int) Number of layers in decoder part of the transformer module
     :param dropout: (float) Dropout factor used in transformer module and segmentation head
     :param transformer_attention_heads: (int) Number of attention heads in the transformer module
     :param transformer_activation: (Type) Type of activation function to be utilized in the transformer module
     :param segmentation_attention_heads: (int) Number of attention heads in the 2d multi head attention module
     :param segmentation_head_channels: (Tuple[Tuple[int, int], ...]) Number of in and output channels in seg. head
     :param segmentation_head_feature_channels: (Tuple[int, ...]) Backbone feature channels used in seg. head
     :param segmentation_head_block: (Type) Type of block to be utilized in segmentation head
     :param segmentation_head_convolution: (Type) Type of convolution utilized in segmentation head
     :param segmentation_head_normalization: (Type) Type of normalization used in segmentation head
     :param segmentation_head_activation: (Type) Type of activation used in segmentation head
     :param segmentation_head_final_activation: (Type) Type of activation function to be applied to the output pred
     """
     # Call super constructor
     super(CellDETR, self).__init__()
     # Init backbone
     self.backbone = Backbone(channels=backbone_channels, block=backbone_block, convolution=backbone_convolution,
                              normalization=backbone_normalization, activation=backbone_activation,
                              pooling=backbone_pooling)
     # Init convolution mapping to match transformer dims
     self.convolution_mapping = nn.Conv2d(in_channels=backbone_channels[-1][-1], out_channels=hidden_features,
                                          kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
     # Init query positions
     self.query_positions = nn.Parameter(
         data=torch.randn(number_of_query_positions, hidden_features, dtype=torch.float),
         requires_grad=True)
     # Init embeddings
     self.row_embedding = nn.Parameter(data=torch.randn(50, hidden_features // 2, dtype=torch.float),
                                       requires_grad=True)
     self.column_embedding = nn.Parameter(data=torch.randn(50, hidden_features // 2, dtype=torch.float),
                                          requires_grad=True)
     # Init transformer
     self.transformer = Transformer(d_model=hidden_features, nhead=transformer_attention_heads,
                                    num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers,
                                    dropout=dropout, dim_feedforward=4 * hidden_features,
                                    activation=transformer_activation)
     # Init bounding box head
     self.bounding_box_head = BoundingBoxHead(features=bounding_box_head_features,
                                              activation=bounding_box_head_activation)
     # Init class head
     self.class_head = nn.Sequential(
         nn.Linear(in_features=hidden_features, out_features=hidden_features // 2, bias=True),
         classification_head_activation(),
         nn.Linear(in_features=hidden_features // 2, out_features=num_classes + 1, bias=True))
     # Init segmentation attention head
     self.segmentation_attention_head = MultiHeadAttention(query_dimension=hidden_features,
                                                           hidden_features=hidden_features,
                                                           number_of_heads=segmentation_attention_heads,
                                                           dropout=dropout)
     # Init segmentation head
     self.segmentation_head = SegmentationHead(channels=segmentation_head_channels,
                                               feature_channels=segmentation_head_feature_channels,
                                               convolution=segmentation_head_convolution,
                                               normalization=segmentation_head_normalization,
                                               activation=segmentation_head_activation,
                                               block=segmentation_head_block,
                                               number_of_query_positions=number_of_query_positions,
                                               softmax=isinstance(segmentation_head_final_activation(), nn.Softmax))
     # Init final segmentation activation
     self.segmentation_final_activation = segmentation_head_final_activation(dim=1) if isinstance(
         segmentation_head_final_activation(), nn.Softmax) else segmentation_head_final_activation()
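
# The 50 x (hidden_features // 2) row and column embeddings above follow the
# learned 2D positional-encoding recipe from DETR. How they are combined lives
# in the forward pass, which is not shown here; a minimal sketch of the usual
# DETR-style construction (an assumption, with a hypothetical 16 x 16 backbone
# feature map) looks like this:

import torch

hidden_features = 128
row_embedding = torch.randn(50, hidden_features // 2)
column_embedding = torch.randn(50, hidden_features // 2)

h, w = 16, 16  # spatial size of the backbone feature map (hypothetical)
positional_encoding = torch.cat([
    column_embedding[:w].unsqueeze(0).repeat(h, 1, 1),  # (h, w, hidden // 2)
    row_embedding[:h].unsqueeze(1).repeat(1, w, 1),     # (h, w, hidden // 2)
], dim=-1).permute(2, 0, 1).unsqueeze(0)                # (1, hidden, h, w)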
            lines.append(line)
    # open() replaces the removed Python 2 file() builtin; binary mode for bytes
    with open(path + '/ratings.dat', 'ab') as outfile:
        outfile.writelines(lines)


def writeTweets(tweets, filename):
    data = json.dumps(tweets, ensure_ascii=False).encode('UTF-8')
    with open(filename, 'wb') as outfile:
        outfile.write(data)


def get_since_id(path):
    since_id = 0
    for infile in glob.glob(os.path.join(path, 'tweets_*.json')):
        pattern = 'tweets_([0-9]*).json'
        p = re.compile(pattern, re.M | re.I)
        matches = p.findall(infile)
        id = int(matches[0])
        # keep maximum id
        since_id = max(id, since_id)
    return since_id

if __name__ == "__main__":    
    b = Backbone()

    datasetpath = 'datasets/Goodreads'
    since_id = get_since_id(datasetpath)

    tweets, new_since_id =  b.searchTweets('of 5 stars to', since_id)
    ratings, items, users = extractDataset(tweets)
    writeDataset(ratings, items, users, datasetpath)
    writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')