Example #1
def add_post():
    """
    This method should add a new post in db
    :return: a notification of successful or errors
    """
    params = {
        'title': fields.String(),
        'content': fields.String()
    }
    json_data = parse_req(params)
    content = json_data.get("content", None)
    if check_input(content):
        return send_error(code=406, message="Content contains special characters")
    title = json_data.get('title', None)
    if check_input(title):
        return send_error(code=406, message="Title contains special characters")
    query_data = {
        '_id': str(ObjectId()),
        'title': title,
        'content': content
    }
    try:
        result = news.insert(query_data)
        return send_result(result)
    except Exception:
        return send_error(code=400)
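check_input, parse_req, send_result and send_error are project helpers that are not shown in these snippets. In this snippet check_input evidently returns a truthy value when the input should be rejected (other snippets below use a different polarity and signature), so a minimal sketch under that assumption might be:

import re

def check_input(value):
    # Hypothetical sketch: treat a missing value, or one containing characters
    # outside a basic alphanumeric/whitespace/punctuation set, as invalid.
    if not value:
        return True
    return re.search(r"[^\w\s.,!?'-]", value) is not None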
Example #2
def main(minRNA, maxRNA, basename, output_loc):
    check_input()
    write_log_info(minRNA, maxRNA, basename)
    output_di, len_tot_di = make_outfile_dict(minRNA, maxRNA, basename)
    len_tot_di, tot_reads = separate_reads_by_length(basename, output_di, minRNA, maxRNA, len_tot_di)
    len_tot_di = convert_histogram_to_values(len_tot_di, tot_reads)
    close_output_files(output_di)
    write_length_histogram(len_tot_di, basename, output_loc)
Example #3
def main(file, log_dir, conf):
    check_input()
    cfg = load_mirquant_config_file(conf)
    dirc, name, need_adapt = check_for_adapter_file(file)
    if need_adapt == True:
        if not check_for_barcode_file(dirc, name, cfg):
            barcode = scan_fastq_for_barcode(file, name, log_dir)
            adapter = create_adapter(barcode, cfg['cutadapt']['adapter'])
            write_adapter_file(dirc, name, adapter)
Example #4
def main(file, log_dir, conf):
    check_input()
    cfg = load_mirquant_config_file(conf)
    dirc, name, need_adapt = check_for_adapter_file(file)
    if need_adapt == True:
        if not check_for_barcode_file(dirc, name, cfg):
            barcode = scan_fastq_for_barcode(file, name, log_dir)
            adapter = create_adapter(barcode, cfg['cutadapt']['adapter'])
            write_adapter_file(dirc, name, adapter)
Example #5
def main(minRNA, maxRNA, basename, output_loc):
    check_input()
    write_log_info(minRNA, maxRNA, basename)
    output_di, len_tot_di = make_outfile_dict(minRNA, maxRNA, basename)
    len_tot_di, tot_reads = separate_reads_by_length(basename, output_di,
                                                     minRNA, maxRNA,
                                                     len_tot_di)
    len_tot_di = convert_histogram_to_values(len_tot_di, tot_reads)
    close_output_files(output_di)
    write_length_histogram(len_tot_di, basename, output_loc)
Example #6
def main(conf, shrimp_dir):
    check_input()
    cfg = load_mirquant_config_file(conf)
    res_li = resource_paths(cfg['parameters']['species'], cfg['paths'], cfg['parameters'])
    sample = os.path.basename(shrimp_dir.split('./IntermediateFiles/')[0])
    out_di = sample_output_paths(cfg['paths']['output'], sample) 
    logging.info('\n\n### Processing SHRiMP results ###\n')
    mir_fi = res_li[1]
    mirList, mirStrand = load_mir_info(mir_fi)
    tagCount, hits, maps = load_SHRiMP_res(shrimp_dir)    
    hits, tags, pCount = get_best_alignments(hits, maps)
    write_processed_shrimp_output(hits, tags, mirList, mirStrand, shrimp_dir)
    remove_temp_file(os.path.basename(shrimp_dir).split('_')[1], out_di['temp'])
    logging.info('Total SHRiMP alignments = {}; proportional count = {}'.format(tagCount, pCount))
Example #7
def main(conf, shrimp_dir):
    print 'Shrimp post-processing'
    check_input()
    cfg = load_mirquant_config_file(conf)
    res_li = resource_paths(cfg['parameters']['species'], cfg['paths'], cfg['parameters'])
    sample = os.path.basename(shrimp_dir.split('./IntermediateFiles/')[0])
    out_di = sample_output_paths(cfg['paths']['output'], sample) 
    logging.info('\n\n### Processing SHRiMP results ###\n')
    mir_fi = res_li[1]
    mirList, mirStrand = load_mir_info(mir_fi)
    tagCount, hits, maps = load_SHRiMP_res(shrimp_dir)    
    hits, tags, pCount = get_best_alignments(hits, maps)
    write_processed_shrimp_output(hits, tags, mirList, mirStrand, shrimp_dir)
    remove_temp_file(os.path.basename(shrimp_dir).split('_')[1], out_di['temp'])
    logging.info('Total SHRiMP alignments = {}; proportional count = {}'.format(tagCount, pCount))
Example #8
def main(input_str):
    """
    Take a string input and return the best results according to their scores.
    Each result includes the sh-miR sequence, its score and a link to the 2D
    structure from the mfold program.
    """
    sequence = check_input(input_str)
    seq1, seq2, shift_left, shift_right = sequence
    if not seq2:
        seq2 = reverse_complement(seq1)
    all_frames = get_all()
    if 'error' in all_frames: #database error handler
        return all_frames

    frames = get_frames(seq1, seq2, shift_left, shift_right, all_frames)
    original_frames = [Backbone(**elem) for elem in all_frames]

    frames_with_score = []
    for frame_tuple, original in zip(frames, original_frames):
        score = 0
        frame, insert1, insert2 = frame_tuple
        mfold_data = mfold(frame.template(insert1, insert2))
        if 'error' in mfold_data:
            return mfold_data
        pdf, ss = mfold_data[0], mfold_data[1]
        score += score_frame(frame_tuple, ss, original)
        score += score_homogeneity(original)
        score += two_same_strands_score(seq1, original)
        frames_with_score.append((score, frame.template(insert1, insert2), frame.name, pdf))

    sorted_frames = [elem for elem in sorted(frames_with_score,
                                             key=lambda x: x[0], reverse=True)
                     if elem[0] > 60]
    return {'result': sorted_frames[:3]}
Example #9
def CNV_plot(cnv_file, subdir, png):
    """Make CNV plots using R cnv library"""

    os.system('mkdir -p working/cnv_seq/CNV')
    os.system('mkdir -p working/cnv_seq/CNV/' 
              + subdir + '/')
    output_cnv_file = 'working/cnv_seq/CNV/' + subdir + '/' + cnv_file.split('.hits')[0].rstrip('T')  + '.cnvs'
    os.system('rm ' + output_cnv_file)
    rtmp = 'rtmp' + str(random.randint(0,1000))
    with open(rtmp, 'w') as f:
        f.write("source('funcs.R')\n")
        f.write('library(cnv)\n')
        f.write("data<-read.delim('"
                + cnv_file + "')\n")
        f.write("png('" + png + "')\n")
        f.write("plot.cnv.all.perry(data,colour=9)\n")
        f.write('dev.off()\n')
        f.write("cnv.print(data, file='" + output_cnv_file + "')\n")
        f.write('q()\n')
    if utils.check_input(cnv_file):
        os.system('R CMD BATCH --vanilla ' + rtmp + ' tmpLog')
        os.system('rm ' + rtmp + ' tmpLog')
    os.system('mv ' + cnv_file + ' working/cnv_seq/CNV/' + subdir + '/')
    os.system('mv ' + cnv_file.replace('cnv', 'count') 
              + ' working/cnv_seq/CNV/' + subdir + '/')
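The function above shells out through os.system for every step. As a sketch only (names reused from the function above, behaviour assumed unchanged), the R batch invocation and temp-file cleanup could equally be expressed with subprocess and os.remove:

import os
import subprocess

if utils.check_input(cnv_file):
    # Run the generated R script in batch mode, then drop the temporaries.
    subprocess.call(['R', 'CMD', 'BATCH', '--vanilla', rtmp, 'tmpLog'])
    os.remove(rtmp)
    os.remove('tmpLog')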
Example #10
def main(input_str):
    """
    Main function takes string input and returns the best results depending
    on scoring. Single result include sh-miR sequence,
    score and link to 2D structure from mfold program
    """
    sequence = check_input(input_str)
    seq1, seq2, shift_left, shift_right = sequence
    if not seq2:
        seq2 = reverse_complement(seq1)
    all_frames = get_all()
    if 'error' in all_frames:  #database error handler
        return all_frames

    frames = get_frames(seq1, seq2, shift_left, shift_right, all_frames)
    original_frames = [Backbone(**elem) for elem in all_frames]

    frames_with_score = []
    for frame_tuple, original in zip(frames, original_frames):
        score = 0
        frame, insert1, insert2 = frame_tuple
        mfold_data = mfold(frame.template(insert1, insert2))
        if 'error' in mfold_data:
            return mfold_data
        pdf, ss = mfold_data[0], mfold_data[1]
        score += score_frame(frame_tuple, ss, original)
        score += score_homogeneity(original)
        score += two_same_strands_score(seq1, original)
        frames_with_score.append(
            (score, frame.template(insert1, insert2), frame.name, pdf))

    sorted_frames = [elem for elem in sorted(frames_with_score,
                                             key=lambda x: x[0], reverse=True)
                     if elem[0] > 60]
    return {'result': sorted_frames[:3]}
Example #11
 def test_input(self):
     """Tests for check_input function"""
     tests = [
         ('acggctTggaacttctggtac', ['acggcttggaacttctggtac', '', 0, 0]),
         ('acggcttGGaacttctggtac gtaccagaagttccaagccgt', [utils.check_complementary('acggcttggaacttctggtac', 'gtaccagaagttccaagccgt')]),
         ('acggcttggAActuctggtac gtaccagaagttccaagccgt', [utils.check_complementary('acggcttggaacttctggtac', 'gtaccagaagttccaagccgt')]),
         ('acggctTggaacttctggtTT', ['acggcttggaacttctggt', '', 0, 0])]
     for list1, expected in tests:
         self.assertEqual(utils.check_input(list1), expected)
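Read back from the assertions above, the expected behaviour can be summarised as a short usage sketch (the import path is hypothetical; the return values are taken directly from the test data):

from shmir_api import utils  # hypothetical import path

# A single 21-nt strand is lower-cased; the second strand and shifts default to '' and 0.
utils.check_input('acggctTggaacttctggtac')
# -> ['acggcttggaacttctggtac', '', 0, 0]

# A trailing 'TT' overhang is trimmed from single-strand input.
utils.check_input('acggctTggaacttctggtTT')
# -> ['acggcttggaacttctggt', '', 0, 0]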
Example #12
def run_task(config, config_primary, config_subs, models_df):
    df_name, max_len, train_ratio, text_feature,\
        model_name, models_path, embeddings_version, \
        embeddings_path, task_type = utils.read_config_main(config)
    utils.check_input(model_name, task_type)
    if task_type == 'train_primary':
        models_df = primary_networks.train_primary(
            model_name, df_name, max_len, train_ratio, text_feature,
            embeddings_version, embeddings_path, config_primary, models_path,
            models_df)
        models_df.to_csv(models_path + 'models_df.csv')
    elif task_type == "train_sub":
        models_df = sub_networks.train_sub_models(model_name, df_name, max_len,
                                                  train_ratio, text_feature,
                                                  embeddings_version,
                                                  embeddings_path, config_subs,
                                                  models_path, models_df)
        models_df.to_csv(models_path + 'models_df.csv')
    elif task_type == "classify_sub":
        sub_networks.classify_sub_models(model_name, max_len, text_feature,
                                         embeddings_version, embeddings_path,
                                         models_path)
    elif task_type == "test":
        ACRCNN.run_test(models_path)
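Here check_input takes the configured model name and task type. A minimal sketch of such a guard, assuming it simply rejects task types that the dispatcher above has no branch for, might be:

def check_input(model_name, task_type):
    # Hypothetical guard: fail fast on unknown task types
    # (model_name validation would be project-specific).
    valid_tasks = {'train_primary', 'train_sub', 'classify_sub', 'test'}
    if task_type not in valid_tasks:
        raise ValueError('Unknown task_type: {}'.format(task_type))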
Example #13
def get_title():
    """
    This method should find a post by its content
    :return: a post
    """
    param_title = request.args.get('title', None)
    query_title = {
        'title': param_title
    }
    if check_input(param_title):
        find_title = news.find_one(query_title)
        if find_title:
            page_size = request.args.get('page_size', '0')
            page_number = request.args.get('page_number', '0')
            skips = int(page_size) * int(page_number)
            find_data = list(news.find(query_title, {'_id': 0}).skip(skips).limit(int(page_size)))
            return send_result(find_data)
        return send_error(code=404, message="Title doesn't exist")
    return send_error(code=406, message="Invalid title")
Example #14
                    choice = int ( raw_input('Enter your choice [1-5] : ') )
                    is_valid = 1
            except ValueError, e :
                    print ("'%s' is not a valid integer." % e.args[0].split(": ")[1])

    if choice == 1:
            print (60 * '-')
            print(" For this configuration 4 nodes are recommended ")
            print( " With a minimum of 3 required.")
            print (60 * '-')
            avail_hosts = int(raw_input("How many nodes are available ?:  "))

            if avail_hosts >= 3:

                    app_hosts =  raw_input("What hosts will be used for application storage (IP/FQDN) ?:  ")
                    app_hosts =  utils.check_input(app_hosts)
                    utils.min_hosts(app_hosts)
                    utils.host_not_valid(app_hosts)
                    raw_devices = raw_input("What are the raw storage devices for these hosts (/dev/<device>) ?: ")
                    raw_devices = utils.check_input(raw_devices)
                    raw_storage_size = int(raw_input("What is the size of each raw storage device (GB) ?: "))
                    registry_pvsize = int(raw_input("What is the size for the registry persistent volume (GB)?: "))


                    # Single cluster total storage calculation
                    cluster_storage = len(raw_devices) * raw_storage_size * len(app_hosts)
                    total_avail_store = cluster_storage / 3.0

                    print "# Cluster 1"
                    print "# Total Storage allocated (GB) = %d" % registry_pvsize
                    print "# Total Storage available (GB) = %d" % total_avail_store
Example #15
with open(os.path.join(model_out_dir, "network.json"), 'w') as f:
    f.write(network.to_json())

# start training
scheduler = utils.Scheduler(schedules)
check_train_batch, check_validation_batch = True, True
for epoch in range(n_epochs):
    # update step sizes, learning rates
    scheduler.update_steps(epoch)
    K.set_value(network.optimizer.lr, scheduler.get_lr())

    # train on the training set
    losses_all, losses_vessel = [], []
    for filenames, imgs, vessels, segs in train_batch_fetcher():
        if check_train_batch:
            utils.check_input(imgs, segs, train_img_check_dir)
            #             utils.check_input(vessels, segs, train_img_check_dir)
            check_train_batch = False
        total_loss, loss_all, loss_vessel = network.train_on_batch(
            [imgs, vessels], [segs, segs[:, ::32, ::32, :]])
        losses_all += [loss_all] * len(filenames)
        losses_vessel += [loss_vessel] * len(filenames)
    print "loss_all: {}, loss_vessel: {}".format(np.mean(losses_all),
                                                 np.mean(losses_vessel))

    # evaluate on validation set
    if check_validation_batch:
        utils.check_input(val_imgs, val_masks, val_img_check_dir)
        #         utils.check_input(val_vessels, val_masks, val_img_check_dir)
        check_validation_batch = False
    val_generated_masks_f_v, val_generate_masks_v = network.predict(
Example #16
p = f1.readline().strip().split()[0]
p = int(p)

donor_list = {}
past_donor_list =[]
past_yr = 0
while True:
	line = f0.readline().strip().split('|')
	if len(line) < 2:
		break
	
	inp = [line[0].strip(),line[7].strip(),line[10].strip(),line[13].strip(),line[14].strip(),line[15].strip()]

	
	# Check the input
	out = ut.check_input(inp)
	
	if out:

		current_yr = out[-1]

		if current_yr > past_yr:
			#pdb.set_trace()
			#push the existing donor name list to past donor list 
			past_donor_list += donor_list.keys()
			repeat_donor_list = {}
			recipient_list = []
			contribution = {}
			num_contribution = {}
			#Create a new Binary Tree to store the contribution
			T = bst.BST()
Example #17
# set iterator
training_set, validation_set = utils.split(fundus_dir, vessel_dir, grade_path, "DME", 1)
val_batch_fetcher = iterator_dme.ValidationBatchFetcher(validation_set, batch_size, "DME", "rescale")

# create networks
K.set_learning_phase(False)
EX_segmentor = utils.load_network(EX_segmentor_dir)
fovea_localizer = utils.load_network(fovea_localizer_dir)
od_segmentor = utils.load_network(od_segmentor)

# start inference
check_train_batch, check_validation_batch = True, True
list_grades, list_features, list_fnames = [], [], []
for fnames, imgs, imgs_z, vessels, grades_onehot  in val_batch_fetcher():
    if check_validation_batch:
        utils.check_input(imgs, imgs_z, vessels, val_img_check_dir)
        check_validation_batch = False
    segmented = EX_segmentor.predict(imgs, batch_size=batch_size, verbose=0)
    fovea_loc, fovea_loc_vessel = fovea_localizer.predict([imgs_z, vessels], batch_size=batch_size, verbose=0)
    od_seg, od_seg_vessel = od_segmentor.predict([imgs_z, vessels], batch_size=batch_size, verbose=0)
    
    true_grades = np.argmax(grades_onehot, axis=1).tolist()
    features = utils.extract_features(segmented, od_seg, fovea_loc)
    list_fnames += [os.path.basename(fname).replace(".tif", "") for fname in fnames.tolist()]
    list_grades += true_grades
    list_features.append(features)
    if FLAGS.save_fig:
        utils.save_figs_for_region_seg_check(segmented, od_seg, fovea_loc, true_grades, img_out_dir, fnames)

features_matrix = np.concatenate(list_features, axis=0)
out_dict = {}
Example #18
    f.write(network.to_json())

# start training
scheduler = utils.Scheduler(schedules)
check_train_batch, check_validation_batch = True, True
best_aupr = 0
for epoch in range(n_epochs):
    # update step sizes, learning rates
    scheduler.update_steps(epoch)
    K.set_value(network.optimizer.lr, scheduler.get_lr())
    
    # train on the training set
    losses = []
    for imgs, segs in train_batch_fetcher():
        if check_train_batch:
            utils.check_input(imgs, segs, train_img_check_dir)
            check_train_batch = False
        loss = network.train_on_batch(imgs, segs)
        losses += [loss] * imgs.shape[0]
    utils.print_metrics(epoch + 1, training_loss=np.mean(losses))
    
    # evaluate on the validation set
    if epoch in validation_epochs:
        losses, fundus_imgs, gt_masks, pred_masks = [], [], [], []
        for imgs, segs in val_batch_fetcher():
            if check_validation_batch:
                utils.check_input(imgs, segs, train_img_check_dir)
                check_validation_batch = False
            pred = network.predict(imgs, batch_size=batch_size, verbose=0)
            loss = network.evaluate(imgs, segs, batch_size=batch_size, verbose=0)
            losses += [loss] * imgs.shape[0]
Example #19
 def test_input_exceptions(self):
     """Tests for check_input Exceptions"""
     with self.assertRaises(errors.InputException) as err:
         utils.check_input('acggcttggaactuct')
     self.assertEqual(errors.len_error, str(err.exception))
     with self.assertRaises(errors.InputException) as err:
         utils.check_input('')
     self.assertEqual(errors.len_error, str(err.exception))
     with self.assertRaises(errors.InputException) as err:
         utils.check_input('acttctggtacTTUUUUUUuuuuuuGGG')
     self.assertEqual(errors.len_error, str(err.exception))
     with self.assertRaises(errors.InputException) as err:
         utils.check_input('acggcttGGaacttctggtac gtaccagaagttccaagccgt '\
             'acggcttGGaacttctggtac')
     self.assertEqual(errors.error, str(err.exception))
     with self.assertRaises(errors.InputException) as err:
         utils.check_input('acggcttGGaacttctggtac tgccgaaccttgaagaccatg')
     self.assertEqual(errors.error, str(err.exception))
     with self.assertRaises(errors.InputException) as err:
         utils.check_input('acggctTggactggtwacTT')
     self.assertEqual(errors.patt_error, str(err.exception))
Example #20
val_batch_fetcher = iterator_dme.ValidationBatchFetcher(
    validation_set, batch_size, FLAGS.grade_type)

# create networks
K.set_learning_phase(False)
EX_segmentor = utils.load_network(EX_segmentor_dir)
fovea_localizer = utils.load_network(fovea_localizer_dir)
od_segmentor = utils.load_network(od_segmentor)

# start inference
check_train_batch, check_validation_batch = True, True
list_grades, list_od_found, list_sum_intensity_inside, list_sum_intensity_outside, list_fnames = [], [], [], [], []
for fnames, imgs_mean_subt, imgs_z, vessels, grades_onehot in val_batch_fetcher(
):
    if check_validation_batch:
        utils.check_input(imgs_mean_subt, imgs_z, vessels, val_img_check_dir)
        check_validation_batch = False
    segmented = EX_segmentor.predict(imgs_mean_subt,
                                     batch_size=batch_size,
                                     verbose=0)
    fovea_loc, fovea_loc_vessel = fovea_localizer.predict(
        [imgs_z, vessels], batch_size=batch_size, verbose=0)
    od_seg, od_seg_vessel = od_segmentor.predict([imgs_z, vessels],
                                                 batch_size=batch_size,
                                                 verbose=0)

    true_grades = np.argmax(grades_onehot, axis=1).tolist()
    od_found, sum_intensity_inside, sum_intensity_outside = utils.extract_features(
        segmented, od_seg, fovea_loc)

    list_fnames += [
Example #21
          + subdir + '/')
os.system('rm working/cnv_seq/CNV/' 
          + subdir + '/' 
          + input.split('.coverage')[0]  + '.cnvs')
with open(rtmp, 'w') as f:
    f.write("source('funcs.R')\n")
    f.write('library(cnv)\n')
    f.write("data<-read.delim('"
            + input + "')\n")
    f.write("png('" + png + "')\n")
    f.write("plot.cnv.all.perry(data,colour=9)\n")
    f.write('dev.off()\n')
    f.write("cnv.print(data, file='working/cnv_seq/CNV/" + subdir + '/' + input.split('.coverage')[0]  + ".cnvs')\n")
    f.write('q()\n')

if utils.check_input(input):
    os.system('R CMD BATCH --vanilla ' + rtmp + ' tmpLog')

# Murim's plot
with open(rtmp, 'w') as f:
    f.write("source('funcs.R')\n")
    f.write("data<-read.delim('"
            + murim_input + "')\n")
    f.write("png('" + png.replace('png', 'murim.png') + "')\n")
    f.write("plot.murim(data,colour=9)\n")
    f.write('dev.off()\n')
    f.write('q()\n')

if utils.check_input(murim_input):
    os.system('R CMD BATCH --vanilla ' + rtmp + ' tmpLog')