def test_main(self):
    """slice_gct.main should subset rows per the rid .grp file and produce a
    gct identical to the expected fixture."""
    in_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_slice_in.gct")
    rid_grp_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_slice_rid.grp")
    out_name = os.path.join(FUNCTIONAL_TESTS_DIR, "test_slice_out.gct")
    expected_out_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_slice_expected.gct")

    args_string = "-i {} --rid {} -ec {} -o {}".format(
        in_gct_path, rid_grp_path, "f", out_name)
    args = slice_gct.build_parser().parse_args(args_string.split())

    # Run main method
    slice_gct.main(args)

    # Compare output to expected
    out_gct = pg.parse(out_name)
    expected_gct = pg.parse(expected_out_path)

    # pd.testing is the supported namespace (pd.util.testing was deprecated
    # in pandas 1.0 and removed in 2.0); matches usage elsewhere in this file
    pd.testing.assert_frame_equal(out_gct.data_df, expected_gct.data_df)
    pd.testing.assert_frame_equal(out_gct.row_metadata_df, expected_gct.row_metadata_df)
    pd.testing.assert_frame_equal(out_gct.col_metadata_df, expected_gct.col_metadata_df)

    # Clean up
    os.remove(out_name)
def test_concat_main(self):
    """Exercise cg.concat_main end-to-end: first an expected failure that
    writes an error report file, then a successful horizontal concat with
    all metadata fields removed."""
    test_dir = "../functional_tests/test_concat/test_main"
    g_a = pg.parse(os.path.join(test_dir, "a.gct"))
    logger.debug("g_a: {}".format(g_a))
    g_b = pg.parse(os.path.join(test_dir, "b.gct"))
    logger.debug("g_b: {}".format(g_b))

    #unhappy path - write out error report file
    # mkstemp returns (fd, path); [1] keeps only the path
    expected_output_file = tempfile.mkstemp()[1]
    logger.debug(
        "unhappy path - write out error report file - expected_output_file: {}"
        .format(expected_output_file))

    args = cg.build_parser().parse_args([
        "-d", "horiz", "-if", g_a.src, g_b.src, "-o", "should_not_be_used",
        "-ot", "gct", "-erof", expected_output_file
    ])
    logger.debug("args: {}".format(args))

    # a.gct and b.gct have mismatched common metadata, so concat must raise
    with self.assertRaises(
            cg.MismatchCommonMetadataConcatException) as context:
        cg.concat_main(args)

    self.assertTrue(os.path.exists(expected_output_file))
    # the error report should contain one row per mismatch (2 expected)
    report_df = pd.read_csv(expected_output_file, sep="\t")
    logger.debug("report_df:\n{}".format(report_df))
    self.assertEqual(2, report_df.shape[0])
    os.remove(expected_output_file)

    #happy path
    expected_output_file = tempfile.mkstemp(suffix=".gct")[1]
    logger.debug("happy path - expected_output_file: {}".format(
        expected_output_file))

    # -ramf removes all metadata fields, sidestepping the mismatch above
    args2 = cg.build_parser().parse_args([
        "-d", "horiz", "-if", g_a.src, g_b.src, "-o", expected_output_file,
        "-ot", "gct", "-ramf"
    ])
    logger.debug("args2: {}".format(args2))

    cg.concat_main(args2)

    self.assertTrue(os.path.exists(expected_output_file))
    r = pg.parse(expected_output_file)
    logger.debug("happy path -r:\n{}".format(r))
    logger.debug("r.data_df:\n{}".format(r.data_df))
    self.assertEqual((2, 4), r.data_df.shape)
    self.assertEqual({"a", "b", "g", "f"}, set(r.data_df.columns))
    self.assertEqual({"rid1", "rid2"}, set(r.data_df.index))

    #cleanup
    os.remove(expected_output_file)
def test_top_bottom(self):
    """cg.vstack of two gcts should equal the pre-computed merged fixture."""
    top_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_merge_top.gct")
    bottom_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_merge_bottom.gct")
    expected_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_merged_top_bottom.gct")

    top_gct = pg.parse(top_gct_path)
    bottom_gct = pg.parse(bottom_gct_path)
    expected_gct = pg.parse(expected_gct_path)

    # Merge top and bottom
    concated_gct = cg.vstack([top_gct, bottom_gct], [], False)

    # pd.testing is the supported namespace (pd.util.testing was removed in
    # pandas 2.0); matches usage elsewhere in this file
    pd.testing.assert_frame_equal(expected_gct.data_df, concated_gct.data_df,
                                  check_names=False)
    pd.testing.assert_frame_equal(expected_gct.row_metadata_df,
                                  concated_gct.row_metadata_df, check_names=False)
    pd.testing.assert_frame_equal(expected_gct.col_metadata_df,
                                  concated_gct.col_metadata_df, check_names=False)
def test_left_right(self):
    """cg.hstack of two gcts should equal the pre-computed merged fixture."""
    left_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_merge_left.gct")
    right_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_merge_right.gct")
    expected_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_merged_left_right.gct")

    left_gct = pg.parse(left_gct_path)
    right_gct = pg.parse(right_gct_path)
    expected_gct = pg.parse(expected_gct_path)

    # Merge left and right
    concated_gct = cg.hstack([left_gct, right_gct], [], False)

    # pd.testing is the supported namespace (pd.util.testing was removed in
    # pandas 2.0); matches usage elsewhere in this file
    pd.testing.assert_frame_equal(expected_gct.data_df, concated_gct.data_df,
                                  check_names=False)
    pd.testing.assert_frame_equal(expected_gct.row_metadata_df,
                                  concated_gct.row_metadata_df, check_names=False)
    pd.testing.assert_frame_equal(expected_gct.col_metadata_df,
                                  concated_gct.col_metadata_df, check_names=False)
def test_main(self):
    """Round-trip: wg.write a GCToo then pg.parse it back; also verify the
    null placeholders appear verbatim in the written file."""
    out_name = os.path.join(FUNCTIONAL_TESTS_PATH, "test_main_out.gct")

    gctoo = GCToo.GCToo(data_df=self.data_df,
                        row_metadata_df=self.row_metadata_df,
                        col_metadata_df=self.col_metadata_df)
    wg.write(gctoo, out_name, data_null="NaN",
             metadata_null="-666", filler_null="-666")

    # Read in the gct and verify that it's the same as gctoo
    new_gct = pg.parse(out_name)

    # pd.testing is the supported namespace (pd.util.testing was removed in
    # pandas 2.0); matches usage elsewhere in this file
    pd.testing.assert_frame_equal(new_gct.data_df, gctoo.data_df)
    pd.testing.assert_frame_equal(new_gct.row_metadata_df, gctoo.row_metadata_df)
    pd.testing.assert_frame_equal(new_gct.col_metadata_df, gctoo.col_metadata_df)

    # Also check that missing values were written to the file as expected
    # (keep_default_na=False so the placeholder strings come back as-is)
    in_df = pd.read_csv(out_name, sep="\t", skiprows=2, keep_default_na=False)
    self.assertEqual(in_df.iloc[0, 1], "-666")
    self.assertEqual(in_df.iloc[5, 6], "NaN")

    # Cleanup
    os.remove(out_name)
def test_parse_gct_int_ids(self):
    """Parsing a gct whose row/column ids are integers should yield string ids."""
    path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_parse_gct_int_ids.gct")
    r = pg.parse(path)
    logger.debug("r: {}".format(r))
    logger.debug("r.data_df: {}".format(r.data_df))

    # ids come back as strings, not ints (including the negative one)
    self.assertEqual({"1", "2"}, set(r.data_df.columns))
    self.assertEqual({"3", "11", "-3"}, set(r.data_df.index))
def test_p100_functional(self):
    """Write a parsed P100 gct back out and confirm the round trip is lossless."""
    p100_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100.gct")
    p100_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100_writing.gct")

    # Read in original gct file
    original = pg.parse(p100_in_path)

    # Write it back out, then re-parse the written copy
    wg.write(original, p100_out_path)
    round_tripped = pg.parse(p100_out_path)

    # Each component frame must survive the round trip unchanged
    for frame_attr in ("data_df", "row_metadata_df", "col_metadata_df"):
        self.assertTrue(getattr(original, frame_attr).equals(
            getattr(round_tripped, frame_attr)))

    # Clean up
    os.remove(p100_out_path)
def test_with_only_row_metadata(self):
    """Parse the same row-metadata-only dataset from .gct and .gctx and
    verify the two parsers agree on data and metadata frames."""
    # path to files
    gctoo_path = FUNCTIONAL_TESTS_PATH + "/row_meta_only_example_n2x1203.gct"
    gctoox_path = FUNCTIONAL_TESTS_PATH + "/row_meta_only_example_n2x1203.gctx"

    # parse files
    c2_gctoo = parse_gct.parse(gctoo_path)
    c2_gctoox = parse_gctx.parse(gctoox_path)

    # check rows and columns: data_df
    self.assertTrue(set(list(c2_gctoo.data_df.index)) == set(list(c2_gctoox.data_df.index)),
                    "Mismatch between data_df index values of gct vs gctx: {} vs {}".format(c2_gctoo.data_df.index,
                                                                                            c2_gctoox.data_df.index))
    self.assertTrue(set(list(c2_gctoo.data_df.columns)) == set(list(c2_gctoox.data_df.columns)),
                    "Mismatch between data_df column values of gct vs gctx: {} vs {}".format(
                        c2_gctoo.data_df.columns, c2_gctoox.data_df.columns))
    logger.debug("c2 gctoo data_df columns equal to gctoox data_df columns? {}".format(
        set(c2_gctoo.data_df.columns) == set(c2_gctoox.data_df.columns)))
    # column-by-column: lengths and values must match exactly
    for c in list(c2_gctoo.data_df.columns):
        self.assertTrue(len(list(c2_gctoo.data_df[c])) == len(list(c2_gctoox.data_df[c])),
                        "Lengths of column {} differ between gct and gctx".format(c))
        pandas_testing.assert_series_equal(c2_gctoo.data_df[c], c2_gctoox.data_df[c])

    # check rows and columns: row_metadata_df
    self.assertTrue(set(list(c2_gctoo.row_metadata_df.index)) == set(list(c2_gctoox.row_metadata_df.index)),
                    "Mismatch between row_metadata_df index values of gct vs gctx: {} vs {}".format(
                        c2_gctoo.row_metadata_df.index, c2_gctoox.row_metadata_df.index))
    self.assertTrue(set(list(c2_gctoo.row_metadata_df.columns)) == set(list(c2_gctoox.row_metadata_df.columns)),
                    "Mismatch between row_metadata_df column values of gct vs gctx: {} vs {}".format(
                        c2_gctoo.row_metadata_df.columns, c2_gctoox.row_metadata_df.columns))
    logger.debug("c2 gctoo row_metadata_df columns equal to gctoox row_metadata_df columns? {}".format(
        set(c2_gctoo.row_metadata_df.columns) == set(c2_gctoox.row_metadata_df.columns)))
    # row metadata: also require identical dtypes between the two parsers
    for c in list(c2_gctoo.row_metadata_df.columns):
        self.assertTrue(len(list(c2_gctoo.row_metadata_df[c])) == len(list(c2_gctoox.row_metadata_df[c])),
                        "Lengths of column {} differ between gct and gctx".format(c))
        self.assertTrue(c2_gctoo.row_metadata_df[c].dtype == c2_gctoox.row_metadata_df[c].dtype,
                        "Dtype mismatch between parsed gct & gctx: {} vs {}".format(
                            c2_gctoo.row_metadata_df[c].dtype, c2_gctoox.row_metadata_df[c].dtype))
        logger.debug("first couple elems of {} in gctoo: {}".format(c, list(c2_gctoo.row_metadata_df[c])[0:3]))
        pandas_testing.assert_series_equal(c2_gctoo.row_metadata_df[c], c2_gctoox.row_metadata_df[c])

    # check rows and columns: col_metadata_df
    self.assertTrue(set(list(c2_gctoo.col_metadata_df.index)) == set(list(c2_gctoox.col_metadata_df.index)),
                    "Mismatch between col_metadata_df index values of gct vs gctx: {} vs {}".format(
                        c2_gctoo.col_metadata_df.index, c2_gctoox.col_metadata_df.index))
    self.assertTrue(set(list(c2_gctoo.col_metadata_df.columns)) == set(list(c2_gctoox.col_metadata_df.columns)),
                    "Mismatch between col_metadata_df column values of gct vs gctx: {} vs {}".format(
                        c2_gctoo.col_metadata_df.columns, c2_gctoox.col_metadata_df.columns))
    logger.debug("c2 gctoo col_metadata_df columns equal to gctoox col_metadata_df columns? {}".format(
        set(c2_gctoo.col_metadata_df.columns) == set(c2_gctoox.col_metadata_df.columns)))
    for c in list(c2_gctoo.col_metadata_df.columns):
        self.assertTrue(len(list(c2_gctoo.col_metadata_df[c])) == len(list(c2_gctoox.col_metadata_df[c])),
                        "Lengths of column {} differ between gct and gctx".format(c))
        self.assertTrue(c2_gctoo.col_metadata_df[c].dtype == c2_gctoox.col_metadata_df[c].dtype,
                        "Dtype mismatch between parsed gct & gctx: {} vs {}".format(
                            c2_gctoo.col_metadata_df[c].dtype, c2_gctoox.col_metadata_df[c].dtype))
        pandas_testing.assert_series_equal(c2_gctoo.col_metadata_df[c], c2_gctoox.col_metadata_df[c])
def test_gctx2gct_main(self):
    """gctx2gct_main should convert gctx to gct losslessly, and apply
    row/col annotation files when supplied."""
    in_name = "../functional_tests/mini_gctoo_for_testing.gctx"
    out_name = "../functional_tests/test_gctx2gct_out.gct"
    args_string = "-f {} -o {}".format(in_name, out_name)
    args = gctx2gct.build_parser().parse_args(args_string.split())

    gctx2gct.gctx2gct_main(args)

    # Make sure the input is identical to output
    in_gctx = parse_gctx.parse(in_name)
    out_gct = parse_gct.parse(out_name)

    # pd.testing replaces the removed pd.util.testing namespace; rtol=1e-3
    # replaces the deprecated check_less_precise=3 (the gct text round trip
    # loses a little float precision)
    pd.testing.assert_frame_equal(in_gctx.data_df, out_gct.data_df, rtol=1e-3)
    pd.testing.assert_frame_equal(in_gctx.col_metadata_df, out_gct.col_metadata_df)
    pd.testing.assert_frame_equal(in_gctx.row_metadata_df, out_gct.row_metadata_df)

    no_meta = "../functional_tests/mini_gctoo_for_testing_nometa.gctx"
    added_meta = "../functional_tests/test_gctx2gct_out_annotated.gct"
    row_meta = "../functional_tests/test_rowmeta_n6.txt"
    col_meta = "../functional_tests/test_colmeta_n6.txt"
    args_string = "-f {} -o {} -row_annot_path {} -col_annot_path {}".format(
        no_meta, added_meta, row_meta, col_meta)
    args = gctx2gct.build_parser().parse_args(args_string.split())

    gctx2gct.gctx2gct_main(args)

    annotated_gct = parse_gct.parse(added_meta)

    # Check added annotations are the same as original input GCTX
    pd.testing.assert_frame_equal(in_gctx.data_df, annotated_gct.data_df, rtol=1e-3)
    pd.testing.assert_frame_equal(in_gctx.col_metadata_df, annotated_gct.col_metadata_df)
    pd.testing.assert_frame_equal(in_gctx.row_metadata_df, annotated_gct.row_metadata_df)

    # Clean up
    os.remove(out_name)
    os.remove(added_meta)
def parse(file_path, convert_neg_666=True, rid=None, cid=None,
          ridx=None, cidx=None, row_meta_only=False, col_meta_only=False,
          make_multiindex=False):
    """
    Identifies whether file_path corresponds to a .gct or .gctx file and
    calls the correct corresponding parse method.

    Input:
        Mandatory:
        - gct(x)_file_path (str): full path to gct(x) file you want to parse.

        Optional:
        - row_meta_only (bool): Whether to load data + metadata (if False), or
            just row metadata (if True) as pandas DataFrame
        - col_meta_only (bool): Whether to load data + metadata (if False), or
            just col metadata (if True) as pandas DataFrame
        - convert_neg_666 (bool): whether to convert -666 values to numpy.nan
            or not (see Note below for more details on this). Default = True.
        - rid (list of strings): list of row ids to specifically keep from gctx. Default=None.
        - cid (list of strings): list of col ids to specifically keep from gctx. Default=None.
        - make_multiindex (bool): whether to create a multi-index df combining
            the 3 component dfs

    Output:
        - myGCToo (GCToo)

    Raises:
        Exception: if a gctx-only argument is supplied with a .gct file, or if
            the file extension is neither .gct nor .gctx.

    Note: why does convert_neg_666 exist?
        - In CMap--for somewhat obscure historical reasons--we use "-666" as our
            null value for metadata. However (so that users can take full advantage
            of pandas' methods, including those for filtering nan's etc) we provide
            the option of converting these into numpy.NaN values, the pandas default.
    """
    if file_path.endswith(".gct"):
        # These arguments are gctx-only; reject them explicitly for .gct input.
        # (Explicit mapping replaces the previous eval() on argument names.)
        gctx_only_args = {"rid": rid, "cid": cid, "ridx": ridx, "cidx": cidx}
        for arg_name, arg_value in gctx_only_args.items():
            if arg_value:
                err_msg = "parse_gct does not use the argument {}. Ignoring it...".format(
                    arg_name)
                logger.error(err_msg)
                raise Exception(err_msg)

        curr = parse_gct.parse(file_path, convert_neg_666, row_meta_only,
                               col_meta_only, make_multiindex)

    elif file_path.endswith(".gctx"):
        curr = parse_gctx.parse(file_path, convert_neg_666, rid, cid,
                                ridx, cidx, row_meta_only, col_meta_only,
                                make_multiindex)

    else:
        err_msg = "File to parse must be .gct or .gctx!"
        logger.error(err_msg)
        raise Exception(err_msg)

    return curr
def test_parse(self):
    """Spot-check pg.parse on three fixtures: an L1000 gct, a P100 gct, and
    a GCT1.2 file (which carries no column metadata)."""
    # L1000 gct
    l1000_file_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000.gct")
    l1000_gct = pg.parse(l1000_file_path)

    # Check a few values
    self.assertAlmostEqual(l1000_gct.data_df.iloc[0, 0], 11.3819, places=4,
                           msg=("The first value in the data matrix should be " +
                                "{} not {}").format("11.3819", l1000_gct.data_df.iloc[0, 0]))
    self.assertEqual(l1000_gct.col_metadata_df.iloc[0, 0], 58,
                     msg=("The first value in the column metadata should be " +
                          "{} not {}").format("58", l1000_gct.col_metadata_df.iloc[0, 0]))
    self.assertEqual(l1000_gct.row_metadata_df.iloc[0, 0], "Analyte 11",
                     msg=("The first value in the row metadata should be " +
                          "{} not {}").format("Analyte 11", l1000_gct.row_metadata_df.iloc[0, 0]))

    # P100 gct
    p100_file_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100.gct")
    p100_gct = pg.parse(p100_file_path)

    # Check a few values
    self.assertAlmostEqual(p100_gct.data_df.iloc[0, 0], 0.9182, places=4,
                           msg=("The first value in the data matrix should be " +
                                "{} not {}").format("0.9182", p100_gct.data_df.iloc[0, 0]))
    self.assertEqual(p100_gct.col_metadata_df.iloc[0, 0], "MCF7",
                     msg=("The first value in the column metadata should be " +
                          "{} not {}").format("MCF7", p100_gct.col_metadata_df.iloc[0, 0]))
    self.assertEqual(p100_gct.row_metadata_df.iloc[0, 0], 1859,
                     msg=("The first value in the row metadata should be " +
                          "{} not {}").format("1859", p100_gct.row_metadata_df.iloc[0, 0]))

    # GCT1.2
    gct_v1point2_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_v1point2_n5x10.gct")
    gct_v1point2 = pg.parse(gct_v1point2_path)

    # Check a few values
    self.assertAlmostEqual(
        gct_v1point2.data_df.loc["217140_s_at", "LJP005_A375_24H_X1_B19:A06"],
        6.9966, places=4)
    self.assertEqual(gct_v1point2.row_metadata_df.loc["203627_at", "Description"],
                     "IGF1R")

    # Make sure col_metadata_df is empty
    self.assertEqual(gct_v1point2.col_metadata_df.size, 0,
                     "col_metadata_df should be empty.")
def parse(file_path, convert_neg_666=True, rid=None, cid=None,
          ridx=None, cidx=None, row_meta_only=False, col_meta_only=False,
          make_multiindex=False):
    """
    Identifies whether file_path corresponds to a .gct or .gctx file and
    calls the correct corresponding parse method.

    Input:
        Mandatory:
        - gct(x)_file_path (str): full path to gct(x) file you want to parse.

        Optional:
        - convert_neg_666 (bool): whether to convert -666 values to numpy.nan
            or not (see Note below for more details on this). Default = True.
        - rid (list of strings): list of row ids to specifically keep from gctx. Default=None.
        - cid (list of strings): list of col ids to specifically keep from gctx. Default=None.
        - ridx (list of integers): only read the rows corresponding to this
            list of integer ids. Default=None.
        - cidx (list of integers): only read the columns corresponding to this
            list of integer ids. Default=None.
        - row_meta_only (bool): Whether to load data + metadata (if False), or
            just row metadata (if True) as pandas DataFrame
        - col_meta_only (bool): Whether to load data + metadata (if False), or
            just col metadata (if True) as pandas DataFrame
        - make_multiindex (bool): whether to create a multi-index df combining
            the 3 component dfs

    Output:
        - out (GCToo object or pandas df): if row_meta_only or col_meta_only, then
            out is a metadata df; otherwise, it's a GCToo instance containing
            content of parsed gct(x) file

    Raises:
        Exception: if file_path ends in neither .gct nor .gctx.

    Note: why does convert_neg_666 exist?
        - In CMap--for somewhat obscure historical reasons--we use "-666" as our
            null value for metadata. However (so that users can take full advantage
            of pandas' methods, including those for filtering nan's etc) we provide
            the option of converting these into numpy.NaN values, the pandas default.
    """
    # Dispatch purely on file extension; both branches forward all kwargs.
    if file_path.endswith(".gct"):
        out = parse_gct.parse(file_path, convert_neg_666=convert_neg_666,
                              rid=rid, cid=cid, ridx=ridx, cidx=cidx,
                              row_meta_only=row_meta_only,
                              col_meta_only=col_meta_only,
                              make_multiindex=make_multiindex)
    elif file_path.endswith(".gctx"):
        out = parse_gctx.parse(file_path, convert_neg_666=convert_neg_666,
                               rid=rid, cid=cid, ridx=ridx, cidx=cidx,
                               row_meta_only=row_meta_only,
                               col_meta_only=col_meta_only,
                               make_multiindex=make_multiindex)
    else:
        err_msg = "File to parse must be .gct or .gctx!"
        logger.error(err_msg)
        raise Exception(err_msg)

    return out
def gct2gctx_main(args):
    """Convert a .gct file to .gctx.

    Kept separate from main() so it can be invoked programmatically as well
    as from the command line.
    """
    parsed_gct = parse_gct.parse(args.filename, convert_neg_666=False)

    out_name = args.output_filepath
    if out_name is None:
        # Default: input file's basename with a .gctx extension
        stem, _ = os.path.splitext(os.path.basename(args.filename))
        out_name = stem + ".gctx"

    write_gctx.write(parsed_gct, out_name)
def main():
    """Script entry point: parse CLI args and convert a .gct file to .gctx."""
    args = build_parser().parse_args(sys.argv[1:])
    setup_logger.setup(verbose=args.verbose)

    in_gctoo = parse_gct.parse(args.filename, convert_neg_666=False)
    logger.debug("Original out name: {}".format(in_gctoo.src))

    # "is None" instead of "== None"; os.path.basename handles path
    # separators portably (str.split(src, "/") broke on Windows paths).
    # Consistent with gct2gctx_main's default-name handling.
    if args.output_filepath is None:
        # last path component, truncated at the first "."
        out_name = os.path.basename(in_gctoo.src).split(".")[0]
    else:
        out_name = args.output_filepath

    write_gctx.write(in_gctoo, out_name)
def test_l1000_functional(self):
    """Round-trip an L1000 gct through wg.write / pg.parse without change."""
    l1000_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000.gct")
    l1000_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000_writing.gct")

    # Read in original gct file
    original = pg.parse(l1000_in_path)

    # do write operation
    wg.write(original, l1000_out_path)

    # Read in new gct file
    rewritten = pg.parse(l1000_out_path)

    # All three component frames must match exactly
    for frame_attr in ("data_df", "row_metadata_df", "col_metadata_df"):
        pd.testing.assert_frame_equal(getattr(original, frame_attr),
                                      getattr(rewritten, frame_attr))

    # Clean up
    os.remove(l1000_out_path)
def test_p100_functional(self):
    """Round-trip a P100 gct; precision is preserved when data_float_format
    is None."""
    p100_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100.gct")
    p100_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100_writing.gct")

    # Read in original gct file
    original = pg.parse(p100_in_path)

    # do write operation - note data_float_format set to None to preserve
    # precision of input file
    wg.write(original, p100_out_path, data_float_format=None)

    # Read in new gct file
    rewritten = pg.parse(p100_out_path)

    # All three component frames must match exactly
    for frame_attr in ("data_df", "row_metadata_df", "col_metadata_df"):
        pd.testing.assert_frame_equal(getattr(original, frame_attr),
                                      getattr(rewritten, frame_attr))

    # Clean up
    os.remove(p100_out_path)
def test_gct2gctx_main(self):
    """gct2gctx_main should convert gct to gctx without altering content."""
    in_name = "functional_tests/mini_gctoo_for_testing.gct"
    out_name = "functional_tests/test_gct2gctx_out.gctx"
    args_string = "-f {} -o {}".format(in_name, out_name)
    args = gct2gctx.build_parser().parse_args(args_string.split())

    gct2gctx.gct2gctx_main(args)

    # Make sure the input is identical to output
    in_gct = parse_gct.parse(in_name)
    out_gctx = parse_gctx.parse(out_name)

    # pd.testing is the supported namespace (pd.util.testing was removed in
    # pandas 2.0); matches usage elsewhere in this file
    pd.testing.assert_frame_equal(in_gct.data_df, out_gctx.data_df)
    pd.testing.assert_frame_equal(in_gct.col_metadata_df, out_gctx.col_metadata_df)
    pd.testing.assert_frame_equal(in_gct.row_metadata_df, out_gctx.row_metadata_df)

    # Clean up
    os.remove(out_name)
def main():
    """Script entry point: subset a gct by rid/cid include/exclude lists
    and write the result."""
    args = build_parser().parse_args(sys.argv[1:])
    setup_logger.setup(verbose=args.verbose)

    # Parse the input gct
    gct_in = pg.parse(args.in_gct_path)

    # Normalize each of the id arguments via _read_arg
    keep_rid = _read_arg(args.rid)
    keep_cid = _read_arg(args.cid)
    drop_rid = _read_arg(args.exclude_rid)
    drop_cid = _read_arg(args.exclude_cid)

    # Subset the gct; an empty result is almost certainly a usage error
    gct_out = slice_gctoo(gct_in, rid=keep_rid, cid=keep_cid,
                          exclude_rid=drop_rid, exclude_cid=drop_cid)
    assert gct_out.data_df.size > 0, "Slicing yielded an empty gct!"

    # Write the output gct
    wg.write(gct_out, args.out_name, data_null="NaN",
             metadata_null="NA", filler_null="NA")
def subset_main(args):
    """Subset a gct/gctx file by rid/cid and write the result.

    Separate method from main() in order to make testing easier and to
    enable command-line access.
    """
    # Normalize each of the command line id arguments
    rid = _read_arg(args.rid)
    cid = _read_arg(args.cid)
    exclude_rid = _read_arg(args.exclude_rid)
    exclude_cid = _read_arg(args.exclude_cid)

    if args.in_path.endswith(".gct"):
        # GCT: parse fully, then subset the in-memory GCToo
        full_gct = parse_gct.parse(args.in_path)
        out_gct = sg.subset_gctoo(full_gct, rid=rid, cid=cid,
                                  exclude_rid=exclude_rid,
                                  exclude_cid=exclude_cid)
    else:
        # GCTx: let parse_gctx do the subsetting at read time; it only
        # supports inclusion lists, so exclusion args are rejected up front
        if (exclude_rid is not None) or (exclude_cid is not None):
            msg = "exclude_{rid,cid} args not currently supported for parse_gctx."
            raise Exception(msg)
        logger.info("Using hyperslab selection functionality of parse_gctx...")
        out_gct = parse_gctx.parse(args.in_path, rid=rid, cid=cid)

    # Write out in the requested format
    if args.out_type == "gctx":
        wgx.write(out_gct, args.out_name)
    else:
        wg.write(out_gct, args.out_name, data_null="NaN",
                 metadata_null="NA", filler_null="NA")
def test_parse_gct(self):
    # tests the parsing of a gct file with high precision values
    gct_filepath = os.path.join(FUNCTIONAL_TESTS_PATH, 'test_l1000_highprecision.gct')
    data_gct = parse_gct.parse(gct_filepath)
    (data, row_metadata, col_metadata) = (data_gct.data_df,
                                          data_gct.row_metadata_df,
                                          data_gct.col_metadata_df)

    # expected dims: [n_rows, n_cols, n_row_meta_fields, n_col_meta_fields]
    e_dims = [978, 377, 11, 35]
    actual_version = 'GCT1.3'

    # Check shapes of outputs
    self.assertTrue(row_metadata.shape == (e_dims[0], e_dims[2]),
                    ("row_metadata.shape = {} " +
                     "but expected it to be ({}, {})").format(row_metadata.shape,
                                                              e_dims[0], e_dims[2]))
    self.assertTrue(col_metadata.shape == (e_dims[1], e_dims[3]),
                    ("col_metadata.shape = {} " +
                     "but expected it to be ({}, {})").format(col_metadata.shape,
                                                              e_dims[1], e_dims[3]))
    self.assertTrue(data.shape == (e_dims[0], e_dims[1]),
                    ("data.shape = {} " +
                     "but expected it to be ({}, {})").format(data.shape,
                                                              e_dims[0], e_dims[1]))

    # Check version
    self.assertEqual(actual_version, data_gct.version)

    # Check the type of data
    self.assertTrue(isinstance(data.iloc[0, 0], np.float32),
                    "The data should be a float32, not {}".format(type(data.iloc[0, 0])))

    # Check a few high precision floating values in data
    # (exact == comparison works because both sides are float32)
    correct_val = np.float32(11.574655)
    self.assertTrue(data.iloc[0, 0] == correct_val,
                    ("The first value in the data matrix should be " +
                     "{} not {}").format(correct_val, data.iloc[0, 0]))
    correct_val = np.float32(5.3183546)
    self.assertTrue(data.iloc[e_dims[0] - 1, e_dims[1] - 1] == correct_val,
                    ("The last value in the data matrix should be " +
                     str(correct_val) + " not {}").format(data.iloc[e_dims[0] - 1, e_dims[1] - 1]))
def gct2gctx_main(args):
    """Convert a .gct file to .gctx, optionally replacing row/column
    metadata from tab-separated annotation files.

    Separate from main() in order to make command-line tool.
    """
    in_gctoo = parse_gct.parse(args.filename, convert_neg_666=False)

    if args.output_filepath is None:
        basename = os.path.basename(args.filename)
        out_name = os.path.splitext(basename)[0] + ".gctx"
    else:
        out_name = args.output_filepath

    # If annotations are supplied, parse the table and set the metadata_df.
    # (Positive "is not None" checks replace the "if None: pass / else" form.)
    if args.row_annot_path is not None:
        row_metadata = pd.read_csv(args.row_annot_path, sep='\t',
                                   index_col=0, header=0, low_memory=False)
        # every row id in the matrix must be annotated
        assert all(in_gctoo.data_df.index.isin(row_metadata.index)), \
            "Row ids in matrix missing from annotations file"
        # keep only the annotation rows that occur in the matrix
        in_gctoo.row_metadata_df = row_metadata.loc[row_metadata.index.isin(
            in_gctoo.data_df.index)]

    if args.col_annot_path is not None:
        col_metadata = pd.read_csv(args.col_annot_path, sep='\t',
                                   index_col=0, header=0, low_memory=False)
        # every column id in the matrix must be annotated
        assert all(in_gctoo.data_df.columns.isin(col_metadata.index)), \
            "Column ids in matrix missing from annotations file"
        in_gctoo.col_metadata_df = col_metadata.loc[col_metadata.index.isin(
            in_gctoo.data_df.columns)]

    write_gctx.write(in_gctoo, out_name)
def test_main(self):
    """End-to-end test of cg.main() with a mocked build_parser: an unhappy
    path that writes an error report, then a happy path with all metadata
    fields removed."""
    test_dir = "functional_tests/test_concat_gctoo/test_main"
    g_a = pg.parse(os.path.join(test_dir, "a.gct"))
    logger.debug("g_a: {}".format(g_a))
    g_b = pg.parse(os.path.join(test_dir, "b.gct"))
    logger.debug("g_b: {}".format(g_b))

    # Save the real parser factory so it can be restored at the end
    save_build_parser = cg.build_parser

    class MockParser:
        """Stands in for the argparse parser: always returns canned args."""
        def __init__(self, args):
            self.args = args

        def parse_args(self, unused):
            return self.args

    #unhappy path - write out error report file
    expected_output_file = tempfile.mkstemp()[1]
    logger.debug(
        "unhappy path - write out error report file - expected_output_file: {}"
        .format(expected_output_file))

    args = save_build_parser().parse_args([
        "-d", "horiz", "-if", g_a.src, g_b.src, "-o", "should_not_be_used",
        "-ot", "gct", "-erof", expected_output_file
    ])
    logger.debug("args: {}".format(args))

    my_mock_parser = MockParser(args)
    cg.build_parser = lambda: my_mock_parser

    with self.assertRaises(
            cg.MismatchCommonMetadataConcatGctooException) as context:
        cg.main()

    self.assertTrue(os.path.exists(expected_output_file))
    report_df = pd.read_csv(expected_output_file, sep="\t")
    logger.debug("report_df:\n{}".format(report_df))
    self.assertEqual(2, report_df.shape[0])
    os.remove(expected_output_file)

    #happy path
    args.remove_all_metadata_fields = True
    args.error_report_output_file = None

    expected_output_file = tempfile.mkstemp(suffix=".gct")[1]
    logger.debug("happy path - expected_output_file: {}".format(
        expected_output_file))
    args.out_name = expected_output_file

    my_mock_parser = MockParser(args)
    # Fixed typo: this was "cg.buid_parser", so the mock was never
    # re-installed; the test only passed because the earlier lambda closed
    # over the local my_mock_parser variable. (Also removed stray print()s.)
    cg.build_parser = lambda: my_mock_parser

    cg.main()

    self.assertTrue(os.path.exists(expected_output_file))
    r = pg.parse(expected_output_file)
    logger.debug("happy path -r:\n{}".format(r))
    logger.debug("r.data_df:\n{}".format(r.data_df))
    self.assertEqual((2, 4), r.data_df.shape)
    self.assertEqual({"a", "b", "g", "f"}, set(r.data_df.columns))
    self.assertEqual({"rid1", "rid2"}, set(r.data_df.index))

    #cleanup
    os.remove(expected_output_file)
    cg.build_parser = save_build_parser
def test_parse(self):
    """Spot-check pg.parse on three fixtures (L1000, P100, GCT1.2) and on
    the row_meta_only / col_meta_only fast paths."""
    # L1000 gct
    l1000_file_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000.gct")
    l1000_gct = pg.parse(l1000_file_path)
    logger.debug("parse L1000 gct - l1000_gct: {}".format(l1000_gct))
    self.assertEqual(GCToo.GCToo, type(l1000_gct))

    # Check a few values
    self.assertAlmostEqual(l1000_gct.data_df.iloc[0, 0], 11.3819, places=4,
                           msg=("The first value in the data matrix should be " +
                                "{} not {}").format("11.3819", l1000_gct.data_df.iloc[0, 0]))
    self.assertEqual(l1000_gct.col_metadata_df.iloc[0, 0], 58,
                     msg=("The first value in the column metadata should be " +
                          "{} not {}").format("58", l1000_gct.col_metadata_df.iloc[0, 0]))
    self.assertEqual(l1000_gct.row_metadata_df.iloc[0, 0], "Analyte 11",
                     msg=("The first value in the row metadata should be " +
                          "{} not {}").format("Analyte 11", l1000_gct.row_metadata_df.iloc[0, 0]))

    # P100 gct
    p100_file_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100.gct")
    p100_gct = pg.parse(p100_file_path)
    logger.debug("parse P100 gct - p100_gct: {}".format(p100_gct))
    self.assertEqual(GCToo.GCToo, type(p100_gct))

    # Check a few values
    self.assertAlmostEqual(p100_gct.data_df.iloc[0, 0], 0.9182, places=4,
                           msg=("The first value in the data matrix should be " +
                                "{} not {}").format("0.9182", p100_gct.data_df.iloc[0, 0]))
    self.assertEqual(p100_gct.col_metadata_df.iloc[0, 0], "MCF7",
                     msg=("The first value in the column metadata should be " +
                          "{} not {}").format("MCF7", p100_gct.col_metadata_df.iloc[0, 0]))
    self.assertEqual(p100_gct.row_metadata_df.iloc[0, 0], 1859,
                     msg=("The first value in the row metadata should be " +
                          "{} not {}").format("1859", p100_gct.row_metadata_df.iloc[0, 0]))

    # GCT1.2 (no column metadata in this format version)
    gct_v1point2_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_v1point2_n5x10.gct")
    gct_v1point2 = pg.parse(gct_v1point2_path)
    logger.debug("parse GC1.2 - gct_v1point2: {}".format(gct_v1point2))
    self.assertEqual(GCToo.GCToo, type(gct_v1point2))

    # Check a few values
    self.assertAlmostEqual(
        gct_v1point2.data_df.loc["217140_s_at", "LJP005_A375_24H_X1_B19:A06"],
        6.9966, places=4)
    self.assertEqual(gct_v1point2.row_metadata_df.loc["203627_at", "Description"],
                     "IGF1R")

    # Make sure col_metadata_df is empty
    self.assertEqual(gct_v1point2.col_metadata_df.size, 0,
                     "col_metadata_df should be empty.")

    #row_meta_only=True: returns a bare DataFrame, not a GCToo
    row_metadata_df = pg.parse(l1000_file_path, row_meta_only=True)
    logger.debug("row_meta_only=True - row_metadata_df.shape: {}".format(row_metadata_df.shape))
    self.assertEqual(pd.DataFrame, type(row_metadata_df))
    self.assertEqual((978, 11), row_metadata_df.shape)

    #col_meta_only=True: returns a bare DataFrame, not a GCToo
    col_metadata_df = pg.parse(l1000_file_path, col_meta_only=True)
    logger.debug("col_meta_only=True - col_metadata_df.shape: {}".format(col_metadata_df.shape))
    self.assertEqual(pd.DataFrame, type(col_metadata_df))
    self.assertEqual((377, 35), col_metadata_df.shape)
def eVIP_run_main(pred_file=None, sig_info=None, gctx=None, sig_gctx=None,
                  ref_allele_mode=None, null_conn=None, out_dir=None,
                  ymin=None, ymax=None, allele_col=None, use_c_pval=None,
                  pdf=None, cell_id=None, plate_id=None, corr_val_str=None):
    """Render one figure per gene of eVIP impact-prediction plots.

    For each gene in the prediction file, builds a grid of subplots: a WT
    self-connectivity heatmap plus, per allele, heatmaps (self and WT-vs-mut),
    a jitter plot of connectivity medians, and a comparison against the null
    connectivity distribution. Figures are saved into out_dir as either pdf
    or png depending on the `pdf` flag.

    NOTE(review): pred_file and sig_info are opened but never closed here —
    consider context managers. All parameters default to None even though
    several are effectively required (pred_file, sig_info, gctx, sig_gctx,
    null_conn, out_dir).
    """
    #setting default values
    # NOTE(review): int() truncates fractional y-limits; presumably the
    # limits are meant to be +/-1.0 — confirm whether float() was intended
    ymin = int(ymin) if ymin != None else int(-1.00)
    ymax = int(ymax) if ymax != None else int(1.00)

    pred_file = open(pred_file)
    pred_col = "prediction"

    # Create out_dir if missing; either way work with its absolute path
    if os.path.exists(out_dir):
        out_dir = os.path.abspath(out_dir)
    else:
        os.mkdir(out_dir)
        out_dir = os.path.abspath(out_dir)

    sig_info = open(sig_info)

    null_conn = getNullConnDist(null_conn)

    this_gctx = gct.parse(gctx)
    sig_gctx = gct.parse(sig_gctx)

    (gene2wt, gene2allele_call, gene2num_alleles,
     allele2pvals) = parse_pred_file(pred_file, pred_col, use_c_pval,
                                     ref_allele_mode)

    allele2distil_ids = parse_sig_info(sig_info, allele_col, cell_id, plate_id)

    for gene in gene2wt:
        # One figure per gene: 4 plot rows x (num alleles + 1) columns,
        # column 0 reserved for the WT reference plots
        this_fig = plt.figure()
        this_fig.set_size_inches((gene2num_alleles[gene] + 1) * 4, 4 * 3)
        grid_size = (4, gene2num_alleles[gene] + 1)

        # WT self-vs-self replicate heatmap in the top-left cell
        wt_heatmap_ax = plt.subplot2grid(grid_size, (0, 0))
        wt_im = plot_rep_heatmap(wt_heatmap_ax, this_gctx.data_df,
                                 allele2distil_ids[gene2wt[gene]],
                                 allele2distil_ids[gene2wt[gene]],
                                 gene2wt[gene], ymin, ymax)

        # WT self connectivity
        wt_self, wt_self_row_medians = getSelfConnectivity(
            this_gctx, allele2distil_ids[gene2wt[gene]],
            len(allele2distil_ids[gene2wt[gene]]))

        # Create consistent x values for the wt reps when plotting
        # (random jitter within WT_RANGE, reused across all allele columns)
        wt_x_vals = []
        for val in wt_self_row_medians:
            wt_x_vals.append(random.randint(WT_RANGE[0], WT_RANGE[1]))

        # Plot color bar on this axis
        plt.colorbar(wt_im, ax=wt_heatmap_ax, shrink=0.7)

        # Plot allele data, one column per allele, grouped by prediction type
        col_counter = 1
        for type in PRED_TYPE:
            for allele in gene2allele_call[gene][type]:
                # CREATE SCATTERPLOT FIGURE
                plot_signatures(pdf, out_dir, sig_gctx.data_df,
                                gene2wt[gene], allele,
                                allele2distil_ids[gene2wt[gene]],
                                allele2distil_ids[allele])

                # PLOT HEATMAP (allele self-vs-self)
                this_hm_ax = plt.subplot2grid(grid_size, (0, col_counter))
                plot_rep_heatmap(this_hm_ax, this_gctx.data_df,
                                 allele2distil_ids[allele],
                                 allele2distil_ids[allele],
                                 type + " - " + allele, ymin, ymax)

                # PLOT WT MUT heatmap (WT reps vs allele reps)
                this_wt_mut_ax = plt.subplot2grid(grid_size, (1, col_counter))
                plot_rep_heatmap(this_wt_mut_ax, this_gctx.data_df,
                                 allele2distil_ids[gene2wt[gene]],
                                 allele2distil_ids[allele],
                                 gene2wt[gene] + " vs " + allele, ymin, ymax)

                # PLOT RANKPOINT ROWS
                this_jitter_ax = plt.subplot2grid(grid_size, (2, col_counter))
                mut_self, mt_self_row_medians = getSelfConnectivity(
                    this_gctx, allele2distil_ids[allele],
                    len(allele2distil_ids[allele]))
                wt_mut, wt_mut_row_medians = getConnectivity(
                    this_gctx, allele2distil_ids[gene2wt[gene]],
                    allele2distil_ids[allele],
                    len(allele2distil_ids[allele]))

                # allele2pvals[allele] holds the p-values in positions
                # [0], [1] (jitter) and [2] (null-connectivity comparison)
                plot_jitter(
                    this_jitter_ax, col_counter, wt_x_vals,
                    wt_self_row_medians, mt_self_row_medians,
                    wt_mut_row_medians,
                    #                    null_x_vals,
                    #                    null_conn,
                    allele2pvals[allele][0], allele2pvals[allele][1],
                    use_c_pval, ymin, ymax, corr_val_str)

                # Compared to random connectivity
                conn_ax = plt.subplot2grid(grid_size, (3, col_counter))
                plot_conn(conn_ax, col_counter, null_conn,
                          wt_mut_row_medians, allele2pvals[allele][2],
                          use_c_pval, corr_val_str)

                col_counter += 1

        # Save one figure per gene; format chosen by the pdf flag
        if pdf:
            this_fig.savefig("%s/%s_impact_pred_plots.pdf" % (out_dir, gene),
                             format="pdf")
        else:
            this_fig.savefig("%s/%s_impact_pred_plots.png" % (out_dir, gene))
        plt.close(this_fig)
def test_with_both_metadata_fields(self):
    """Parsing the same dataset from .gct and .gctx must agree.

    Compares data_df, row_metadata_df, and col_metadata_df between the
    two parsers: identical label sets, and per-column identical lengths,
    dtypes (metadata only), and values.
    """
    # Paired example files in the two formats.
    gct_path = FUNCTIONAL_TESTS_PATH + "/both_metadata_example_n1476x978.gct"
    gctx_path = FUNCTIONAL_TESTS_PATH + "/both_metadata_example_n1476x978.gctx"

    # Parse each format.
    g_gct = parse_gct.parse(gct_path)
    g_gctx = parse_gctx.parse(gctx_path)

    # --- data_df: same row/column label sets ---
    self.assertTrue(set(g_gct.data_df.index) == set(g_gctx.data_df.index),
                    "Mismatch between data_df index values of gct vs gctx: {} vs {}".format(g_gct.data_df.index, g_gctx.data_df.index))
    self.assertTrue(set(g_gct.data_df.columns) == set(g_gctx.data_df.columns),
                    "Mismatch between data_df column values of gct vs gctx: {} vs {}".format(
                        g_gct.data_df.columns, g_gctx.data_df.columns))
    logger.debug("c1 gctoo data_df columns equal to gctoox data_df columns? {}".format(
        set(g_gct.data_df.columns) == set(g_gctx.data_df.columns)))

    # --- data_df: per-column length and value equality ---
    for col in g_gct.data_df.columns:
        self.assertTrue(len(g_gct.data_df[col]) == len(g_gctx.data_df[col]),
                        "Lengths of column {} differ between gct and gctx".format(col))
        pandas_testing.assert_series_equal(g_gct.data_df[col], g_gctx.data_df[col])

    # --- row_metadata_df: same index/column label sets ---
    self.assertTrue(set(g_gct.row_metadata_df.index) == set(g_gctx.row_metadata_df.index),
                    "Mismatch between row_metadata_df index values of gct vs gctx: {} vs {}".format(
                        g_gct.row_metadata_df.index, g_gctx.row_metadata_df.index))
    self.assertTrue(set(g_gct.row_metadata_df.columns) == set(g_gctx.row_metadata_df.columns),
                    "Mismatch between row_metadata_df column values of gct vs gctx: difference is {}".format(
                        set(g_gct.row_metadata_df.columns).symmetric_difference(
                            set(g_gctx.row_metadata_df.columns))))
    logger.debug("c1 gctoo row_metadata_df columns equal to gctoox row_metadata_df columns? {}".format(
        set(g_gct.row_metadata_df.columns) == set(g_gctx.row_metadata_df.columns)))
    logger.debug("c1 gctoo dtypes: {}".format(g_gct.row_metadata_df.dtypes))
    logger.debug("c1 gctoox dtypes: {}".format(g_gctx.row_metadata_df.dtypes))

    # --- row_metadata_df: per-column length, dtype, and value equality ---
    for col in g_gct.row_metadata_df.columns:
        self.assertTrue(len(g_gct.row_metadata_df[col]) == len(g_gctx.row_metadata_df[col]),
                        "Lengths of column {} differ between gct and gctx".format(col))
        logger.debug("first couple elems of {} in gctoo: {}".format(col, list(g_gct.row_metadata_df[col])[0:3]))
        self.assertTrue(g_gct.row_metadata_df[col].dtype == g_gctx.row_metadata_df[col].dtype,
                        "Dtype mismatch for {} between parsed gct & gctx: {} vs {}".format(
                            col,
                            g_gct.row_metadata_df[col].dtype,
                            g_gctx.row_metadata_df[col].dtype))
        pandas_testing.assert_series_equal(g_gct.row_metadata_df[col], g_gctx.row_metadata_df[col])

    # --- col_metadata_df: same index/column label sets ---
    self.assertTrue(set(g_gct.col_metadata_df.index) == set(g_gctx.col_metadata_df.index),
                    "Mismatch between col_metadata_df index values of gct vs gctx: {} vs {}".format(
                        g_gct.col_metadata_df.index, g_gctx.col_metadata_df.index))
    self.assertTrue(set(g_gct.col_metadata_df.columns) == set(g_gctx.col_metadata_df.columns),
                    "Mismatch between col_metadata_df column values of gct vs gctx: {} vs {}".format(
                        g_gct.col_metadata_df.columns, g_gctx.col_metadata_df.columns))
    logger.debug("c1 gctoo col_metadata_df columns equal to gctoox col_metadata_df columns? {}".format(
        set(g_gct.col_metadata_df.columns) == set(g_gctx.col_metadata_df.columns)))

    # --- col_metadata_df: per-column length, dtype, and value equality ---
    for col in g_gct.col_metadata_df.columns:
        self.assertTrue(len(g_gct.col_metadata_df[col]) == len(g_gctx.col_metadata_df[col]),
                        "Lengths of column {} differ between gct and gctx".format(col))
        self.assertTrue(g_gct.col_metadata_df[col].dtype == g_gctx.col_metadata_df[col].dtype,
                        "Dtype mismatch between parsed gct & gctx: {} vs {}".format(
                            g_gct.col_metadata_df[col].dtype, g_gctx.col_metadata_df[col].dtype))
        pandas_testing.assert_series_equal(g_gct.col_metadata_df[col], g_gctx.col_metadata_df[col])
# Molecule names: everything before the ".gct" suffix of each input file.
mol_names = [filename.split(".gct")[0] for filename in gct_files]


def process(data):
    """Select the replicate columns of interest from a parsed GCT.

    Keeps the '24H' columns; if that does not narrow to exactly one
    column, further filters to columns containing '10'.
    """
    df = data.data_df.filter(like='24H', axis=1)
    if len(df.columns) != 1:
        df = df.filter(like='10', axis=1)
    return df


col_names = []
dfs = []
for mol_name, gct_file in zip(mol_names, gct_files):
    data = parse(gct_file)
    df = process(data)
    # Collapse the selected replicate columns to a single per-row mean.
    df = pd.DataFrame(df.mean(axis=1))
    if len(df.columns) > 0:
        col_names.append(mol_name)
    # BUG FIX: index each frame with its *own* file's gene symbols.  The
    # original set the index after the loop, so every frame was labeled
    # with the metadata of the LAST file parsed.
    df = df.set_index(data.row_metadata_df['pr_gene_symbol'])
    dfs.append(df)

# One column per molecule, aligned on gene symbol; keep the first 978
# rows (the landmark-gene block).
df = pd.concat(dfs, axis=1)
df.columns = col_names
df = df.head(978)
def test_parse(self):
    """End-to-end checks of pg.parse across GCT flavors.

    Covers: an L1000 GCT, a P100 GCT, a GCT1.2 file (which has no column
    metadata), row-/col-metadata-only parsing, and subsetting by rid/cidx.
    Expected values are hard-coded from the functional-test fixture files.
    """
    # L1000 gct
    l1000_file_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000.gct")
    l1000_gct = pg.parse(l1000_file_path)
    logger.debug("parse L1000 gct - l1000_gct: {}".format(l1000_gct))
    self.assertEqual(GCToo.GCToo, type(l1000_gct))

    # Check a few values (spot-check of data and both metadata frames)
    self.assertAlmostEqual(l1000_gct.data_df.iloc[0, 0], 11.3819, places=4,
                           msg=("The first value in the data matrix should be " +
                                "{} not {}").format("11.3819", l1000_gct.data_df.iloc[0, 0]))
    self.assertEqual(l1000_gct.col_metadata_df.iloc[0, 0], 58,
                     msg=("The first value in the column metadata should be " +
                          "{} not {}").format("58", l1000_gct.col_metadata_df.iloc[0, 0]))
    self.assertEqual(l1000_gct.row_metadata_df.iloc[0, 0], "Analyte 11",
                     msg=("The first value in the row metadata should be " +
                          "{} not {}").format("Analyte 11", l1000_gct.row_metadata_df.iloc[0, 0]))

    # P100 gct
    p100_file_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100.gct")
    p100_gct = pg.parse(p100_file_path)
    logger.debug("parse P100 gct - p100_gct: {}".format(p100_gct))
    self.assertEqual(GCToo.GCToo, type(p100_gct))

    # Check a few values
    self.assertAlmostEqual(p100_gct.data_df.iloc[0, 0], 0.9182, places=4,
                           msg=("The first value in the data matrix should be " +
                                "{} not {}").format("0.9182", p100_gct.data_df.iloc[0, 0]))
    self.assertEqual(p100_gct.col_metadata_df.iloc[0, 0], "MCF7",
                     msg=("The first value in the column metadata should be " +
                          "{} not {}").format("MCF7", p100_gct.col_metadata_df.iloc[0, 0]))
    self.assertEqual(p100_gct.row_metadata_df.iloc[0, 0], 1859,
                     msg=("The first value in the row metadata should be " +
                          "{} not {}").format("1859", p100_gct.row_metadata_df.iloc[0, 0]))

    # GCT1.2
    gct_v1point2_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_v1point2_n5x10.gct")
    gct_v1point2 = pg.parse(gct_v1point2_path)
    logger.debug("parse GCT1.2 - gct_v1point2: {}".format(gct_v1point2))
    self.assertEqual(GCToo.GCToo, type(gct_v1point2))

    # Check a few values
    self.assertAlmostEqual(
        gct_v1point2.data_df.loc["217140_s_at", "LJP005_A375_24H_X1_B19:A06"],
        6.9966, places=4)
    self.assertEqual(gct_v1point2.row_metadata_df.loc["203627_at", "Description"],
                     "IGF1R")

    # Make sure col_metadata_df is empty (the 1.2 format carries none)
    self.assertEqual(gct_v1point2.col_metadata_df.size, 0,
                     "col_metadata_df should be empty.")

    # row_meta_only=True: parse should return a bare DataFrame, not a GCToo
    row_metadata_df = pg.parse(l1000_file_path, row_meta_only=True)
    logger.debug("row_meta_only=True - row_metadata_df.shape: {}".format(row_metadata_df.shape))
    self.assertEqual(pd.DataFrame, type(row_metadata_df))
    self.assertEqual((978, 11), row_metadata_df.shape)

    # col_meta_only=True: likewise a bare DataFrame
    col_metadata_df = pg.parse(l1000_file_path, col_meta_only=True)
    logger.debug("col_meta_only=True - col_metadata_df.shape: {}".format(col_metadata_df.shape))
    self.assertEqual(pd.DataFrame, type(col_metadata_df))
    self.assertEqual((377, 35), col_metadata_df.shape)

    # subsetting by rid (labels) and cidx (positional column indices);
    # note the expected row order below is the order in the FILE, not the
    # order the rids are requested in.
    my_rids = ["218597_s_at", "214404_x_at", "209253_at"]
    my_rids_order_in_gct = ["218597_s_at", "209253_at", "214404_x_at"]
    my_cidxs = [4, 0]
    e_data_df = pd.DataFrame(OrderedDict([
        ("LJP005_A375_24H_X1_B19:A03", [10.45, 8.14, 4.92]),
        ("LJP005_A375_24H_X1_B19:A07", [11.04, 7.53, 6.01])
    ]), dtype=np.float32)
    e_data_df.index = pd.Index(my_rids_order_in_gct)
    e_col_meta_df = pd.DataFrame(OrderedDict([
        ("pert_id", ["DMSO", "BRD-K76908866"]),
        ("pert_iname", ["DMSO", "CP-724714"])
    ]))
    e_col_meta_df.index = pd.Index(
        ["LJP005_A375_24H_X1_B19:A03", "LJP005_A375_24H_X1_B19:A07"])

    out_g = pg.parse(l1000_file_path, rid=my_rids, cidx=my_cidxs)
    self.assertEqual(out_g.data_df.shape, (3, 2))

    # N.B. returned object should have same order as input
    pd.util.testing.assert_frame_equal(e_data_df, out_g.data_df,
                                       check_less_precise=2, check_names=False)
    pd.util.testing.assert_frame_equal(e_col_meta_df,
                                       out_g.col_metadata_df[["pert_id", "pert_iname"]],
                                       check_names=False)