Example #1
           similarity_metric, connectivity_metric


def write_success(file_name, start_time_msg):
    # Create timestamp
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Write the start message and completion timestamp to file_name
    with open(file_name, 'w') as f:
        f.write(start_time_msg + "\n")
        f.write("external_query_many.py completed at {}\n".format(timestamp))


def write_failure(file_name, start_time_msg):
    # Record the traceback of the exception currently being handled
    _, _, exc_traceback = sys.exc_info()

    # Write the start message and the stacktrace to file_name
    with open(file_name, "w") as f:
        f.write(start_time_msg + "\n")
        traceback.print_tb(exc_traceback, file=f)


if __name__ == "__main__":
    args = build_parser().parse_args(sys.argv[1:])
    setup_logger.setup(verbose=args.verbose)

    main(args)
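
For context, a minimal sketch of how these two helpers can be wired around main. The status-file name and start message below are illustrative assumptions, not part of the source:

status_file = "external_query_many_status.txt"  # hypothetical path
start_time_msg = "external_query_many.py started at {}".format(
    datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

try:
    main(args)
    write_success(status_file, start_time_msg)
except Exception:
    write_failure(status_file, start_time_msg)
    raise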
Example #2
import glob
import logging
import os

import setup_logger

# Location of gcts processed using R code
gct_loc_r_code = "/cmap/data/proteomics/produced_by_jjaffe_code/dry/wget_processed/"
gct_r_code_suffix = "*GCP*.gct"

# Location of gcts processed using dry
gct_loc_dry = "/cmap/data/proteomics/dry/2016-10-10/"

# Suffix for files created with dry
dry_suffix = ".gct.dry.processed.gct"

##########

# Setup logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)
setup_logger.setup(verbose=False)

# Get R files
all_r_files = glob.glob(gct_loc_r_code + gct_r_code_suffix)

assert len(all_r_files) > 0, "No R files were found!"
logger.debug("{} R files found".format(len(all_r_files)))

# For each R file, see if we can find its complementary file created with dry
# Only keep the R files with a complement
r_files = []
dry_files = []

for r_file in all_r_files:
    dry_file_path = os.path.join(
        gct_loc_dry,
        # Assumed mapping: the dry file shares the R file's basename,
        # with dry_suffix in place of the bare .gct extension
        os.path.basename(r_file).replace(".gct", dry_suffix))

    # Only keep the pair if the complementary dry file actually exists
    if os.path.exists(dry_file_path):
        r_files.append(r_file)
        dry_files.append(dry_file_path)
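
Continuing in the same pattern as the earlier checks, a hedged sketch (an assumption, not from the source) that verifies at least one complementary pair was found:

assert len(dry_files) > 0, "No complementary dry files were found!"
logger.debug("{} matched R/dry file pairs found".format(len(dry_files)))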
Example #3
        # Mock out the s3 resource so the test makes no real AWS calls
        s3 = mock.Mock()
        s3.Object = mock.Mock()
        s3.Object.return_value.get = mock.MagicMock()

        s3Dict = {"Body": True}

        def s3_get_item(key):
            return s3Dict[key]

        s3.Object.return_value.get.__getitem__.side_effect = s3_get_item

        # happy path
        s3.Object.return_value.get.read = mock.Mock()
        ltb.json.loads = mock.Mock(return_value={"id": "fake_id"})

        returned_tuple = ltb.get_panorama_request_and_parse(
            s3, bucket_name, current_gct_key)
        expected_tuple = ("fake_id", "this_plate_name")
        self.assertEqual(expected_tuple, returned_tuple)

        # unhappy path: s3 reading exception
        s3.Object.side_effect = Exception("failure")
        with self.assertRaises(Exception) as context:
            returned_tuple = ltb.get_panorama_request_and_parse(
                s3, bucket_name, current_gct_key)
        self.assertEqual("failure", str(context.exception))


if __name__ == "__main__":
    setup_logger.setup(verbose=True)
    unittest.main()
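
For orientation, a hedged sketch of the call shape this test exercises. The signature comes from the test itself; the body is purely illustrative and is not ltb's actual implementation:

import json
import os


def get_panorama_request_and_parse(s3, bucket_name, gct_key):
    # Illustrative body only: read the Panorama request JSON from S3 and
    # return the fields the test asserts on (an id plus a plate name)
    body = s3.Object(bucket_name, gct_key).get()["Body"]
    panorama_request = json.loads(body.read())
    plate_name = os.path.basename(gct_key).split(".")[0]  # hypothetical parsing
    return panorama_request["id"], plate_name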
Example #4
        assert len(meta_value) == num_wells, (
            "The entry {} has length {}, which is not equal to num_wells: {}.".
            format(meta_key, len(meta_value), num_wells))

    # Append the metadata_dict to plate_and_well_dict
    df_dict = plate_and_well_dict.copy()
    df_dict.update(metadata_dict)

    # Convert dict to df and rearrange columns appropriately
    temp_df = pd.DataFrame.from_dict(df_dict)
    cols = [PLATE_FIELD, WELL_FIELD] + sorted(metadata_dict.keys())
    out_df = temp_df[cols]

    return out_df


def undo_log_transform_if_needed(data_df, prov_code):
    """Undo log transformation if L2X is in prov_code."""
    if LOG_TRANSFORM_PROV_CODE_ENTRY in prov_code:
        out_df = np.exp2(data_df)
    else:
        out_df = data_df

    return out_df


if __name__ == "__main__":
    args = build_parser().parse_args(sys.argv[1:])
    setup_logger.setup()
    main(args)
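
A quick usage sketch of undo_log_transform_if_needed. The value of LOG_TRANSFORM_PROV_CODE_ENTRY is assumed to be "L2X" per the docstring, and the data values are illustrative:

import numpy as np
import pandas as pd

LOG_TRANSFORM_PROV_CODE_ENTRY = "L2X"  # assumed, per the docstring

data_df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]])
prov_code = ["GCP", "L2X"]  # hypothetical provenance code list

# Since "L2X" is present, each log2 value x is mapped back to 2**x
out_df = undo_log_transform_if_needed(data_df, prov_code)
# out_df now holds 2**x for each entry: 2.0, 4.0, 8.0, 16.0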