Code example #1
0
File: snpreader.py  Project: MMesbahU/PySnpTools
    def read_kernel(self, standardizer=None, block_size=None, order='A', dtype=np.float64, force_python_only=False, view_ok=False):
        """Returns a :class:`KernelData` such that the :meth:`KernelData.val` property will be a ndarray of the standardized SNP values multiplied with their transposed selves.

        :param standardizer: -- (required) Specify standardization to be applied before the matrix multiply. Any :class:`.Standardizer` may be used. Some choices include :class:`Standardizer.Identity`
            (do nothing), :class:`.Unit` (make values for each SNP have mean zero and standard deviation 1.0) and :class:`Beta`.
        :type standardizer: :class:`.Standardizer`

        :param block_size: optional -- Default of None (meaning to load all). Suggested number of sids to read into memory at a time.
        :type block_size: int or None

        :rtype: :class:`KernelData`

        Calling the method again causes the SNP values to be re-read and allocates a new :class:`KernelData`.

        When applied to a read-from-disk SnpReader, such as :class:`.Bed`, the method can save memory by reading (and standardizing) the data in blocks.

        :Example:

        >>> from pysnptools.snpreader import Bed
        >>> from pysnptools.standardizer import Unit
        >>> snp_on_disk = Bed('../../tests/datasets/all_chr.maf0.001.N300') # Specify SNP data on disk
        >>> kerneldata1 = snp_on_disk.read_kernel(Unit())
        >>> print(int(kerneldata1.iid_count), kerneldata1.val[0,0])
        300 901.421835903
        """
        # 'standardizer' defaults to None only to keep a keyword-friendly signature; it is required.
        assert standardizer is not None, "'standardizer' must be provided"

        # Imported inside the method, not at module top — presumably to avoid a
        # circular import between the snpreader and kernelreader packages; confirm.
        from pysnptools.kernelreader import SnpKernel
        snpkernel = SnpKernel(self,standardizer=standardizer,block_size=block_size)
        kerneldata = snpkernel.read(order, dtype, force_python_only, view_ok)
        return kerneldata
コード例 #2
0
    def read_kernel(self, standardizer=None, block_size=None, order='A', dtype=np.float64, force_python_only=False, view_ok=False):
        """Return a :class:`KernelData` whose ``val`` is the standardized SNP matrix multiplied by its own transpose.

        :param standardizer: (required) standardization applied before the matrix multiply.
            Any :class:`.Standardizer` may be used, e.g. :class:`Standardizer.Identity`
            (do nothing), :class:`.Unit` (mean zero, standard deviation 1.0 per SNP), or :class:`Beta`.
        :type standardizer: :class:`.Standardizer`

        :param block_size: optional suggested number of sids to read into memory at a time;
            ``None`` (the default) loads everything at once.
        :type block_size: int or None

        :rtype: :class:`KernelData`

        Each call re-reads the SNP values and allocates a fresh :class:`KernelData`.
        For read-from-disk SnpReaders such as :class:`.Bed`, reading (and standardizing)
        happens block by block to save memory.

        :Example:

        >>> from pysnptools.snpreader import Bed
        >>> from pysnptools.standardizer import Unit
        >>> snp_on_disk = Bed('tests/datasets/all_chr.maf0.001.N300',count_A1=False) # Specify SNP data on disk
        >>> kerneldata1 = snp_on_disk.read_kernel(Unit())
        >>> print(int(kerneldata1.iid_count), kerneldata1.val[0,0])
        300 901.421835903
        """
        assert standardizer is not None, "'standardizer' must be provided"

        from pysnptools.kernelreader import SnpKernel
        lazy_kernel = SnpKernel(self, standardizer=standardizer, block_size=block_size)
        return lazy_kernel.read(order, dtype, force_python_only, view_ok)
コード例 #3
0
 def test_merge_std(self):
     """Block-wise (block_size=1) and whole-matrix (block_size=None) standardizing reads must agree."""
     for standardizer in [stdizer.Beta(2, 10), stdizer.Unit()]:
         # Fixed seed so both reads are built from identical random genotypes.
         np.random.seed(0)
         sid_count = 20
         snp_data = SnpData(iid=[["0", "0"], ["1", "1"], ["2", "2"]],
                            sid=[str(i) for i in range(sid_count)],
                            val=np.array(np.random.randint(3, size=[3, sid_count]),
                                         dtype=np.float64,
                                         order='F'))
         blockwise = SnpKernel(snp_data, standardizer,
                               block_size=1)._read_with_standardizing(
                                   to_kerneldata=True, return_trained=True)
         whole = SnpKernel(snp_data, standardizer,
                           block_size=None)._read_with_standardizing(
                               to_kerneldata=True, return_trained=True)
         kernel_a, trained_a, diag_a = blockwise
         kernel_b, trained_b, diag_b = whole
         np.testing.assert_array_almost_equal(kernel_a.val, kernel_b.val,
                                              decimal=10)
         np.testing.assert_array_almost_equal(trained_a.stats, trained_b.stats,
                                              decimal=10)
         assert abs(diag_a.factor - diag_b.factor) < 1e-7
コード例 #4
0
    def test_respect_read_inputs(self):
        """Check that every kernel-reader flavor honors read()'s order/dtype/force_python_only/view_ok.

        For each reader and every combination of read() arguments, the returned
        ndarray must have the requested memory order and dtype, and a read with
        view_ok=False must never alias the reader's own buffer.
        """
        from pysnptools.kernelreader import KernelHdf5, Identity, KernelNpz, SnpKernel
        from pysnptools.standardizer import Unit
        from pysnptools.standardizer import Identity as StdIdentity
        from pysnptools.snpreader import Bed

        # Example files are addressed relative to this test file, so chdir for the duration.
        previous_wd = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        try:
            iidref = KernelNpz('../examples/toydata.kernel.npz').iid

            for kernelreader in [
                    SnpKernel(Bed('../examples/toydata.5chrom.bed', count_A1=True),
                              StdIdentity())[::2, ::2],
                    Bed('../examples/toydata.5chrom.bed',
                        count_A1=True)[::2, ::2].read_kernel(StdIdentity()),
                    KernelHdf5('../examples/toydata.kernel.hdf5'),
                    Identity(iidref, test=[('0', 'x'), ('0', 'y')]),
                    Identity(iidref),
                    KernelNpz('../examples/toydata.kernel.npz'),
                    KernelNpz('../examples/toydata.kernel.npz').read(),
                    KernelNpz('../examples/toydata.kernel.npz')[::2, ::2],
                    Bed('../examples/toydata.5chrom.bed',
                        count_A1=True).read_kernel(Unit()),
                    SnpKernel(Bed('../examples/toydata.5chrom.bed', count_A1=True),
                              Unit())
            ]:
                logging.info(str(kernelreader))
                for order in ['F', 'C', 'A']:
                    for dtype in [np.float32, np.float64]:
                        for force_python_only in [True, False]:
                            for view_ok in [True, False]:
                                val = kernelreader.read(
                                    order=order,
                                    dtype=dtype,
                                    force_python_only=force_python_only,
                                    view_ok=view_ok).val
                                has_right_order = order == "A" or (
                                    order == "C" and val.flags["C_CONTIGUOUS"]
                                ) or (order == "F" and val.flags["F_CONTIGUOUS"])
                                if hasattr(kernelreader, 'val') and not view_ok:
                                    # A copy was requested, so no aliasing is allowed.
                                    assert kernelreader.val is not val
                                if (hasattr(kernelreader, 'val') and view_ok
                                        and kernelreader.val is not val and
                                    (order == 'A' or
                                     (order == 'F' and
                                      kernelreader.val.flags['F_CONTIGUOUS']) or
                                     (order == 'C'
                                      and kernelreader.val.flags['C_CONTIGUOUS']))
                                        and (dtype is None
                                             or kernelreader.val.dtype == dtype)):
                                    # Bug fix: this previously formatted an undefined
                                    # name 'distreader', raising NameError whenever
                                    # this branch executed.
                                    logging.info(
                                        "{0} could have read a view, but didn't".
                                        format(kernelreader))
                                assert val.dtype == dtype and has_right_order
        finally:
            # Restore the working directory even if an assertion fails.
            os.chdir(previous_wd)
コード例 #5
0
File: __init__.py  Project: eric-czech/PySnpTools
def _reindex_snpkernel(snpkernel, iididx, is_test=False):
    """Return a new SnpKernel whose iids are reindexed by ``iididx``.

    :param snpkernel: the SnpKernel to reindex
    :param iididx: indexer (array/slice) selecting iids
    :param is_test: when True, reindex against the test iids (iid1) instead of
        the train iids (iid0)
    :rtype: SnpKernel
    """
    from pysnptools.kernelreader import SnpKernel
    if not is_test:
        # Fix: removed the unused local 'reference' that aliased new_reader.
        new_reader = snpkernel.snpreader[iididx, :]
        result = SnpKernel(new_reader, snpkernel.standardizer, block_size=snpkernel.block_size)
    else:
        # NOTE(review): 'new_reader' (the subset of snpkernel.test) is computed but
        # never passed on — the result keeps only the full snpreader. This looks
        # like the test subset is silently dropped; confirm intended behavior.
        new_reader = snpkernel.test[iididx, :]
        result = SnpKernel(snpkernel.snpreader, snpkernel.standardizer, block_size=snpkernel.block_size)
    return result
コード例 #6
0
    def test_api(self):
        """Exercise the kernel/standardizer API end to end: train/test covariate
        standardization, whole-kernel slicing, block-wise standardizing reads,
        and FastLMM fit/predict.

        NOTE(review): several names (train_kernel, test_test_kernel, whole_kernel)
        are reassigned between sections; each section stands alone and earlier
        values are discarded.
        """
        train_idx = np.r_[10:self.snpreader_whole.iid_count] # iids 10 and on
        test_idx  = np.r_[0:10] # the first 10 iids

        #####################################################
        # Train and standardize cov and then apply to test
        #####################################################

        # return_trained=True also yields the fitted standardizer so the same
        # transformation can be applied to the held-out covariates.
        cov_train, unit_trained = self.covariate_whole[train_idx,:].read().standardize(Unit(),return_trained=True)
        cov_test = self.covariate_whole[test_idx,:].read().standardize(unit_trained)

        #####################################################
        # standardize whole kernel from snps (both ways) and then pull out the 3 parts
        #####################################################

        whole_kernel = SnpKernel(self.covariate_whole,Unit()).read().standardize(DiagKtoN())
        train_kernel = whole_kernel[train_idx].read(order='A',view_ok=True)
        # NOTE(review): despite its name, this selects train x test — confirm naming.
        test_kernel = whole_kernel[train_idx,test_idx].read(order='A',view_ok=True)
        test_test_kernel = whole_kernel[test_idx,test_idx].read(order='A',view_ok=True)

        #####################################################
        # create train_train, train_test, and test_test based on just the training snps (both standardizations)
        #####################################################

        K_train = SnpKernel(self.snpreader_whole[train_idx,:],Unit(),block_size=100)
        train_train_kernel, snp_trained, kernel_trained = K_train._read_with_standardizing(to_kerneldata=True, kernel_standardizer=DiagKtoN(), return_trained=True)

        K_whole_test = _SnpWholeTest(train=self.snpreader_whole[train_idx,:],test=self.snpreader_whole[test_idx,:],standardizer=snp_trained,block_size=100)
        train_idx2 = K_whole_test.iid0_to_index(self.snpreader_whole.iid[train_idx]) #The new reader may have the iids in a different order than the original reader
        train_test_kernel = K_whole_test[train_idx2,:].read().standardize(kernel_trained)

        test_idx2 = K_whole_test.iid0_to_index(self.snpreader_whole.iid[test_idx])
        # NOTE(review): overwrites the test_test_kernel computed above — confirm intended.
        test_test_kernel = K_whole_test[test_idx2,:].read().standardize(kernel_trained)

        #####################################################
        # How does predict look with whole_test as input?
        #####################################################

        # a. - standardize whole up front
        whole_kernel = SnpKernel(self.snpreader_whole,Unit(),block_size=100).read().standardize()
        train_kernel = whole_kernel[train_idx].read(order='A',view_ok=True)
        whole_test_kernel = whole_kernel[:,test_idx].read(order='A',view_ok=True)
        fastlmm1 = FastLMM(snp_standardizer=SS_Identity(), kernel_standardizer=KS_Identity())
        fastlmm1.fit(K0_train=train_kernel, X=self.covariate_whole, y=self.pheno_whole) #iid intersection means we won't really be using whole covar or pheno
        predicted_pheno, covar = fastlmm1.predict(K0_whole_test=whole_test_kernel, X=self.covariate_whole,count_A1=False)
        output_file = self.file_name("whole")
        Dat.write(output_file,predicted_pheno)
        self.compare_files(predicted_pheno,"whole")

        # b -- just files
        fastlmm2 = FastLMM()
        fastlmm2.fit(K0_train=self.snpreader_whole[train_idx,:], X=self.covariate_whole, y=self.pheno_whole[train_idx,:]) #iid intersection means we won't really be using whole covar
        predicted_pheno, covar = fastlmm2.predict(K0_whole_test=self.snpreader_whole[test_idx,:], X=self.covariate_whole,count_A1=False)
        self.compare_files(predicted_pheno,"one")
コード例 #7
0
    def test_intersection(self):
        """intersect_apply on a SnpKernel should subset the underlying snpreader, not wrap the kernel."""

        from pysnptools.standardizer import Unit
        from pysnptools.kernelreader import SnpKernel
        from pysnptools.snpreader import Pheno
        from pysnptools.kernelreader._subset import _KernelSubset
        from pysnptools.snpreader._subset import _SnpSubset
        from pysnptools.util import intersect_apply

        all_snps = Bed(self.currentFolder + "/../examples/toydata.5chrom.bed",
                       count_A1=False)
        kernel = SnpKernel(all_snps, stdizer.Identity())

        phenotype = Pheno(self.currentFolder + "/../examples/toydata.phe")
        # Drop one iid from the phenotype so that the intersection has work to do.
        phenotype = phenotype[1:, :]

        # SnpKernel is special because it standardizes AFTER intersecting, so
        # the subsetting must land on the snpreader rather than on the kernel.
        intersected_kernel, phenotype = intersect_apply([kernel, phenotype])
        assert isinstance(intersected_kernel.snpreader, _SnpSubset)
        assert not isinstance(intersected_kernel, _KernelSubset)

        # Fancy selection on a SnpKernel should still yield a SnpKernel.
        subset_kernel = kernel[::2]
        assert isinstance(subset_kernel, SnpKernel)

        logging.info("Done with test_intersection")
コード例 #8
0
 def test_snp_kernel2(self):
     """Smoke-test SnpKernel construction, str(), and the existence check helper."""
     logging.info("in test_snp_kernel2")
     bed_reader = Bed(self.currentFolder + "/../examples/toydata.5chrom.bed",
                      count_A1=False)
     kernel = SnpKernel(bed_reader, standardizer=stdizer.Beta(1, 25))
     str(kernel)  # must not raise
     _fortesting_JustCheckExists().input(kernel)
コード例 #9
0
    def test_mixingKs(self):
        """single_snp with two SnpKernel inputs (K0 and K1) and automatic mixing."""
        logging.info("TestSingleSnp test_mixingKs")
        all_snps = Bed(self.bedbase)
        pheno_fn = self.phen_fn
        covar_fn = self.cov_fn

        output_file_name = self.file_name("mixingKs")
        # Two disjoint SNP ranges form the two kernels; the first 10 SNPs are tested.
        kernel0 = SnpKernel(all_snps[:, 10:100], Unit())
        kernel1 = SnpKernel(all_snps[:, 100:200], Unit())
        frame = single_snp(test_snps=all_snps[:, :10],
                           pheno=pheno_fn,
                           covar=covar_fn,
                           K0=kernel0,
                           K1=kernel1,
                           leave_out_one_chrom=False,
                           mixing=None,
                           output_file_name=output_file_name)

        self.compare_files(frame, "mixing")
コード例 #10
0
def _K_per_chrom(K, chrom, iid, count_A1=None):
    """Return the kernel K restricted to SNPs whose chromosome differs from ``chrom``.

    When K is None an identity kernel over ``iid`` is returned; otherwise K must
    resolve (via _kernel_fixup) to a SnpKernel so its SNPs can be filtered.
    """
    if K is None:
        return KernelIdentity(iid)
    K_all = _kernel_fixup(K, iid_if_none=iid, standardizer=Unit(), count_A1=count_A1)
    if not isinstance(K_all, SnpKernel):
        raise Exception("Don't know how to make '{0}' work per chrom".format(K_all))
    # Keep every SNP whose chromosome (pos column 0) is not 'chrom'.
    return SnpKernel(K_all.snpreader[:, K_all.pos[:, 0] != chrom], K_all.standardizer)
コード例 #11
0
File: fastlmm_predictor.py  Project: fastlmm/FaST-LMM
    def __getitem__(self, iid_indexer_and_snp_indexer):
        """Subset this whole-test kernel by iid indexers.

        Accepts either a (iid0_indexer, iid1_indexer) tuple or a single indexer
        that is applied to both dimensions. Three access patterns are supported:

        1. train x test  -> returns a _SnpTrainTest
        2. test x test   -> returns a SnpKernel over the test SNPs
        3. a pure reordering/selection of iid0 over the full matrix
           -> returns a new _SnpWholeTest

        :raises Exception: for any other access pattern.
        """
        if isinstance(iid_indexer_and_snp_indexer, tuple):
            iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer
        else:
            # A single indexer applies to rows and columns alike.
            iid0_indexer = iid_indexer_and_snp_indexer
            iid1_indexer = iid0_indexer

        row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(
            self.row_count, iid0_indexer)
        col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(
            self.col_count, iid1_indexer)

        if row_index_or_none is None:
            row_index_or_none = list(range(self.row_count))

        assert not isinstance(row_index_or_none,
                              str), "row_index_or_none should not be a string"
        iid = self.row[row_index_or_none]

        if col_index_or_none is None or np.array_equal(
                col_index_or_none, list(range(self.col_count))):
            test = self.test
        else:
            test = self.test[col_index_or_none]

        try:  #case 1: asking for train x test
            train = self.train[self.train.iid_to_index(iid), :]
            is_ok = True
        except Exception:  # narrowed from bare 'except' so KeyboardInterrupt/SystemExit propagate
            is_ok = False
        if is_ok:
            return _SnpTrainTest(train=train,
                                 test=test,
                                 standardizer=self.standardizer,
                                 block_size=self.block_size)

        #case 2: asking for test x test (comment fixed; it previously repeated "train x test")
        if np.array_equal(test.iid, iid):
            return SnpKernel(test,
                             standardizer=self.standardizer,
                             block_size=self.block_size)

        #case 3: Just re-reordering the iids
        if len(row_index_or_none) == self.row_count and (
                col_index_or_none is None
                or len(col_index_or_none) == self.col_count):
            result = _SnpWholeTest(train=self.train,
                                   test=test,
                                   standardizer=self.standardizer,
                                   block_size=self.block_size,
                                   iid0=iid)
            return result

        raise Exception(
            "When reading from a _SnpWholeTest, can only ask to reorder iids or to access from train x test or test x test"
        )
コード例 #12
0
File: pairs.py  Project: fastlmm/PySnpTools
def epi_reml(pair_snps,
             pheno,
             covar=None,
             kernel_snps=None,
             output_dir='results',
             part_count=33,
             runner=None,
             override=False):
    """Run epistasis tests over all pairs of SNP parts, writing one TSV per part pair.

    :param pair_snps: SNP reader whose sids are split into ``part_count`` parts;
        every within-part and cross-part pair is tested with single_snp.
    :param pheno: phenotype (file name or reader) passed through to single_snp.
    :param covar: optional covariates passed through to single_snp.
    :param kernel_snps: SNPs used to build the similarity kernel K0; defaults to
        ``pair_snps`` when None.
    :param output_dir: directory for per-pair result files (created if missing).
    :param part_count: number of sid parts; the number of part pairs is
        part_count*(part_count+1)//2.
    :param runner: optional runner forwarded to single_snp.
    :param override: when True, recompute and overwrite existing result files.
    """
    from pysnptools.kernelreader import SnpKernel
    from pysnptools.standardizer import Unit
    import datetime
    from fastlmm.association import single_snp

    part_list = list(split_on_sids(pair_snps, part_count))
    # Bug fix: use integer division. '/' produced a float (e.g. 561.0) that
    # leaked into the output file names ('result.0.561.0.tsv'); the parallel
    # implementation of this routine already uses '//'.
    part_pair_count = (part_count * part_count + part_count) // 2
    part_pair_index = -1
    print("part_pair_count={0:,}".format(part_pair_count))
    K0 = SnpKernel(kernel_snps or pair_snps,
                   standardizer=Unit()).read()  #Precompute the similarity
    os.makedirs(output_dir, exist_ok=True)  # race-free create-if-missing
    start_time = datetime.datetime.now()
    for i in range(part_count):
        part_i = part_list[i]
        for j in range(i, part_count):
            part_pair_index += 1
            # Within-part pairs when i == j, cross-part pairs otherwise.
            pairs = _Pairs2(part_i) if i == j else _Pairs2(
                part_i, part_list[j])
            print("Looking at pair {0},{1} which is {2} of {3}".format(
                i, j, part_pair_index, part_pair_count))
            output_file = '{0}/result.{1}.{2}.tsv'.format(
                output_dir, part_pair_index, part_pair_count)
            if override or not os.path.exists(output_file):
                result_df_ij = single_snp(pairs,
                                          K0=K0,
                                          pheno=pheno,
                                          covar=covar,
                                          leave_out_one_chrom=False,
                                          count_A1=True,
                                          runner=runner)
                result_df_ij.to_csv(output_file, sep="\t", index=False)
                print(result_df_ij[:1])
                time_so_far = datetime.datetime.now() - start_time
                # Linear extrapolation of total runtime from pairs completed so far.
                total_time_estimate = time_so_far * part_pair_count / (
                    part_pair_index + 1)
                print(total_time_estimate)
コード例 #13
0
    def test_subset(self):
        """Subsetting a SnpKernel ([::2, ::2] and the one-arg [::2] form) matches subsetting the computed kernel."""
        logging.info("in test_subset")
        bed_reader = Bed(self.currentFolder + "/../examples/toydata.5chrom.bed",
                         count_A1=False)
        kernel = SnpKernel(bed_reader, stdizer.Unit())

        # Two-argument subsetting.
        actual_two_arg = kernel[::2, ::2].read()
        expected = bed_reader.read_kernel(stdizer.Unit())[::2].read()
        np.testing.assert_array_almost_equal(actual_two_arg.val,
                                             expected.val,
                                             decimal=10)

        # One-argument subsetting selects the same square block.
        actual_one_arg = kernel[::2].read()
        np.testing.assert_array_almost_equal(actual_one_arg.val,
                                             expected.val,
                                             decimal=10)
        logging.info("done with test")
コード例 #14
0
File: fastlmm_predictor.py  Project: fastlmm/FaST-LMM
def _kernel_fixup(input,
                  iid_if_none,
                  standardizer,
                  test=None,
                  test_iid_if_none=None,
                  block_size=None,
                  train_snps=None,
                  count_A1=None):
    """Coerce 'input' (file name, SnpReader, kernel reader, or None) into a kernel reader.

    A '.npz' file name becomes a KernelNpz; any other file name is opened as Bed.
    SNP data becomes a SnpKernel (or a _SnpWholeTest when 'test' is given);
    None becomes a KernelIdentity; anything else is returned unchanged.
    """
    if test is not None and input is None:
        # Only 'test' was given: promote it to the primary input.
        input, test = test, None

    if isinstance(input, str):
        if input.endswith(".npz"):
            return KernelNpz(input)
        input = Bed(input, count_A1=count_A1)  # no return: processing continues below
    if isinstance(test, str):
        test = Bed(test, count_A1=count_A1)  # no return: processing continues below

    if isinstance(input, SnpReader):
        if test is None:
            return SnpKernel(input,
                             standardizer=standardizer,
                             block_size=block_size)
        return _SnpWholeTest(train=train_snps,
                             test=test,
                             standardizer=standardizer,
                             block_size=block_size)

    if input is None:
        return KernelIdentity(iid=iid_if_none, test=test_iid_if_none)

    return input
コード例 #15
0
File: fastlmmmodel.py  Project: DSLituiev/FaST-LMM
def _kernel_fixup(input,
                  iid_if_none,
                  standardizer,
                  test=None,
                  test_iid_if_none=None):
    """Coerce 'input' (file name, SnpReader, kernel reader, or None) into a kernel reader.

    A '.npz' file name becomes a KernelNpz; any other file name is opened as Bed.
    SNP data becomes a SnpKernel; None becomes a KernelIdentity; anything else is
    returned unchanged.
    """
    if input is None and test is not None:
        # Only 'test' was given: promote it to the primary input.
        input, test = test, None

    if isinstance(input, str):
        if input.endswith(".npz"):
            return KernelNpz(input)
        input = Bed(input)  # no return: processing continues below
    if isinstance(test, str):
        test = Bed(test)  # no return: processing continues below

    if isinstance(input, SnpReader):
        return SnpKernel(input, standardizer=standardizer, test=test)

    if input is None:
        return KernelIdentity(iid=iid_if_none, test=test_iid_if_none)

    return input
コード例 #16
0
    def combine_the_best_way(K0, K1, covar, y, mixing, h2, force_full_rank=False, force_low_rank=False,snp_standardizer=None,kernel_standardizer=None,block_size=None):
        """Combine kernels K0 and K1 into a single kernel, estimating the mixing weight when not given.

        Depending on effective rank, either mixes the SNP matrices directly
        (low-rank path, building a combined G) or mixes the kernel matrices
        (full-rank path).

        Returns the tuple (K, h2, mixer).
        """
        from pysnptools.kernelstandardizer import Identity as KS_Identity

        # Bug fix: check for None before touching attributes; the original order
        # raised AttributeError on a None kernel instead of the intended assert.
        assert K0 is not None
        assert K1 is not None
        assert K0.iid0 is K0.iid1, "Expect K0 to be square"
        assert K1.iid0 is K1.iid1, "Expect K1 to be square"
        assert np.array_equal(K0.iid,K1.iid), "Expect K0 and K1 to having matching iids"
        assert kernel_standardizer is not None, "expect values for kernel_standardizer"

        mixer = _Mixer(False,KS_Identity(),KS_Identity(),mixing)

        sid_count_0 = _Mixer.sid_counter(K0, force_full_rank, force_low_rank)
        sid_count_1 = _Mixer.sid_counter(K1, force_full_rank, force_low_rank)

        #################################
        # Both Identity (or not given)
        #################################
        if sid_count_0 + sid_count_1 == 0:
            h2 = h2 or 0
            mixer.mixing = mixer.mixing or 0
            K = K0.read() #would be nice to use LinearRegression or low-rank with 0 snps

        #################################
        # Low-rank path: work with SNP matrices (G) rather than full kernels
        #################################
        elif sid_count_0 + sid_count_1 < K0.iid_count or force_low_rank:
            mixer.do_g = True
            #!!!there is no need for block_size here because we want G0 in full. But if starting with SNPs and not low-rank then batches are needed and the two standardizers must be remembered for use later

            if sid_count_0 > 0:
                K0, mixer.snp_trained0, mixer.kernel_trained0 = K0._read_with_standardizing(to_kerneldata=not mixer.do_g, kernel_standardizer=kernel_standardizer, return_trained=True)
            if sid_count_1 > 0:
                K1, mixer.snp_trained1, mixer.kernel_trained1 = K1._read_with_standardizing(to_kerneldata=not mixer.do_g, kernel_standardizer=kernel_standardizer, return_trained=True)

            if sid_count_1 == 0:
                mixer.mixing = mixer.mixing or 0
                K = K0
            elif sid_count_0 == 0:
                mixer.mixing = mixer.mixing or 1
                K = K1
            else:
                if mixer.do_g:
                    G = np.empty((K0.iid_count, K0.sid_count + K1.sid_count))
                    if mixer.mixing is None:
                        mixer.mixing, h2 = _find_mixing_from_Gs(G, covar, K0.snpreader.val, K1.snpreader.val, h2, y)

                    if mixer.mixing == 0:
                        K = K0
                    elif mixer.mixing == 1:
                        K = K1
                    else:
                        _mix_from_Gs(G, K0.snpreader.val, K1.snpreader.val, mixer.mixing)
                        # Bug fix: 'xrange' is Python 2 only; use 'range'.
                        G = SnpData(iid=K0.iid,
                                            sid=["K0_{0}".format(i) for i in range(K0.sid_count)]+["K1_{0}".format(i) for i in range(K1.sid_count)], #rename the sids so that they can't collide.
                                            val=G,name="{0}&{1}".format(K0.snpreader,K1.snpreader),
                                            pos=np.concatenate((K0.pos,K1.pos),axis=0)
                                            )
                        K = SnpKernel(G,SS_Identity(),block_size=block_size)
        else:
            mixer.do_g = False
            if sid_count_0 > 0: #!!!but what if we have SNP data but still need to remember the standardizer?
                K0, mixer.snp_trained0, mixer.kernel_trained0 = K0._read_with_standardizing(to_kerneldata=True,return_trained=True)#!!!pass in a new argument, the kernel_standardizer(???)

            if sid_count_1 > 0:
                K1, mixer.snp_trained1, mixer.kernel_trained1 = K1._read_with_standardizing(to_kerneldata=True,return_trained=True)

            if sid_count_1 == 0:
                mixer.mixing = mixer.mixing or 0
                K = K0
            elif sid_count_0 == 0:
                mixer.mixing = mixer.mixing or 1
                K = K1
            else:
                K = np.empty(K0.val.shape)
                if mixer.mixing is None:
                    mixer.mixing, h2 = _find_mixing_from_Ks(K, covar, K0.val, K1.val, h2, y)
                _mix_from_Ks(K, K0.val, K1.val, mixer.mixing)
                assert K.shape[0] == K.shape[1] and abs(np.diag(K).sum() - K.shape[0]) < 1e-7, "Expect mixed K to be standardized"
                K = KernelData(val=K,iid=K0.iid)

        return K, h2, mixer
コード例 #17
0
File: pairs.py  Project: eric-czech/PySnpTools
                snpdata = pairs.read()#
                #print(snpdata.val)

    import datetime
    from pysnptools.kernelreader import SnpKernel
    from pysnptools.standardizer import Unit
    from pysnptools.util.mapreduce1.runner import LocalMultiProc
    from pysnptools.util.mapreduce1 import map_reduce
    #runner=None
    runner = LocalMultiProc(1,just_one_process=False)

    part_pair_count = (part_count*part_count+part_count)//2
    part_pair_index = -1
    print("part_pair_count={0:,}".format(part_pair_count))

    K0 = SnpKernel(synbed,standardizer=Unit()).read() #Precompute the similarity

    start_time = datetime.datetime.now()
    for i,part_i in enumerate(part_list):
        def mapper1(j):
            #from fastlmm.association import single_snp
            #from pysnptools.snpreader import Pairs
            #print('Z')
            #part_j = part_list[j]
            #print('A')
            print("Looking at pair {0},{1} which is {2} of {3}".format(i,j,part_pair_index+j+1,part_pair_count))
            #pairs = Pairs(part_i) if i==j else Pairs(part_i,part_j)
            #result_df_ij = single_snp(pairs, K0=K0, pheno=pheno_fn, covar=cov_fn, leave_out_one_chrom=False, count_A1=True)
            #print(result_df_ij[:1])
            #return result_df_ij