def test_days_taking(self):
     patient = Patient(prescriptions=[Prescription("Paracetamol", dispense_date=date.today() - timedelta(days=2), days_supply=2),
                                      Prescription("Paracetamol", dispense_date=date.today() - timedelta(days=3), days_supply=2)])
     assert patient.days_taking("Paracetamol") == {date.today() - timedelta(days=3),
                                                   date.today() - timedelta(days=2),
                                                   date.today() - timedelta(days=1)}
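
The Patient and Prescription classes exercised by these tests are not shown on this page. A minimal sketch consistent with the assertions could look like the following; the days_ago helper used by the later tests and the exclusion of today from clash are inferences from the tests, not code taken from the original source.

from datetime import date, timedelta


def days_ago(days):
    # Test helper used by several examples below; assumed, not shown in the source.
    return date.today() - timedelta(days=days)


class Prescription:
    # Field names follow the keyword arguments used in the tests.
    def __init__(self, name, dispense_date, days_supply):
        self.name = name
        self.dispense_date = dispense_date
        self.days_supply = days_supply


class Patient:
    def __init__(self, prescriptions=None):
        self.prescriptions = prescriptions or []

    def days_taking(self, medicine_name):
        # Every calendar day covered by any prescription of the named medicine.
        return {
            prescription.dispense_date + timedelta(days=offset)
            for prescription in self.prescriptions
            if prescription.name == medicine_name
            for offset in range(prescription.days_supply)
        }

    def clash(self, medicine_names):
        # Days before today on which all listed medicines are taken at once;
        # the "overlapping today" test below implies today itself is excluded.
        if not medicine_names:
            return set()
        day_sets = [self.days_taking(name) for name in medicine_names]
        return {day for day in set.intersection(*day_sets) if day < date.today()}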
     
Example #2
def initData():
   """Load data and mappings from Raw data files and mapping files"""
   Patient.load()
   Med.load()
   Problem.load()
   Lab.load()
   Refill.load()
Example #3
 def add_patient_clicked(self):
     text, ok = QtGui.QInputDialog.getText(self, '', 'Enter patient name:')
     if ok:
         new_file_name = os.path.join(PATIENT_DIR, str(text) + '.pkl')
         if os.path.exists(new_file_name):
             QtGui.QMessageBox.about(self, 'Error', 'Name already exists!')
             return  # bail out here, otherwise new_patient is never created below
         else:
             new_patient = Patient(str(text))
     else:
         return
     text, ok = QtGui.QInputDialog.getInt(self, '', 'Enter patient age:')
     if ok:
         new_patient.age = int(text)
     else:
         return
     text, ok = QtGui.QInputDialog.getText(self, '', 'Enter patient diagnosis:')
     if ok:
         new_patient.diagnosis = str(text)
     else:
         return
     text, ok = QtGui.QInputDialog.getItem(self, '', 'Enter patient gender:', ['Female', 'Male', 'Other'])
     if ok:
         new_patient.gender = str(text)
     else:
         return
     with open(new_file_name, 'wb') as patient_file:
         pickle.dump(new_patient, patient_file)
     self.update_patient_list()
Example #4
 def test_clash_with_two_different_prescriptions(self):
     patient = Patient(
         prescriptions=[
             Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=2),
             Prescription("Prozac", dispense_date=days_ago(days=2), days_supply=2),
         ]
     )
     assert patient.clash(["Codeine", "Prozac"]) == {days_ago(days=2), days_ago(days=1)}
Example #5
 def test_clash_with_two_prescriptions_for_same_medication(self):
     patient = Patient(
         prescriptions=[
             Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=2),
             Prescription("Codeine", dispense_date=days_ago(days=3), days_supply=2),
         ]
     )
     assert patient.clash(["Codeine"]) == {days_ago(days=3), days_ago(days=2), days_ago(days=1)}
Example #6
 def test_days_taking(self):
     patient = Patient(
         prescriptions=[
             Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=2),
             Prescription("Codeine", dispense_date=days_ago(days=3), days_supply=2),
         ]
     )
     assert patient.days_taking("Codeine") == {days_ago(days=3), days_ago(days=2), days_ago(days=1)}
Example #7
 def test_clash_overlapping_today(self):
     patient = Patient(
         prescriptions=[
             Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=3),
             Prescription("Prozac", dispense_date=days_ago(days=2), days_supply=3),
         ]
     )
     assert patient.clash(["Codeine", "Prozac"]) == {days_ago(days=2), days_ago(days=1)}
Example #8
def index():
    form = MyForm()
    results = None
    resultsPrint = None
    if form.validate_on_submit():
        # Example biomarker: estrogen receptor
        patient_file = Patient(form.age.data, form.age_unit.data, form.gender.data, form.biomarker.data)
        results = searcher.search(patient_file._get_query_string())
        resultsPrint = searcher.print_results(results, form.biomarker.data)
    return render_template('search.html', form=form, results=resultsPrint)
def script(patient_filename, hpo_filename, disease_phenotype_filename, 
           orphanet_lookup_filename=None, orphanet_prevalence_filename=None, proto=None, 
           use_disease_prevalence=False, use_phenotype_frequency=False, 
           use_patient_phenotypes=False, distribute_ic_to_leaves=False,
           use_aoo=False, scores=None):
    hpo = HPO(hpo_filename, new_root='HP:0000118')
    diseases = Diseases(disease_phenotype_filename)

    orphanet = None
    if orphanet_lookup_filename and orphanet_prevalence_filename:
        orphanet = Orphanet(orphanet_prevalence_filename, lookup_filename=orphanet_lookup_filename)

    patients = [patient 
                for patient in Patient.iter_from_file(patient_filename, hpo)
                if patient.hp_terms]

    if proto:
        proto = [patient 
                 for patient in Patient.iter_from_file(proto, hpo)
                 if patient.hp_terms]

    if use_patient_phenotypes:
        use_patient_phenotypes = patients

    hpoic = HPOIC(hpo, diseases, orphanet=orphanet, patients=use_patient_phenotypes,
                  use_disease_prevalence=use_disease_prevalence,
                  use_phenotype_frequency=use_phenotype_frequency,
                  distribute_ic_to_leaves=distribute_ic_to_leaves)

    total_patient_logprob = 0
    for patient in patients:
        total_patient_logprob += hpoic.information_content(patient.hp_terms)

    logging.info('Total patient logprob: {:.1f}'.format(-total_patient_logprob))

    header = None
    for i in range(len(patients)):
        patient = patients[i]
        id1 = patient.external_id if patient.external_id else patient.id
        compare_against = [patients[j] for j in range(i+1, len(patients))]
        if proto:
            compare_against.extend(proto)

        for o in compare_against:
            id2 = o.external_id if o.external_id else o.id
            sims = compare_patients(hpoic, patient, o, scores=scores, use_aoo=use_aoo)
            if header is None:
                header = sorted(sims)
                print('\t'.join(['A', 'B'] + header))

            sim_strs = ['{:.6f}'.format(sims[sim]) for sim in header]
            for sim, sim_str in zip(header, sim_strs):
                logging.debug('{}: {}'.format(sim, sim_str))
            print('\t'.join([id1, id2] + sim_strs))
Example #10
def initData():
   """Load data and mappings from Raw data files and mapping files"""
   Patient.load()
   Med.load()
   Problem.load()
   Lab.load()
   Refill.load()
   VitalSigns.load()
   Immunization.load()
   Procedure.load()
   SocialHistory.load()
   FamilyHistory.load()
   Allergy.load()
Example #11
def script(patient_hpo_filename, hpo_filename, disease_phenotype_filename, 
           **kwargs):
    hpo = HPO(hpo_filename, new_root='HP:0000118')
    diseases = Diseases(disease_phenotype_filename)
    hpoic = HPOIC(hpo, diseases)

    print('\t'.join(['Patient ID', 'External ID', 'IC']))
    for patient in Patient.iter_from_file(patient_hpo_filename, hpo):
        print('\t'.join(map(str, [patient.id, patient.external_id, hpoic.information_content(patient.hp_terms)])))
 def test_clash_with_two_different_prescriptions(self):
     patient = Patient(prescriptions=[Prescription("Paracetamol", dispense_date=date.today() - timedelta(days=2), days_supply=2),
                                      Prescription("Aspirin",     dispense_date=date.today() - timedelta(days=2), days_supply=2)])
     assert patient.clash(["Paracetamol", "Aspirin"]) == {date.today() - timedelta(days=2), date.today() - timedelta(days=1)}
Example #13
    
    def apply(self, param):
        param.name = '{}:{}'.format(param.name, self.value)
    

# announce all handlers
FHIRSearchParamHandler.announce_handler(FHIRSearchParamModifierHandler)
FHIRSearchParamHandler.announce_handler(FHIRSearchParamOperatorHandler)
FHIRSearchParamHandler.announce_handler(FHIRSearchParamMultiHandler)
FHIRSearchParamHandler.announce_handler(FHIRSearchParamTypeHandler)


if '__main__' == __name__:
    from patient import Patient
    print('1 '+FHIRSearch(Patient, {'name': 'Willis'}).construct())
    print('1 '+Patient.where({'name': 'Willis'}).construct())
    print('1 '+Patient.where().name('Willis').construct())
    print('= Patient?name=Willis')
    print('')
    print('2 '+FHIRSearch(Patient, {'name': {'$exact': 'Willis'}}).construct())
    print('= Patient?name:exact=Willis')
    print('')
    print('3 '+FHIRSearch(Patient, {'name': {'$or': ['Willis', 'Wayne', 'Bruce']}}).construct())
    print('= Patient?name=Willis,Wayne,Bruce')
    print('')
    print('4 '+FHIRSearch(Patient, {'name': {'$and': ['Willis', {'$exact': 'Bruce'}]}}).construct())
    print('= Patient?name=Willis&name:exact=Bruce')
    print('')
    print('5 '+FHIRSearch(Patient, {'birthDate': {'$gt': '1950', '$lte': '1970'}}).construct())
    print('= Patient?birthDate=>1950&birthDate=<=1970')
    print('')
Example #14
      # Show progress with '.' characters
      print ".", 
      sys.stdout.flush()
    parser.exit(0,"\nDone writing %d patient RDF files!"%len(Patient.mpi))

  # Write all patient RDF files out to a directory
  if args.writeIndivo:
    print "Writing files to %s:"%args.writeIndivo
    initData()
    path = args.writeIndivo
    if not os.path.exists(path):
      parser.error("Invalid path: '%s'.Path must already exist."%path)
    if not path.endswith('/'): path = path+'/' # Works with DOS? Who cares??

    import indivo

    for pid in Patient.mpi:
      f = open(path+"p%s.py"%pid,'w')
      indivo.writePatientFile(f, pid)
      f.close()
      # Show progress with '.' characters
      print ".", 
      sys.stdout.flush()
    parser.exit(0,"\nDone writing %d patient RDF files!"%len(Patient.mpi))

  # Generate a new patients data file, re-randomizing old names, dob, etc:
  Patient.generate()  
  parser.exit(0,"Patient data written to: %s\n"%PATIENTS_FILE)

  parser.error("No arguments given")
Example #15
# A small function to strip input down to the first phrase or sentence, to
# remove punctuation and capitalization, and to collapse superfluous whitespace.
# It returns the input as a single lowercased, punctuation-free string of words.
def input_tenderizer(input_str):
    # Begin by tokenizing the sentence into individual, whitespace separated
    # components.
    temp = input_str.lower().translate(None, ',.?!:;\'\"').split()

    # Translate second person to first person pronouns.

    # Remove any punctuation from the tokens in temp.
    #for x in temp:
	#    x = x.translate(None, ',.?!:;\'\"').upper()
    # We might be able to terminate the sentence with str.find

    # Return the list.
    return ' '.join(temp)


patient = Patient()
patient.otherPersonName = 'individual'
#var = "Hello, cruel world!"
#print(var)
#while (var != "Die"):

print "Hello " + patient.otherPersonName
while True:
    print '<' + patient.name + '> ' + patient.get_response(input_tenderizer(raw_input('<' + patient.otherPersonName + '> '))).lower().capitalize();

print("Goodbye, cruel world!")
 def add_patient(self):
     name = input("Enter patient name:\n")
     info = input("Enter patient condition:\n")
     self.new_pat_list.append(Patient(name, info))
     print(f"{name} has been added to the patient list.")
     print("")
Example #17
 def test_clash_with_one_prescription(self):
     patient = Patient(prescriptions=[Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=2)])
     assert patient.clash(["Codeine"]) == {days_ago(days=2), days_ago(days=1)}
Example #18
from config import pid_noConcussion, pid_3stepProtocol, pid_testRetest, pid_concussion, feature_functions, epoch_size, \
    channels
from patient import Patient

# go through each list of ids
for lst in [
        pid_noConcussion, pid_3stepProtocol, pid_testRetest, pid_concussion
]:
    # for each id...
    for pid in lst:
        print("Processing pid: {}".format(pid))
        p = Patient(pid, load_session_examples=False, load_session_raw=True)
        # generate file for pre_test
        if p.pre_test is not None:
            p.pre_test.remove_artifacts()
            p.pre_test.get_examples(feature_functions,
                                    epoch_size=epoch_size,
                                    channels=channels)
            p.pre_test.save_examples()
        if p.post_test is not None:
            # generate file for post_test
            p.post_test.remove_artifacts()
            p.post_test.get_examples(feature_functions,
                                     epoch_size=epoch_size,
                                     channels=channels)
            p.post_test.save_examples()
Example #19
                                delimiter=',')
            seis1 = np.loadtxt('seis1_' + save_file_name + '.csv',
                               delimiter=',')
            seis2 = np.loadtxt('seis2_' + save_file_name + '.csv',
                               delimiter=',')
            phono1 = np.loadtxt('phono1_' + save_file_name + '.csv',
                                delimiter=',')
            phono2 = np.loadtxt('phono2_' + save_file_name + '.csv',
                                delimiter=',')

        else:
            # Change directory
            wd = 'data/' + folder_name + '/files_of_interest'

            # Load TDMS file into Patient object
            patient = Patient(wd, file_name)

            # Declare time and signal
            if use_intervals and files[folder_name][dosage][1][
                    "intervals"] != ["None"]:
                start = (start_time -
                         np.min(patient.times)) * patient.frequency
                end = patient.total_time * patient.frequency if end_time == "end" else (
                    end_time - np.min(patient.times)) * patient.frequency
                interval = range(int(start), int(end))
                time, signal, seis1, seis2, phono1, phono2 = patient.get_interval(
                    interval)
            else:
                time = patient.times
                signal = patient.ecg
                seis1 = patient.seis1
Example #20
    for i in range(d):
        Q[i, i] = -np.sum(Q[i, :])
    Q_true = Q
    pi0 = np.array([0.5, 0.25, 0.25])

    globalParams = {}

    rate1 = np.random.uniform(0, 2)
    rate2 = np.random.uniform(0, 2)
    rate3 = np.random.uniform(0, 2)
    Q = np.array([[-rate1, rate1 / 2, rate1 / 2],
                  [rate2 / 3, -rate2, 2 * rate2 / 3],
                  [2 * rate3 / 4, 2 * rate3 / 4, -rate3]])
    globalParams['Q'] = Q
    globalParams['numStates'] = 3
    T_max = 5000

    N = 2

    patients = []
    for n in range(N):
        patient = Patient()
        patient.initialize_randomly(Q_true, globalParams, T_max, pi0)
        patients.append(patient)

    for i in range(50):
        globalParams['Q'] = EM_step(globalParams['Q'], patients, globalParams,
                                    pi0)
        print(Q_true)
        print(globalParams['Q'])
Example #21
 def __get_patient_object(self, user_id):
     self.__validate_patient_id(user_id)
     self.cursor.execute(PATIENT_DATA, (user_id, ))
     data = self.cursor.fetchone()
     return Patient(data[0], data[1], data[2], data[3])
Example #22
                    if self.beds[key] == 0:
                        self.beds[key] = patients[x]
                        patients[x].bedNum = key
                        break  # breaks so that once the patient being appended is assigned a bed number it doesn't keep looping
            else:
                print 'Hospital is full!!'
        return self

    def discharge(self, patient):
        self.patients.remove(patient)
        self.beds[patient.bedNum] = 0
        patient.bedNum = None
        return self


patient1 = Patient('Faith', ('kindess', 'goofy', 'caring'))
patient2 = Patient('Tiwa', ('cute', 'ratchet', 'loving'))
patient3 = Patient('Yako', ('ratchet', 'tall', 'pseudo english'))
patient4 = Patient('Wura', ('ratchet', 'tall', 'pseudo english'))
patient5 = Patient('Vitali', ('kindess', 'goofy', 'caring'))
patient6 = Patient('Matt', ('cute', 'ratchet', 'loving'))
patient7 = Patient('John', ('ratchet', 'tall', 'pseudo english'))
patient8 = Patient('Ryan', ('ratchet', 'tall', 'pseudo english'))

hospital1 = Hospital('Premier', 5)

hospital1.admit(
    patient1, patient2, patient3
)  # when a patient is admitted he gets a bed and the bed key becomes his bed number
hospital1.admit(patient7)
print len(hospital1.patients)
Example #23
 def test_days_taking_for_irrelevant_prescription(self):
     patient = Patient(prescriptions=[
         Prescription(
             "Codeine", dispense_date=days_ago(days=2), days_supply=2)
     ])
     assert patient.days_taking("Prozak") == set()
Example #24
 def test_clash_with_one_irrelevant_prescription(self):
     patient = Patient(prescriptions=[
         Prescription(
             "Codeine", dispense_date=days_ago(days=2), days_supply=2)
     ])
     assert patient.clash(["Prozac"]) == set()
 def test_days_taking_for_irrelevant_prescription(self):
     patient = Patient(prescriptions=[Prescription("Paracetamol", dispense_date = date.today() - timedelta(days=2), days_supply=2)])
     assert patient.days_taking("Aspirin") == set()
Example #26
 def test_clash_with_no_prescriptions(self):
     patient = Patient(prescriptions=[])
     assert patient.clash([]) == set()
Example #27
    def load(cls, path):
        # General
        try:
            with zipfile.ZipFile(path, "r") as zip:
                namelist = zip.namelist()

                # Load Settings
                settings_path = next(name for name in namelist
                                     if str.endswith(name, ".settings"))
                settings = None
                try:
                    settings = ProjectPreferences.from_json_file(
                        zip.extract(settings_path))
                except OSError:
                    print("[1/1] Loading of the settings file %s failed" %
                          settings_path)
                else:
                    print("[1/1] Successfully loaded the settings file %s" %
                          settings_path)

                # Load Patients
                patients_path = [
                    name for name in namelist
                    if str.endswith(name, ".patient")
                ]
                patients = []
                for i, patient_path in enumerate(patients_path):
                    try:
                        patients.append(
                            Patient.from_json_file(zip.extract(patient_path)))
                    except OSError:
                        print(
                            "[{0}/{1}] Loading of the patient file {2} failed".
                            format(i + 1, len(patients_path), patient_path))
                    else:
                        print(
                            "[{0}/{1}] Successfully loaded the patient file {2}"
                            .format(i + 1, len(patients_path), patient_path))

                # Load Groups
                groups_path = [
                    name for name in namelist if str.endswith(name, ".group")
                ]
                groups = []
                for i, group_path in enumerate(groups_path):
                    try:
                        groups.append(
                            Group.from_json_file(zip.extract(group_path),
                                                 patients))
                    except OSError:
                        print("[{0}/{1}] Loading of the group file {2} failed".
                              format(i + 1, len(groups_path), group_path))
                    else:
                        print(
                            "[{0}/{1}] Successfully loaded the group file {2}".
                            format(i + 1, len(groups_path), group_path))

                # Load Protocols
                protocols_path = [
                    name for name in namelist if str.endswith(name, ".prov")
                ]
                protocols = []
                for i, protocol_path in enumerate(protocols_path):
                    try:
                        protocols.append(
                            Protocol.from_json_file(
                                zip.extract(protocol_path)))
                    except OSError:
                        print(
                            "[{0}/{1}] Loading of the protocol file {2} failed"
                            .format(i + 1, len(protocols_path), protocol_path))
                    else:
                        print(
                            "[{0}/{1}] Successfully loaded the protocol file {2}"
                            .format(i + 1, len(protocols_path), protocol_path))

                # Load Datasets
                datasets_path = [
                    name for name in namelist
                    if str.endswith(name, ".dataset")
                ]
                datasets = []
                for i, dataset_path in enumerate(datasets_path):
                    try:
                        datasets.append(
                            Dataset.from_json_file(zip.extract(dataset_path),
                                                   protocols, patients))
                    except OSError:
                        print(
                            "[{0}/{1}] Loading of the dataset file {2} failed".
                            format(i + 1, len(datasets_path), dataset_path))
                    else:
                        print(
                            "[{0}/{1}] Successfully loaded the dataset file {2}"
                            .format(i + 1, len(datasets_path), dataset_path))

                # Load Visualizations
                visualizations_path = [
                    name for name in namelist
                    if str.endswith(name, ".visualization")
                ]
                visualizations = []
                for i, visualization_path in enumerate(visualizations_path):
                    try:
                        visualizations.append(
                            Visualization.from_json_file(
                                zip.extract(visualization_path), patients,
                                datasets))
                    except OSError:
                        print(
                            "[{0}/{1}] Loading of the visualization file {2} failed"
                            .format(i + 1, len(visualizations_path),
                                    visualization_path))
                    else:
                        print(
                            "[{0}/{1}] Successfully loaded the visualization file {2}"
                            .format(i + 1, len(visualizations_path),
                                    visualization_path))

                print("Successfully loaded the project %s " % path)
                return cls(settings, patients, groups, protocols, datasets,
                           visualizations)
        except OSError:
            print("Loading of the project %s failed" % path)
# It has a Queue which is set through the constructor.
# It has a method to add a Patient to the queue.
# It has a method to treat the next patient in the queue.

from patient import Patient
from my_queue import My_Queue


class Hospital:
    def __init__(self, my_queue):
        self.my_queue = my_queue

    def add_a_patient(self, patient):
        self.my_queue.addPatients(patient)

    def treat_next_patient(self):
        next_person = self.my_queue.get_next_person()
        for patient in self.my_queue:
            if patient == next_person:
                patient.treat()


my_queue = My_Queue()
patient1 = Patient('Lucy', 38, 'female')
patient2 = Patient('Nicy', 56, 'female')
my_queue.addPatients(patient1)
hospital = Hospital(my_queue)

hospital.add_a_patient(patient2)
hospital.treat_next_patient()
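
The my_queue module imported above is not included here. Judging from the calls Hospital makes (addPatients, get_next_person, and iteration), a minimal sketch of My_Queue might look like this; the FIFO behaviour and the peek semantics of get_next_person are assumptions.

from collections import deque


class My_Queue:
    def __init__(self):
        self._patients = deque()

    def addPatients(self, patient):
        # Name kept as used by Hospital; adds one patient to the back of the queue.
        self._patients.append(patient)

    def get_next_person(self):
        # Peek at the longest-waiting patient without removing them, since
        # Hospital.treat_next_patient still iterates over the queue afterwards.
        return self._patients[0] if self._patients else None

    def __iter__(self):
        return iter(self._patients)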
Example #29
class TestPatient(unittest.TestCase):
    def setUp(self):
        from datetime import date

        self.day = date(2018, 2, 1)
        self.appt_fields = ['Date', 'Appt Time', 'Type']
        self.patient_fields = [
            'MRN', 'Patient', 'Pref Language', 'Mobile #', 'Pt. E-mail Address'
        ]
        self.patient = Patient(self.patient_fields, [
            1445578838, 'patient_name', 'English', '333-333-3333',
            '*****@*****.**'
        ])

    def reset_appts(self):
        self.patient.appts = {}

    def test_non_english_speaking_patient(self):
        self.patient.language = 'French'
        assert not self.patient.enrolled(self.day)
        assert self.patient.care_tour == None
        self.patient.language = 'English'

    def test_patient_with_past_appt(self):
        self.patient.add_appt(self.appt_fields,
                              ['1/20/18', '9:00 AM', 'THORACENTESIS'])
        assert not self.patient.enrolled(self.day)
        assert self.patient.care_tour == None

    def test_patient_with_appt_tmrw(self):
        self.patient.add_appt(self.appt_fields,
                              ['2/2/18', '10:00 AM', 'PLEURX'])
        assert not self.patient.enrolled(self.day)
        assert self.patient.care_tour == None
        self.reset_appts()

    def test_patient_with_appt(self):
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '10:00 AM', 'FLEX BRONCH WITH BAL'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 1
        self.reset_appts()

        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '11:00 AM', 'TRANS BRONCH BX'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 1
        self.reset_appts()

        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '12:00 PM', 'THORACENTESIS'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 3
        self.reset_appts()

        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '1:00 PM', 'PLEURX'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 4
        self.reset_appts()

    def test_patient_with_same_day_appts(self):
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '9:00 AM', 'THORACENTESIS'])
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '10:00 AM', 'FLEX BRONCH WITH BAL'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 1
        self.reset_appts()

        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '9:00 AM', 'TRANS BRONCH BX'])
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '10:00 AM', 'PLEURX'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 1
        self.reset_appts()

        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '9:00 AM', 'THORACENTESIS'])
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '10:00 AM', 'PLEURX'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 3
        self.reset_appts()

        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '9:00 AM', 'PLEURX'])
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '10:00 AM', 'FLEX BRONCH WITH BAL'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 4
        self.reset_appts()

    def test_patient_with_diff_day_appts(self):
        self.patient.add_appt(self.appt_fields,
                              ['2/3/18', '9:00 AM', 'THORACENTESIS'])
        self.patient.add_appt(self.appt_fields,
                              ['2/4/18', '8:00 AM', 'FLEX BRONCH WITH BAL'])
        assert self.patient.enrolled(self.day)
        assert self.patient.care_tour == 3
        self.reset_appts()
Example #30
 def test_clash_with_no_prescription(self):
     patient = Patient(prescriptions=[])
     assert patient.clash([]) == set()
def patients():
    st.header('PATIENTS')
    option_list = ['', 'Add patient', 'Update patient', 'Delete patient', 'Show complete patient record', 'Search patient']
    option = st.sidebar.selectbox('Select function', option_list)
    p = Patient()
    if (option == option_list[1] or option == option_list[2] or option == option_list[3]) and verify_edit_mode_password():
        if option == option_list[1]:
            st.subheader('ADD PATIENT')
            p.add_patient()
        elif option == option_list[2]:
            st.subheader('UPDATE PATIENT')
            p.update_patient()
        elif option == option_list[3]:
            st.subheader('DELETE PATIENT')
            try:
                p.delete_patient()
            except sql.IntegrityError:      # handles foreign key constraint failure issue (due to integrity error)
                st.error('This entry cannot be deleted as other records are using it.')
    elif option == option_list[4]:
        st.subheader('COMPLETE PATIENT RECORD')
        p.show_all_patients()
    elif option == option_list[5]:
        st.subheader('SEARCH PATIENT')
        p.search_patient()
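
verify_edit_mode_password() is called above but not shown. One plausible minimal version, in which the stored password and the exact messages are assumptions, gates the add/update/delete options behind a sidebar password prompt:

import streamlit as st

EDIT_MODE_PASSWORD = 'change-me'  # assumed; in practice this would come from config/secrets


def verify_edit_mode_password():
    password = st.sidebar.text_input('Enter edit mode password', type='password')
    if password == EDIT_MODE_PASSWORD:
        st.sidebar.success('Edit mode enabled.')
        return True
    if password:
        st.sidebar.error('Incorrect password.')
    return False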
Example #32
        centroids = vertices[:,:3]
        positions = np.repeat(centroids, centroids.shape[0],axis=0) - np.vstack([centroids]*centroids.shape[0])
        #contrasts = np.repeat(intensities, intensities.shape[0],axis=0) / np.vstack([intensities]*intensities.shape[0])
        #ratios = np.repeat(sizes, sizes.shape[0],axis=0) / np.vstack([sizes]*sizes.shape[0])
        # Assemble relational attributes as the edges matrix
        edges = positions#np.concatenate([positions, contrasts, ratios],axis=-1)

        # Initializing and returning the SRG
        return SRG(vertices, edges, ["centroid_x", "centroid_y", "centroid_z", "intensity", "size"], ["position"])#, "contrast", "ratio"])


#if __name__ == '__main__':
# Step 1: Loading data
# -----------------------
print("# Step 1: Loading data")
model_patient = Patient.build_from_folder("data/4")
model_volume, model_labelmap = model_patient.volumes['t2'], model_patient.labelmaps['t2']
# Reconfiguring model_labelmap with extra backgrounds and unified liver
model_labelmap.data += 2 # Adding space for the extra labels at the start
model_labelmap.data[np.logical_and(model_volume.data < 10, model_labelmap.data == 2)] = 0 # posterior background is 0
model_labelmap.data[model_labelmap.data.shape[1]//2:,:,:][model_labelmap.data[model_labelmap.data.shape[1]//2:,:,:] == 0] = 1 # anterior background is 1
model_labelmap.data[model_labelmap.data >= 4] = 4
model_labelmap.header["num_labels"] = 5
display_overlayed_volume(model_volume.data, model_labelmap.data, label_colors=[(0,0,0),(0.5,0.5,0.5),(1,1,1),(0,0,1),(1,0,0)], title="Model")

observation_volume = deepcopy(model_volume)

# Step 2: Generating model graph
# -----------------------
print("# Step 2: Generating model graph")
model_graph = build_graph(model_volume, model_labelmap)
 def add_patient(self):
     self._canvas.destroy()
     self.heading.destroy()
     Patient(self.root)
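
The plotting snippet below begins mid-function. Based on the names it uses (fig, day_zero, medicine_names, filename) and the call in the __main__ block at the end, its missing opening lines were presumably along these lines; this is a reconstruction, not the original code:

import matplotlib.pyplot as plt
from datetime import date, timedelta


def plot(patient, medicine_names, filename):
    # day_zero anchors the x-axis at today; dispense dates are plotted relative to it.
    day_zero = date.today().toordinal()
    fig = plt.figure()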
    ax = fig.add_subplot(111)

    for i, medicine_name in enumerate(medicine_names):
        prescriptions = filter(lambda p: p.name == medicine_name, patient.prescriptions)
        days_to_plot = [(p.dispense_date.toordinal() - day_zero, p.days_supply) for p in prescriptions]
        ax.broken_barh(days_to_plot, (10 * (i + 1), 9), facecolors=["red", "yellow", "green", "blue", "orange"][i])

    days_taking_all = sorted([(day.toordinal() - day_zero, 1) for day in patient.clash(medicine_names)])
    ax.broken_barh(days_taking_all, (0, 9), facecolors="black")

    medicine_count = len(medicine_names)
    ax.set_ylim(-5, 10 * (medicine_count + 1) + 5)
    ax.set_xlim(-110, 0)
    ax.set_xlabel("relative to today")
    ax.set_yticks([i * 10 + 5 for i in range(medicine_count + 2)])
    ax.set_yticklabels(["Clash"] + medicine_names)
    ax.grid(True)

    fig.savefig(filename)


if __name__ == "__main__":
    patient = Patient()
    patient.add_prescription(Prescription("Fluoxetine", dispense_date=date.today() - timedelta(100), days_supply=20))
    patient.add_prescription(Prescription("Fluoxetine", dispense_date=date.today() - timedelta(50), days_supply=50))
    patient.add_prescription(Prescription("Codeine", dispense_date=date.today() - timedelta(15), days_supply=3))
    patient.add_prescription(Prescription("Codeine", dispense_date=date.today() - timedelta(60), days_supply=3))
    patient.add_prescription(Prescription("Codeine", dispense_date=date.today() - timedelta(30), days_supply=3))
    plot(patient, ["Fluoxetine", "Codeine"], "graph.png")
Example #35
from patient import Patient

patient = Patient()

patient.fill()
patient.show()
patient.diagnose()
Example #36
    def setUp(self):
        config = {
            "host": "localhost",
            "user": "******",
            "passwd": "MyNewPassword",
            "database": "test",
            "auth_plugin": "mysql_native_password"
        }
        self.connection = mysql.connector.connect(**config)

        self.cursor = self.connection.cursor()

        stmt_p = "SHOW TABLES LIKE 'patients'"
        self.cursor.execute(stmt_p)
        result = self.cursor.fetchone()
        if not result:
            self.cursor.execute("""CREATE TABLE `patients` (
                    `id` int NOT NULL AUTO_INCREMENT,
                    `name` varchar(250) NOT NULL,
                    `clinical_area` varchar(100) DEFAULT NULL,
                    `bed_num` int DEFAULT NULL,
                    `acuity` int DEFAULT NULL,
                    `a_trained` tinyint(1) DEFAULT NULL,
                    `transfer` tinyint(1) DEFAULT NULL,
                    `iv` tinyint(1) DEFAULT NULL,
                    `one_to_one` tinyint(1) DEFAULT NULL,
                    `previous_nurses` varchar(250) DEFAULT NULL,
                    `admission_date` varchar(250) DEFAULT NULL,
                    `discharged_date` varchar(250) DEFAULT '-',
                    `comments` varchar(250) DEFAULT NULL,
                    `twin` int DEFAULT NULL,
                    PRIMARY KEY (`id`))""")

        stmt_n = "SHOW TABLES LIKE 'nurses'"
        self.cursor.execute(stmt_n)
        result = self.cursor.fetchone()
        if not result:
            self.cursor.execute("""CREATE TABLE `nurses` (
                    `id` int NOT NULL AUTO_INCREMENT,
                    `name` varchar(250) DEFAULT '',
                    `clinical_area` varchar(250) DEFAULT NULL,
                    `bed_num` int DEFAULT NULL,
                    `rotation` varchar(250) DEFAULT NULL,
                    `group_num` int DEFAULT NULL,
                    `fte` decimal(3,2) DEFAULT NULL,
                    `skill_level` int DEFAULT NULL,
                    `a_trained` tinyint(1) DEFAULT NULL,
                    `transfer` tinyint(1) DEFAULT NULL,
                    `iv` int DEFAULT NULL,
                    `advanced_role` varchar(250) DEFAULT NULL,
                    `previous_patients` varchar(250) DEFAULT NULL,
                    `dta` varchar(250) DEFAULT '',
                    `comments` varchar(250) DEFAULT '',
                    `priority` int DEFAULT NULL,
                    `current_shift` tinyint(1) DEFAULT NULL,
                    PRIMARY KEY (`id`)) """)

        self.cursor.execute("""INSERT INTO patients VALUES
            (1,'not discharged','F',3,5,1,0,0,0,'[2,5]','2020-11-18','-','',0),
            (2,'patient discharged','F',6,3,1,0,1,1,'[]','2020-11-18','2020-11-20','',1)"""
                            )

        self.cursor.execute("""INSERT INTO nurses VALUES
            (1,'Rebecca Smith','B',1,'A01',1,0.50,
             5,0,0,3,'None','[27]','','',2,1),
            (2,'Holly Baker','D',3,'3',1,1.00,4,1,1,2,'Charge','[1, 2, 21]','1','1',2,0)"""
                            )

        self.assignments = {}
        self.patients = []
        self.nurses = []
        self.twins = []

        self.patient_1 = Patient(1, 'not discharged', 'F', 3, 5, 1, 0, 0, 0,
                                 '[2,5]', '2020-11-18', '-', '', 0)
        self.patient_1.assigned = 0

        self.patient_2 = Patient(2, 'patient discharged', 'F', 6, 3, 1, 0, 1,
                                 1, '[]', '2020-11-18', '2020-11-20', '', 1)
        self.patient_2.assigned = 1

        self.current_n = Nurse(1, 'Rebecca Smith', 'B', 1, 'A01', 1, 0.50, 5,
                               0, 0, 3, 'None', '[27]', '', '', 2, 1)
Example #37
def main():

    parser = argparse.ArgumentParser(description='Infers the evolution of cancer.')

    parser.add_argument("-m", "--mode", help="running mode: 1...run Treeomics and explore full solution space, "
                                             "2...fast (one mutation pattern per variant).",
                        type=int, default=1)

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--csv_file", help="path to the CSV file", type=str)
    group.add_argument("-v", "--vcf_file", help="path to the VCF file", type=str)
    group.add_argument("-d", "--directory", help="directory with multiple VCF files", type=str)

    parser.add_argument("-n", "--normal", help="names of normal samples (excluded from analysis)", type=str, nargs='*',
                        default=None)

    parser.add_argument("--suffix", help="suffix for output files", type=str, default=None)

    parser.add_argument("-x", "--exclude", help="names of samples to exclude of analysis", type=str, nargs='*',
                        default=None)
    parser.add_argument("--include", help="names of samples to include in analysis", type=str, nargs='*',
                        default=None)

    parser.add_argument("--purities", help="provide estimated purities for samples given by '--include <SAMPLES>'",
                        type=float, nargs='*', default=None)

    parser.add_argument("-r", "--mut_reads", help="path table with the number of reads with a mutation", type=str)
    parser.add_argument("-s", "--coverage", help="path to table with read coverage at the mutated positions", type=str)

    # specify output directory
    parser.add_argument("-o", "--output", help="output directory", type=str, default=settings.OUTPUT_FOLDER)

    # read in parameter value
    parser.add_argument("-e", "--error_rate", help="sequencing error rate for bayesian inference",
                        type=float, default=settings.BI_E)
    parser.add_argument("-z", "--prob_zero", help="prior probability of being absent",
                        type=float, default=settings.BI_C0)
    parser.add_argument("-a", "--max_absent_vaf", help="maximal absent vaf before considering purity",
                        type=float, default=settings.MAX_ABSENT_VAF)

    parser.add_argument("-c", "--min_median_coverage",
                        help="minimum median coverage of a sample",
                        type=int, default=settings.SAMPLE_COVERAGE_THRESHOLD)
    parser.add_argument("-f", "--min_median_vaf",
                        help="minimum median mutant allele frequency of a sample",
                        type=float, default=settings.MAF_THRESHOLD)

    parser.add_argument('-g', "--ref_genome",
                        help="to which reference genome was the sequencing data aligned",
                        type=str, default=settings.REF_GENOME)

    parser.add_argument('--wes_filtering', action='store_true', help="Remove intronic and intergenic variants?")

    # settings for filtering potential germline variants
    parser.add_argument(
        "--mut_reads_normal_th", type=float, default=settings.MUT_READS_NORMAL_TH,
        help="variants are excluded if they reach this number of mutant reads with a given VAF in the normal sample")
    parser.add_argument(
        "--vaf_normal_th", type=float, default=settings.VAF_NORMAL_TH,
        help="variants are excluded if they reach this VAF with a given number of mutant reads in the normal sample")

    parser.add_argument('--driver_genes', type=str, default=settings.DRIVER_PATH,
                        help='path to CSV file with names of putative driver genes highlighted in inferred phylogeny')

    parser.add_argument('--common_vars_file', type=str, default=settings.COMMON_VARS_FILE,
                        help='path to CSV file with common variants present in normal samples '
                             'and hence excl as artifacts')

    # DEPRECATED FROM VERSION 1.7.0 ONWARD
    parser.add_argument("-p", "--false_positive_rate",
                        help="false positive rate for the statistical test",
                        type=float, default=settings.FPR)
    parser.add_argument("-i", "--false_discovery_rate",
                        help="false discovery rate for the statistical test",
                        type=float, default=settings.FDR)
    parser.add_argument("-y", "--min_absent_coverage",
                        help="minimum coverage for a true negative (negative threshold)",
                        type=int, default=settings.MIN_ABSENT_COVERAGE)

    parser.add_argument('--verbose', action='store_true', help="Run Treeomics in DEBUG logging level.")

    plots_parser = parser.add_mutually_exclusive_group(required=False)
    plots_parser.add_argument('--plots', dest='plots', action='store_true', help="Is plot generation enabled?")
    plots_parser.add_argument('--no_plots', dest='plots', action='store_false', help="Is plot generation disabled?")
    parser.set_defaults(plots=True)

    plots_parser = parser.add_mutually_exclusive_group(required=False)
    plots_parser.add_argument('--tikztrees', dest='tikztrees', action='store_true', help='Generate Tikz trees?')
    plots_parser.add_argument('--no_tikztrees', dest='tikztrees', action='store_false', help='Generate Tikz trees?')
    parser.set_defaults(tikztrees=True)

    parser.add_argument('--benchmarking', action='store_true', help="Generate mutation matrix for benchmarking.")

    parser.add_argument('-b', '--boot', help='Number of bootstrapping samples', type=int,
                        default=settings.NO_BOOTSTRAP_SAMPLES)
    parser.add_argument('--pool_size', help='number of best solutions explored by ILP solver', type=int,
                        default=settings.POOL_SIZE)

    # limit search space exploration to decrease the run time
    parser.add_argument("-t", "--time_limit",
                        help="maximum running time for CPLEX to solve the MILP",
                        type=int, default=settings.TIME_LIMIT)
    parser.add_argument('--threads', help='maximal number of parallel threads that will be invoked by CPLEX',
                        type=int, default=0)
    parser.add_argument("-l", "--max_no_mps", help="limit the solution space size by the maximal number of " +
                                                   "explored mutation patterns per variant",
                        type=int, default=settings.MAX_NO_MPS)

    feature_parser = parser.add_mutually_exclusive_group(required=False)
    feature_parser.add_argument('-u', '--subclone_detection', dest='subclone_detection', action='store_true',
                                help="Is subclone detection enabled?")
    feature_parser.add_argument('--no_subclone_detection', dest='subclone_detection', action='store_false',
                                help="Is subclone detection disabled?")
    parser.set_defaults(subclone_detection=settings.SUBCLONE_DETECTION)

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)
        logger.info('Run Treeomics in verbose mode.')

    # disable plot generation if the script is called from another script (benchmarking)
    # set log level to info
    if not os.getcwd().endswith('treeomics') and not os.getcwd().endswith('src'):
        # logger.setLevel(logging.INFO)
        fh.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)

    if args.subclone_detection:
        logger.info('Subclone detection is enabled.')
    else:
        logger.info('Subclone detection is disabled.')

    if args.plots:
        logger.info('Plot generation is enabled.')
    else:
        logger.info('Plot generation is disabled.')

    plots_report = args.plots    # for debugging set to False
    plots_paper = logger.isEnabledFor(logging.DEBUG)
    # plots_report = False  # for debugging set to False
    # plots_paper = False

    if args.normal:
        normal_sample_name = args.normal[0]
        excluded_samples = set(args.normal[1:]) if len(args.normal) > 1 else set()
        logger.info('Exclude normal sample with name: ' + ', '.join(sa for sa in args.normal))

    else:
        normal_sample_name = None
        excluded_samples = set()

    # exclude given sample names from analysis
    if args.exclude is not None:
        for sample_name in args.exclude:
            logger.info('Exclude sample from analysis: {}'.format(sample_name))
            excluded_samples.add(sample_name)

    # include only the given sample names in the analysis
    if args.include is not None:
        included_samples = list()
        for sample_name in args.include:
            included_samples.append(sample_name)

        logger.info('Only the following samples are included in the analysis: {}'.format(
            ', '.join(sa_name for sa_name in included_samples)))
    else:
        included_samples = None

    if args.purities is not None:
        if included_samples is None or len(included_samples) != len(args.purities):
            raise ValueError('If externally estimated purity values are provided, the same number of samples must be '
                             'given by "--include <SAMPLE NAMES>"')
        purities = dict()
        for sample_name, purity in zip(included_samples, args.purities):
            purities[sample_name] = purity

    else:
        purities = None

    if args.min_median_coverage > 0:
        logger.info('Minimum sample median coverage (otherwise discarded): {}'.format(
            args.min_median_coverage))
    if args.min_median_vaf > 0:
        logger.info('Minimum sample median mutant allele frequency (otherwise discarded): {}'.format(
            args.min_median_vaf))

    if args.boot is not None:
        if args.boot < 0:
            raise AttributeError('Number of bootstrapping samples can not be negative!')

    if args.time_limit is not None:
        if args.time_limit <= 0:
            raise AttributeError('Time limit for the MILP solver needs to be positive!')
        logger.info('MILP solver running time is limited to {} seconds. Obtained solution may not be optimal.'.format(
            args.time_limit))

    if args.max_no_mps is not None:
        if args.max_no_mps <= 0:
            raise AttributeError('Solution space can only be limited to a positive number of mutation patterns!')
        logger.info('Solution space is limited to the {} most likely mutation patterns per variant.'.format(
            args.max_no_mps))

    if args.max_absent_vaf < args.error_rate:
        raise AttributeError('The maximal absent VAF has to be larger than the error rate in the Bayesian model!')

    if args.pool_size < 1:
        raise AttributeError('Solution pool size of ILP solver has to be at least 1!')

    # DEPRECATED FROM VERSION 1.7.0 ONWARD
    fpr = args.false_positive_rate
    logger.debug('False positive rate for the statistical test: {}.'.format(fpr))
    fdr = args.false_discovery_rate
    logger.debug('False discovery rate for the statistical test: {}.'.format(fdr))
    min_absent_cov = args.min_absent_coverage
    logger.debug('Minimum coverage for an absent variant: {} (otherwise unknown)'.format(min_absent_cov))

    ref_genome = args.ref_genome.lower() if args.ref_genome is not None else None
    logger.info('Sequencing data was aligned to reference genome: {}'.format(ref_genome))

    if args.boot > 0:
        logger.info('Number of samples for bootstrapping analysis: {}'.format(args.boot))

    if args.threads > 0:
        logger.info('The maximal number of parallel threads that will be invoked by CPLEX has been set to {}.'.format(
            args.threads))

    if args.subclone_detection and args.max_no_mps is not None:
        logger.error('Subclone and partial solution space search are not supported to be performed at the same time! ')
        usage()

    if os.getcwd().endswith('treeomics'):
        # application has been started from this directory
        input_directory = os.path.join('..')
    else:
        input_directory = os.path.join('.')

    if args.wes_filtering:
        logger.info('All intronic or intergenic variants will be excluded (WES filtering).')

    if args.common_vars_file:
        common_vars_filepath = os.path.join(input_directory, args.common_vars_file)
        if os.path.isfile(common_vars_filepath):
            common_vars = read_table(common_vars_filepath, ['Chromosome', 'Position', 'RefAllele', 'AltAllele'],
                                     ['__', '__', '>', ''], ['AlleleFrequency', 'Gene_Symbol'])
            logger.info('Read common variants file with {:.2e} variants that are excluded as artifacts.'.format(
                len(common_vars)))

        else:
            logger.error('Given path to common variants file {} could not be found!'.format(args.common_vars_file))
            common_vars = None
    else:
        common_vars = None

    # ##########################################################################################################
    # ############################################### LOAD DATA ################################################
    # ##########################################################################################################

    # take mutant read and coverage tables to calculate positives, negatives, and unknowns
    if args.mut_reads and args.coverage:

        patient_name = get_patients_name(args.mut_reads)
        if patient_name.find('_') != -1:
            patient_name = patient_name[:patient_name.find('_')]
        logger.debug('Patient name: {}'.format(patient_name))
        patient = Patient(error_rate=args.error_rate, c0=args.prob_zero, max_absent_vaf=args.max_absent_vaf,
                          pat_name=patient_name, min_absent_cov=min_absent_cov, reference_genome=ref_genome,
                          purities=purities)
        read_no_samples = patient.process_raw_data(
            fpr, fdr, min_absent_cov, args.min_median_coverage, args.min_median_vaf,
            mut_reads_normal_th=args.mut_reads_normal_th, vaf_normal_th=args.vaf_normal_th, var_table=args.mut_reads,
            cov_table=args.coverage, normal_sample=normal_sample_name, excluded_columns=excluded_samples,
            considered_samples=included_samples, wes_filtering=args.wes_filtering, artifacts=common_vars)

    elif args.csv_file:

        patient_name = get_patients_name(args.csv_file)
        if patient_name.find('_') != -1:
            patient_name = patient_name[:patient_name.find('_')]
        logger.debug('Patient name: {}'.format(patient_name))
        patient = Patient(error_rate=args.error_rate, c0=args.prob_zero, max_absent_vaf=args.max_absent_vaf,
                          pat_name=patient_name, min_absent_cov=min_absent_cov, reference_genome=ref_genome,
                          purities=purities)
        read_no_samples = patient.process_raw_data(
            fpr, fdr, min_absent_cov, args.min_median_coverage, args.min_median_vaf,
            mut_reads_normal_th=args.mut_reads_normal_th, vaf_normal_th=args.vaf_normal_th, csv_file=args.csv_file,
            normal_sample=normal_sample_name, excluded_columns=excluded_samples, considered_samples=included_samples)

    elif args.vcf_file:      # take path to the input VCF file
        vcf_file = args.vcf_file
        if not os.path.isfile(vcf_file):
            logger.error("Provided VCF file {} does not exist.".format(vcf_file))
            usage()

        patient_name = get_patients_name(vcf_file)
        if patient_name.find('_') != -1:
            patient_name = patient_name[:patient_name.find('_')]
        patient = Patient(error_rate=args.error_rate, c0=args.prob_zero, max_absent_vaf=args.max_absent_vaf,
                          pat_name=patient_name, reference_genome=ref_genome, purities=purities)
        read_no_samples = patient.read_vcf_file(
            vcf_file, fpr, fdr, min_sa_cov=args.min_median_coverage, min_sa_maf=args.min_median_vaf,
            min_absent_cov=args.min_absent_coverage, normal_sample_name=normal_sample_name,
            excluded_samples=excluded_samples, considered_samples=included_samples, wes_filtering=args.wes_filtering,
            artifacts=common_vars)

    elif args.directory:      # take path to the directory with all VCF files
        vcf_directory = args.directory

        if not os.path.isdir(vcf_directory):
            logger.error("Directory named {} does not exist ({}).".format(
                args.directory, os.path.abspath(vcf_directory)))
            usage()

        patient_name = get_patients_name(
            vcf_directory[:-1] if vcf_directory.endswith('/') else vcf_directory)
        patient = Patient(error_rate=args.error_rate, c0=args.prob_zero, max_absent_vaf=args.max_absent_vaf,
                          pat_name=patient_name, reference_genome=ref_genome, purities=purities)
        read_no_samples = patient.read_vcf_directory(
            vcf_directory, args.min_median_coverage, args.min_median_vaf, fpr, fdr, min_absent_cov,
            normal_sample_name=normal_sample_name, excluded_samples=excluded_samples,
            considered_samples=included_samples, wes_filtering=args.wes_filtering, artifacts=common_vars)

    else:
        raise RuntimeError('No input files were provided!')

    output_directory = init_output(patient_name=patient_name,
                                   output_dir=args.output if args.output is not settings.OUTPUT_FOLDER else None)

    # find and characterize all possible driver gene mutations
    # if settings.DRIVER_PATH is not None:
    #     driver_filepath = os.path.join(input_directory, settings.DRIVER_PATH)
    # else:
    #     driver_filepath = None
    if args.driver_genes:
        driver_filepath = args.driver_genes
        if not os.path.isfile(driver_filepath):
            driver_filepath = os.path.join(input_directory, args.driver_genes)
            if not os.path.isfile(driver_filepath):
                logger.warning('CSV-file with driver gene names for annotation was not found: {}'.format(
                    os.path.abspath(args.driver_genes)))
                driver_filepath = None

    if settings.CGC_PATH is not None:
        cgc_filepath = os.path.join(input_directory, settings.CGC_PATH)
    else:
        cgc_filepath = None

    put_driver_genes, put_driver_vars, unlikely_driver_mut_effects = characterize_drivers(
        patient, ref_genome, driver_filepath, cgc_filepath,
        output_filepath=os.path.join(
            output_directory, get_output_fn_template(patient.name, read_no_samples, suffix=args.suffix))
        + '_putativedrivers')

    # create output filename pattern
    fn_pattern = get_output_fn_template(
        patient.name, read_no_samples, mode=args.mode, min_sa_coverage=args.min_median_coverage,
        min_sa_vaf=args.min_median_vaf, bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0,
        max_absent_vaf=patient.max_absent_vaf, suffix=args.suffix)

    # do basic analysis on provided input data
    # create output file path for present posterior probabilities of the variants
    post_filepath = os.path.join(output_directory, get_output_fn_template(
        patient.name, read_no_samples, min_sa_coverage=args.min_median_coverage, min_sa_vaf=args.min_median_vaf,
        bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0, max_absent_vaf=patient.max_absent_vaf, suffix=args.suffix)
        + '_posterior.txt')

    # output file path to write all variants in a format acceptable to Ensembl VEP and CHASM/CRAVAT
    vep_filepath = (os.path.join(
        output_directory, get_output_fn_template(patient.name, read_no_samples, suffix=args.suffix))
        + '_func_variants_vep.tsv')
    cravat_filepath = (os.path.join(
        output_directory, get_output_fn_template(patient.name, read_no_samples, suffix=args.suffix))
        + '_func_variants_cravat.tsv')
    analyze_data(patient, post_table_filepath=post_filepath,
                 vep_filepath=vep_filepath, cravat_filepath=cravat_filepath)

    # write matrix with Jaccard similarity coefficients in CSV format
    fp_jsc = os.path.join(
        output_directory,
        get_output_fn_template(patient.name, read_no_samples,
                               min_sa_coverage=args.min_median_coverage, min_sa_vaf=args.min_median_vaf,
                               bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0, max_absent_vaf=patient.max_absent_vaf,
                               suffix=args.suffix)) + '_jsc.csv'
    with open(fp_jsc, 'w') as f_jsc:
        jsc_title = \
            ('# {}: Probabilistic Jaccard similarity coefficient between'.format(patient.name)
             + ' all pairs of samples (median: {:.2f}; mean: {:.2f}; classification threshold: {:.0%}).\n'.format(
             np.nanmedian(patient.df_bi_sim_coeff), np.nanmean(patient.df_bi_sim_coeff), def_sets.CLA_CONFID_TH))
        f_jsc.write(jsc_title)
    patient.df_bi_sim_coeff.to_csv(fp_jsc, index=True, mode='a')

    # write matrix with genetic distances in CSV format
    fp_gen_dist = os.path.join(
        output_directory,
        get_output_fn_template(patient.name, read_no_samples,
                               min_sa_coverage=args.min_median_coverage, min_sa_vaf=args.min_median_vaf,
                               bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0, max_absent_vaf=patient.max_absent_vaf,
                               suffix=args.suffix)) + '_gendist.csv'
    with open(fp_gen_dist, 'w') as f_gen_dist:
        gen_dist_title = \
            ('# {}: Probabilistic genetic distance between'.format(patient.name)
             + ' all pairs of samples (median: {:.2f}; mean: {:.2f}; classification threshold: {:.0%}).\n'.format(
             np.nanmedian(patient.df_bi_gen_dist), np.nanmean(patient.df_bi_gen_dist), def_sets.CLA_CONFID_TH))
        f_gen_dist.write(gen_dist_title)
    patient.df_bi_gen_dist.to_csv(fp_gen_dist, index=True, mode='a')
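    # Note: both CSV files above start with a single '#' header line followed by the data frame;
    # a sketch of reading one back (assuming pandas is imported as pd):
    #     df_sim = pd.read_csv(fp_jsc, index_col=0, comment='#')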

    if plots_report:   # deactivate plot generation for debugging and benchmarking

        # generate mutation table plot
        # show only mutations which are present in at least one sample
        if patient.gene_names is not None:
            col_labels = patient.gene_names
        else:
            col_labels = patient.mut_keys

        if len(col_labels) < def_sets.MAX_MUTS_TABLE_PLOT and plots_report:
            mut_table_name = \
                (get_output_fn_template(
                    patient.name, read_no_samples, min_sa_coverage=args.min_median_coverage,
                    min_sa_vaf=args.min_median_vaf, bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0,
                    max_absent_vaf=patient.max_absent_vaf, suffix=args.suffix)+'_bayesian_data_table')

            plts.bayesian_hinton(patient.log_p01, output_directory, mut_table_name,
                                 row_labels=patient.sample_names, column_labels=col_labels,
                                 displayed_mutations=patient.present_mutations, put_driver_vars=put_driver_vars)
        else:
            logger.warning('Too many reported variants for a detailed mutation table plot: {}'.format(len(col_labels)))
            mut_table_name = None
    else:
        mut_table_name = None
        col_labels = None

    if plots_paper:     # deactivate plot generation for regular analysis
        # generate violin coverage distribution plot
        plts.coverage_plot(os.path.join(output_directory, patient.name+'_coverage_distr.pdf'), patient)

        # generate box plots with MAFs per sample
        # plts.boxplot(os.path.join(output_directory, 'fig_mafs_'+patient.name+'.pdf'), patient)
        # generate violin VAF distribution plot
        plts.vaf_distribution_plot(os.path.join(output_directory, patient.name+'_vaf_distr.pdf'), patient)

    # create raw data analysis file
    analysis.create_data_analysis_file(patient, os.path.join(output_directory, fn_pattern+'_data.txt'))
    # utils.analysis.print_genetic_distance_table(patient)

    # create output filename pattern
    fn_pattern = get_output_fn_template(
        patient.name, read_no_samples, mode=args.mode, min_sa_coverage=args.min_median_coverage,
        min_sa_vaf=args.min_median_vaf, bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0,
        max_absent_vaf=patient.max_absent_vaf, max_no_mps=args.max_no_mps, suffix=args.suffix)

    # create HTML analysis report
    html_report = HTMLReport(os.path.join(output_directory, fn_pattern+'_report.html'), patient_name)
    html_report.start_report()
    html_report.add_sequencing_information(
        patient, mut_table_path=mut_table_name+'.png' if mut_table_name is not None else None,
        put_driver_vars=put_driver_vars, unlikely_driver_mut_effects=unlikely_driver_mut_effects,
        put_driver_genes=put_driver_genes)

    html_report.add_similarity_information(patient)

    # ############################################################################################
    # infer evolutionarily compatible mutation patterns and subsequently evolutionary trees based on
    # different principles, divided into three modes
    # ############################################################################################
    if args.mode == 1 or args.mode == 2:

        phylogeny = None
        # comp_node_frequencies = None

        # ### RUN TREEOMICS ###
        if args.mode == 1:     # find likely sequencing artifacts based on a bayesian inference model

            # generate filename for tree
            fn_tree = get_output_fn_template(
                patient.name, read_no_samples, subclone_detection=args.subclone_detection,
                min_sa_coverage=args.min_median_coverage, min_sa_vaf=args.min_median_vaf, no_boot=args.boot,
                max_no_mps=args.max_no_mps, bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0,
                max_absent_vaf=patient.max_absent_vaf, mode=args.mode, suffix=args.suffix)
            fn_matrix = get_output_fn_template(
                patient.name, read_no_samples, subclone_detection=args.subclone_detection,
                min_sa_coverage=args.min_median_coverage, min_sa_vaf=args.min_median_vaf, max_no_mps=args.max_no_mps,
                bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0, max_absent_vaf=patient.max_absent_vaf, mode=args.mode,
                suffix=args.suffix)

            # create mutation matrix and mutation patterns output file for automatic benchmarking
            if args.benchmarking:
                mm_filepath = os.path.join(output_directory, fn_matrix+'_treeomics_mm.csv')
                mp_filepath = os.path.join(output_directory, fn_matrix+'_treeomics_mps.tsv')
            else:
                mm_filepath = None
                mp_filepath = None

            # infer maximum likelihood tree
            phylogeny = ti.create_max_lh_tree(
                patient, tree_filepath=os.path.join(output_directory, fn_tree+'_mlhtree'),
                mm_filepath=mm_filepath, mp_filepath=mp_filepath,
                subclone_detection=args.subclone_detection, loh_frequency=settings.LOH_FREQUENCY,
                driver_vars=put_driver_vars, pool_size=args.pool_size,
                no_plotted_solutions=settings.NO_PLOTTED_SOLUTIONS, no_bootstrap_samples=args.boot,
                max_no_mps=args.max_no_mps, time_limit=args.time_limit, n_max_threads=args.threads, plots=plots_report,
                tikztrees=args.tikztrees, variant_filepath=os.path.join(output_directory, fn_tree+'_variants.csv'))

            # previously used for benchmarking
            # if plots_paper:     # generate Java Script D3 trees
            #     json_file = 'mlhtree_'+fn_tree+'.json'
            #     Phylogeny.save_json_tree(os.path.join(output_directory, json_file), phylogeny.mlh_tree)
            #     Phylogeny.write_html_file(os.path.join(output_directory, 'mlhtree_'+fn_tree+'.html'), json_file)

            # determine mutation patterns and generate an overview graph
            if plots_report:
                if phylogeny.tree_plot is not None:     # add generated tree plot to HTML report
                    html_report.add_tree_plot(patient, phylogeny)

                # create mutation pattern overview plot
                # show only the different patterns and not the individual variants
                # (convenient for large numbers of variants)
                fn_mp_plot = get_output_fn_template(
                    patient.name, read_no_samples, min_sa_coverage=args.min_median_coverage, max_no_mps=args.max_no_mps,
                    min_sa_vaf=args.min_median_vaf, bi_e=patient.bi_error_rate, bi_c0=patient.bi_c0,
                    max_absent_vaf=patient.max_absent_vaf, mode=args.mode, suffix=args.suffix)

                if args.subclone_detection:
                    pg = ti.create_max_lh_tree(
                        patient, tree_filepath=None, mm_filepath=None, mp_filepath=None, subclone_detection=False,
                        loh_frequency=settings.LOH_FREQUENCY, driver_vars=put_driver_vars, pool_size=args.pool_size,
                        no_plotted_solutions=settings.NO_PLOTTED_SOLUTIONS, no_bootstrap_samples=0,
                        max_no_mps=args.max_no_mps, time_limit=args.time_limit, plots=False, tikztrees=False)
                else:
                    pg = phylogeny

                mp_graph_name = mp_graph.create_mp_graph(
                    fn_mp_plot, pg, pg.node_scores.keys(), pg.node_scores,
                    output_directory=output_directory, min_node_weight=settings.MIN_MP_SCORE,
                    circos_max_no_mps=settings.CIRCOS_MAX_NO_MPS)

                if mp_graph_name is not None:
                    html_report.add_conflict_graph(patient, mp_graph_name, phylogeny=pg)

                opt_sol = phylogeny.solutions[0]  # optimal solution
                # create plot only if there is enough space for all the incompatible mutations
                if (0 < len(opt_sol.false_positives) + len(opt_sol.false_negatives) +
                        len(opt_sol.false_negative_unknowns) < def_sets.MAX_MUTS_TABLE_PLOT):

                    # illustrative mutation table plot of incompatible mutation patterns and their putative artifacts
                    x_length, y_length = plts.create_incompatible_mp_table(
                        patient, os.path.join(output_directory, fn_pattern+settings.artifacts_plot_suffix),
                        phylogeny, row_labels=patient.sample_names, column_labels=col_labels)
                    if x_length > 0 and y_length > 0:
                        # add information about putative false-positives and false-negatives to the HTML report
                        html_report.add_artifacts_information(
                            phylogeny, artifacts_plot_filepath=fn_pattern+settings.artifacts_plot_suffix+'.png',
                            plot_width=x_length*7)
                else:
                    html_report.add_artifacts_information(phylogeny)

        # find evolutionary incompatible mutation patterns based on standard binary classification
        elif args.mode == 2:

            phylogeny = ti.infer_max_compatible_tree(os.path.join(output_directory, fn_pattern+'_btree.tex'),
                                                     patient, drivers=put_driver_genes)

            if plots_report:
                # create mutation pattern overview plot
                # show only the different patterns and not the individual variants
                # (convenient for large numbers of variants)
                mp_graph_name = mp_graph.create_mp_graph(
                    fn_pattern, phylogeny, phylogeny.nodes, phylogeny.node_scores,
                    output_directory=output_directory, min_node_weight=settings.MIN_MP_SCORE,
                    circos_max_no_mps=settings.CIRCOS_MAX_NO_MPS)

                if mp_graph_name is not None:
                    html_report.add_conflict_graph(patient, mp_graph_name)

                # create plot only if there is enough space for all the incompatible mutations
                if len(phylogeny.conflicting_mutations) < def_sets.MAX_MUTS_TABLE_PLOT:
                    # illustrative mutation table plot of incompatible mutation patterns
                    x_length, y_length = plts.create_incompatible_mp_table(
                        patient, os.path.join(output_directory, fn_pattern+settings.incomp_mps_plot_suffix),
                        phylogeny, row_labels=patient.sample_names, column_labels=col_labels)

                    if x_length > 0 and y_length > 0:
                        # add information about evolutionarily incompatible mutation patterns to the HTML report
                        html_report.add_inc_mp_information(
                            phylogeny, incomp_mps_plot_filepath=fn_pattern+settings.incomp_mps_plot_suffix + '.png',
                            plot_width=x_length*7)
                else:
                    # too many evolutionarily incompatible mutations to create a mutation table plot
                    # add information about evolutionarily incompatible mutation patterns to the HTML report
                    html_report.add_inc_mp_information(phylogeny)

        # generate analysis file to provide an overview about the derived results
        analysis.create_analysis_file(patient, args.min_median_coverage,
                                      os.path.join(output_directory, fn_pattern+'_analysis.txt'), phylogeny)

        # create input data file for circos conflict graph plots
        if plots_report:
            circos.create_raw_data_file(os.path.join(output_directory, 'fig_data_'+fn_pattern+'_mutdata.txt'),
                                        patient.mutations, patient.mut_positions, data=patient.data,
                                        sample_names=patient.sample_names)

            # create labels file if gene names are available
            # typically not available in VCF files
            if patient.gene_names is not None and len(patient.gene_names):
                circos.create_mutation_labels_file(
                    os.path.join(output_directory, 'fig_data_'+fn_pattern+'_mutlabels.txt'),
                    patient.mutations, patient.gene_names, patient.mut_positions, patient.driver_pathways)

            if args.mode == 1 and len(patient.data) < 500:
                # create input data files for circular plots with circos: conflict graph
                circos.create_mlh_graph_files(
                    os.path.join(output_directory, 'mlh_nodes_'+fn_pattern+'.txt'),
                    os.path.join(output_directory, 'mlh_mutnode_labels_'+fn_pattern+'.txt'),
                    os.path.join(output_directory, 'mlh_mutnode_data_'+fn_pattern+'.txt'),
                    patient.data, phylogeny, patient.gene_names, patient.driver_pathways)

            elif args.mode == 2:
                # only create the mutation-conflict links file if there are fewer than 50,000 edges
                # in the conflict graph
                if phylogeny.cf_graph.size() < 50000:
                    circos.create_mutation_links_file(
                        os.path.join(output_directory, 'fig_data_'+fn_pattern+'_conflicts.txt'),
                        phylogeny, patient.mut_positions)
                else:
                    logger.warning('Circos mutation conflicts data has not been created as there '
                                   'are {} edges in the graph.'.format(phylogeny.cf_graph.size()))

                # create input data files for circular plots with circos: conflict graph
                circos.create_conflict_graph_files(
                    os.path.join(output_directory, 'cfg_nodes_'+fn_pattern+'.txt'),
                    os.path.join(output_directory, 'cfg_mutnode_labels_'+fn_pattern+'.txt'),
                    os.path.join(output_directory, 'cfg_mutnode_data_'+fn_pattern+'.txt'),
                    os.path.join(output_directory, 'cfg_links_'+fn_pattern+'.txt'),
                    phylogeny, patient.gene_names, patient.driver_pathways, data=patient.data,
                    min_node_weight=settings.MIN_MP_SCORE, max_no_mps=settings.CIRCOS_MAX_NO_MPS)

    else:
        raise RuntimeError("No valid mode was provided (e.g. -m 1) to infer the phylogenetic tree.")

    # finalize HTML report
    html_report.end_report(patient.bi_error_rate, patient.bi_c0, patient.max_absent_vaf, settings.LOH_FREQUENCY,
                           fpr, fdr, min_absent_cov, args.min_median_coverage, args.min_median_vaf,
                           max_no_mps=args.max_no_mps)

    logger.info('Treeomics finished evolutionary analysis.')
    def test_clash_with_one_irrelevant_prescription(self):
        patient = Patient(prescriptions=[
            Prescription("Paracetamol", dispense_date=date.today() - timedelta(days=2), days_supply=2)])
        assert patient.clash(["Aspirin"]) == set()
Example #39
0
    def search_database(self):
        '''
        Allow the user to search for a patient's appointment by name.

        Parameters
        ----------
        None
        '''
        patients = []  # patients matching the search query

        self.input = self.nameupd.get()

        # Used to choose appointment information from database
        sql = "Select * FROM Appointments WHERE name LIKE ?"
        self.result = c.execute(sql, (self.input, ))
        for self.row in self.result:
            patients.append(
                Patient(self.row[6], self.row[1], self.row[2], self.row[3],
                        self.row[4], self.row[5], self.row[7]))
            self.name = self.row[1]
            self.age = self.row[2]
            self.gender = self.row[3]
            self.phone = self.row[4]
            self.residence = self.row[5]
            self.reason = self.row[6]
            self.time = self.row[7]

        if len(patients) == 0:
            messagebox.showerror("Error", "Information is missing")
            return

        self.updname = Label(self.master,
                             text="Patient's Name",
                             bg='navajowhite',
                             font=('Times', 16, 'bold'))
        self.updname.place(x=150, y=275)
        self.entname = Entry(self.master, width=30)
        self.entname.place(x=130, y=325)
        self.entname.insert(END, str(patients[0].name))

        self.updage = Label(self.master,
                            text="Age",
                            bg='navajowhite',
                            font=('Times', 16, 'bold'))
        self.updage.place(x=570, y=275)
        self.entage = Entry(self.master, width=30)
        self.entage.place(x=506, y=325)
        self.entage.insert(END, str(patients[0].age))

        self.updgender = Label(self.master,
                               text="Gender",
                               bg='navajowhite',
                               font=('Times', 16, 'bold'))
        self.updgender.place(x=940, y=275)
        self.entgender = Entry(self.master, width=30)
        self.entgender.place(x=882, y=325)
        self.entgender.insert(END, str(patients[0].gender))

        self.updphone = Label(self.master,
                              text="Phone",
                              bg='navajowhite',
                              font=('Times', 16, 'bold'))
        self.updphone.place(x=184, y=375)
        self.entphone = Entry(self.master, width=30)
        self.entphone.place(x=130, y=425)
        self.entphone.insert(END, str(patients[0].phone))

        self.updresidence = Label(self.master,
                                  text="Residence",
                                  bg='navajowhite',
                                  font=('Times', 16, 'bold'))
        self.updresidence.place(x=550, y=375)
        self.entresidence = Entry(self.master, width=30)
        self.entresidence.place(x=506, y=425)
        self.entresidence.insert(END, str(patients[0].residence))

        self.updreason = Label(self.master,
                               text="Reason",
                               bg='navajowhite',
                               font=('Times', 16, 'bold'))
        self.updreason.place(x=940, y=375)
        self.entreason = Entry(self.master, width=30)
        self.entreason.place(x=882, y=425)
        self.entreason.insert(END, str(patients[0].reason))

        self.updtime = Label(self.master,
                             text="Time",
                             bg='navajowhite',
                             font=('Times', 16, 'bold'))
        self.updtime.place(x=570, y=475)
        self.enttime = Entry(self.master, width=30)
        self.enttime.place(x=506, y=525)
        self.enttime.insert(END, str(patients[0].time))

        # This button is used to update the appointment
        self.update = Button(self.master,
                             text="Update",
                             width=15,
                             height=2,
                             bg='yellowgreen',
                             command=self.update_database)
        self.update.place(x=630, y=575)
        # This button is used to update the appointment
        self.delete = Button(self.master,
                             text="Delete",
                             width=15,
                             height=2,
                             bg='orangered',
                             command=self.delete_appointment)
        self.delete.place(x=450, y=575)
    def test_clash_with_two_prescriptions_for_same_medication(self):
        patient = Patient(prescriptions=[
            Prescription("Paracetamol", dispense_date=date.today() - timedelta(days=2), days_supply=2),
            Prescription("Paracetamol", dispense_date=date.today() - timedelta(days=3), days_supply=2)])
        assert patient.clash(["Paracetamol"]) == {date.today() - timedelta(days=3),
                                                  date.today() - timedelta(days=2),
                                                  date.today() - timedelta(days=1)}
Example #41
0
def loadFromFile(filename, dataset, verbose=False, run_pca=True, explain_rat=4., ret_var=False):
    # TODO: arg check

    tissue_name = os.path.basename(filename).split('.')[0]
    tissue = Tissue(tissue_name, dataset)

    tissue_file = open(filename, 'r')

    patient_ids = tissue_file.readline().strip().split('\t')[4:]
    for patient_id in patient_ids:
        if patient_id not in dataset.patients:
            patient = Patient(patient_id)
            dataset.addPatient(patient)
        patient = dataset.patients[patient_id]
        patient.addTissue(tissue)
        
        tissue._rows[patient_id] = tissue.numPatients
        tissue._patients[patient_id] = patient

    # print 'got patients'

    raw_t = [[float(val_str)
              for val_str in line.strip().split('\t')[4:]]
             for line in tissue_file]

    # print 'got data'

    val = np.array(raw_t).T

    var_exp = 0.
    if run_pca:
        pca_model = PCA(n_components=50, copy=False)
        pca_model.fit_transform(val)
        #cov = val.T.dot(val)/(len(raw_t))

        #U, W, _ = np.linalg.svd(cov)

        #cum_var = np.cumsum(W**2)
        #cum_var = cum_var/cum_var[-1]
        cum_var = np.cumsum(pca_model.explained_variance_ratio_)
        explained_ratio = [float(cum_var[i])/float(i+1)
                           for i in range(len(cum_var))]
        
        best_dim = 0
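        # heuristic: keep the largest number of components for which the average cumulative
        # explained variance per component, scaled by the number of patients, exceeds explain_rat
        # (a minimum of 8 components is enforced below)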
        for dim in range(len(cum_var)):
            if explained_ratio[dim]*len(patient_ids) > explain_rat:
                best_dim = dim
        n_components = best_dim+1
        n_components = max(n_components, 8)

        #val = val.dot(U[:,:n_components])
        val = val[:,:n_components]
        var_exp = cum_var[n_components-1]
        
        if verbose:
            print(tissue_name + ' has {} components to explain {}% variance for {} patients'.format(
                n_components, 100. * var_exp, len(patient_ids)))

    elif verbose:
        print(tissue_name + ' parsed')

    tissue._value = val

    if ret_var:
        return tissue, var_exp
    else:
        return tissue
Example #42
0
def insert_new_Patient():
    """
    Prompt for patient details and append a new Patient to the in-memory storage.
    """
    # a try/except block to catch invalid input
    try:

        # request the arguments needed to initialize a new Patient
        name = input("Enter Name: ")
        age = input("Enter Age: ")
        strand = input("Enter DNA strand: ")

        name = name.strip()
        strand = strand.strip()

        # local variables for the derived traits
        diabetes = ""
        blue_eyes = ""
        three_eyes = ""

        # make sure the inputs are not empty
        if len(name) == 0 or len(age) == 0:
            raise Exception("Please check your input and try again")
        else:
            # the age must not contain letters
            if re.search('[a-zA-Z]', age):
                raise Exception("Sorry, Age must be an integer")

        # check the strand length
        if len(strand) == MAX_STRAND:
            # for three_eyes
            if "AAA" in strand:
                three_eyes = "True"
            else:
                three_eyes = "False"
            # for blue_eyes
            if "GGA" in strand:
                blue_eyes = "True"
            else:
                blue_eyes = "False"
            # for diabetes
            if "CTA" in strand:
                diabetes = "True"
            else:
                diabetes = "False"

            # append the newly initialized Patient to init_storage
            init_storage.append(
                Patient(name, age, strand, diabetes, blue_eyes, three_eyes))

            return "Successful"

        else:
            raise Exception("Bad input! Strand length must be {}".format(MAX_STRAND))

    # any error raised above is caught and returned to the caller
    except Exception as err:
        return err
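
The trait flags in the example above are derived purely from marker substrings in the DNA strand; a minimal stand-alone sketch of that check (hypothetical helper, not part of the original code):

def strand_traits(strand):
    # each trait flag is switched on by the presence of an assumed marker substring
    return {
        "three_eyes": "True" if "AAA" in strand else "False",
        "blue_eyes": "True" if "GGA" in strand else "False",
        "diabetes": "True" if "CTA" in strand else "False",
    }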
                              for day in patient.clash(medicine_names)])
    ax.broken_barh(days_taking_all, (0, 9), facecolors='black')

    medicine_count = len(medicine_names)
    ax.set_ylim(-5, 10 * (medicine_count + 1) + 5)
    ax.set_xlim(-110, 0)
    ax.set_xlabel('relative to today')
    ax.set_yticks([i * 10 + 5 for i in range(medicine_count + 2)])
    ax.set_yticklabels(["Clash"] + medicine_names)
    ax.grid(True)

    fig.savefig(filename)


if __name__ == "__main__":
    patient = Patient()
    patient.add_prescription(
        Prescription("Fluoxetine",
                     dispense_date=date.today() - timedelta(100),
                     days_supply=20))
    patient.add_prescription(
        Prescription("Fluoxetine",
                     dispense_date=date.today() - timedelta(50),
                     days_supply=50))
    patient.add_prescription(
        Prescription("Codeine",
                     dispense_date=date.today() - timedelta(15),
                     days_supply=3))
    patient.add_prescription(
        Prescription("Codeine",
                     dispense_date=date.today() - timedelta(60),
Example #44
0
    def test_clash_with_one_irrelevant_prescription(self):
        patient = Patient(prescriptions=[Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=2)])
        assert patient.clash(["Prozac"]) == set()
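
These tests rely on a days_ago helper that is not shown in the snippet; a minimal sketch, assuming it simply offsets today's date:

from datetime import date, timedelta

def days_ago(days):
    # assumed implementation: the date the given number of days before today
    return date.today() - timedelta(days=days)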
Example #45
0
            availableFrom = calculateTime(availableFrom)
            availableTo = input("\tEnter Available To Time(00:00): ")
            availableTo = calculateTime(availableTo)
            doctorObject = Doctor(name, doctorSpec, availableDay,
                                  availableFrom, availableTo)
            system1.addDoctor(doctorObject)

        elif choice == 2:
            name = input("\n\tEnter Name: ")
            #Print Specializations
            print("\n\t\t1. Pediatrician\n\t\t2. ENT\n\t\t3. GP")
            patientSpec = int(input("\tEnter Specialization Required: "))
            #Print Days
            print(
                "\n\t\t1. Monday\n\t\t2. Tuesday\n\t\t3. Wednesday\n\t\t4. Thursday\n\t\t5. Friday\n\t\t6. Saturday\n\t\t7. Sunday"
            )
            day = int(input("\tEnter Required Day: "))
            fromTime = input("\tEnter From Time(00:00): ")
            fromTime = calculateTime(fromTime)
            toTime = input("\tEnter To Time(00:00): ")
            toTime = calculateTime(toTime)
            patientObject = Patient(name, patientSpec, day, fromTime, toTime)
            system1.assignDoctor(patientObject)

        elif choice == 3:
            print("\n\t\tThank You!")
            break

        else:
            print("\n\t\tInvalid Choice!")
            continue
Example #46
0
    else:
        diabetes = "False"
    # assigning values with keys to the dictionary
    final_res['strand'] = strans
    final_res['diabetes'] = diabetes
    final_res['blue'] = blue_eyes
    final_res['three'] = three_eyes

    return final_res


# Storage for each initiation
init_storage = [
    Patient("Andrea", 37,
            random_strand()['strand'],
            random_strand()['diabetes'],
            random_strand()['blue'],
            random_strand()['three']),
    Patient("Bob", 28,
            random_strand()['strand'],
            random_strand()['diabetes'],
            random_strand()['blue'],
            random_strand()['three']),
    Patient("Brooke", 34,
            random_strand()['strand'],
            random_strand()['diabetes'],
            random_strand()['blue'],
            random_strand()['three']),
    Patient("Connor", 27,
            random_strand()['strand'],
            random_strand()['diabetes'],
class TestDepartment(TestCase):
    """This is a test class to test the Department class"""
    def setUp(self) -> None:
        """Initialize setup object"""
        self.department = Department("Emergency")
        self.department1 = Department("Surgery")
        self.read_mock = Mock()
        self.department._write_to_file = self.read_mock

        self.doctor1 = Doctor("George", "Bush", "1982-2-28",
                              "97334 Oak Bridge , Vancouver, Vancouver, BC", 2,
                              False, 125, 190000)
        self.patient1 = Patient("Jose", "McDonald", "1970-12-12",
                                "3432 Newtons, Richmond, BC", 1, False, 590)
        self.doctor2 = Doctor("Johnny", "Kenedy", "1984-1-30",
                              "1444 Oakway, North Vancouver, Vancouver, BC", 1,
                              False, 123, 150000)
        self.patient2 = Patient("Bill", "Stark", "1970-12-12",
                                "1111 Columbia, New Westminster, BC", 2, False,
                                589, 10000)

        self.patient3 = Patient("Tony", "Stark", "1960-9-2",
                                "1111 Columbia, New Westminster, BC", 12,
                                False, 589)

    def test_valid_constructor(self):
        """Test an object is created correctly"""
        self.assertIsNotNone(self.department)
        self.assertIsInstance(self.department, Department)

    def test_invalid_constructor(self):
        """Test an object with invalid parameters"""
        with self.assertRaises(TypeError):
            department_1 = Department(1)

    def test_add_person(self):
        """Test to add an object (person) to the department"""
        with self.assertRaises(ValueError):
            self.department.add_person(self.patient1)
            self.assertTrue(self.read_mock.called)

        # This is to test how many patient objects are added to the self.department
        test_case = self.department.get_statistics()
        self.assertEqual(test_case.get_not_released_patient_num(), 1)

    def test_remove_person(self):
        """Test to remove an object (person) out of the department"""
        test_id = self.patient1.get_id()
        self.department.remove_person_by_id(test_id)
        self.assertTrue(self.read_mock.called)

        # This is to test how many patient objects left in the self.department
        test_case = self.department.get_statistics()
        self.assertEqual(test_case.get_not_released_patient_num(), 0)

        # This is to test exception value error of remove method
        with self.assertRaises(ValueError):
            self.department.remove_person_by_id(100000)

    def test_check_existing_person(self):
        """Test to check if an object (person) is in the department"""
        test_id = self.patient3.get_id()
        self.assertEqual(self.department.person_exist(test_id), False)

        test_id_1 = self.doctor2.get_id()
        self.assertEqual(self.department.person_exist(test_id_1), True)

    def test_get_person_by_type(self):
        """Test to get all people with a given type"""
        obj = self.department.get_person_by_type("Teacher")
        self.assertEqual(len(obj), 0)
        self.department.get_all_current_people()
        obj1 = self.department.get_person_by_type("Patient")
        self.assertEqual(len(obj1), 2)

    def test_get_all_people(self):
        """Test to get all people in a department"""
        self.department.get_all_current_people()

    def test_get_existing_person_by_id(self):
        """Test to get an object (person) from the department"""
        test_id = self.patient1.get_id()
        obj1 = self.department.get_person_by_id(test_id)
        D1 = obj1.to_dict()
        self.assertEqual(D1, self.patient1.to_dict())

        obj2 = self.department.get_person_by_id(10000)
        self.assertEqual(obj2, None)

    def test_get_department_name(self):
        """Test to get the name of the department"""
        self.assertEqual(self.department.get_name(), "Emergency")

    def test_get_get_statistics(self):
        """Test to get statistics of the department"""
        test_case = self.department.get_statistics()
        self.assertEqual(test_case.get_not_released_patient_num(), 1)
        self.assertEqual(test_case.get_released_patient_num(), 1)
        self.assertEqual(test_case.get_total_bill_amount_released_patients(),
                         10000)

    def test_write_data_to_file(self):
        """Test to see if a file is opened and gotten data"""
        self.department._write_to_file()
        self.assertTrue(self.read_mock.called)

    @mock.patch('department.Department._read_from_file')
    def test_read_data_to_file(self, mock_read_func):
        """Test to see if a file is opened and data is written on"""
        self.department._read_from_file()
        self.assertTrue(mock_read_func.called)

    def test_get_created_entities(self):
        """Test to check a valid object is created correctly"""
        person1 = [{
            'first_name': 'George',
            'last_name': 'Bush',
            'date_of_birth': "1970-12-12 00:00:00",
            'address': '97334 Oak Bridge , Vancouver, Vancouver, BC',
            'id': 2,
            'is_released': False,
            'office_num': 125,
            'income': 190000
        }]
        person2 = [{
            'first_name': 'Jose',
            'last_name': 'McDonald',
            'date_of_birth': "1970-12-12 00:00:00",
            'address': '3432 Newtons, Richmond, BC',
            'is_released': False,
            'id': 1,
            'room_num': 589,
            'bill': 0.0
        }]
        self.department1.create_entities(person2, "Patient")
        self.department1.create_entities(person1, "Doctor")
        obj1 = self.department1.get_person_by_id(1)
        self.assertIsNotNone(obj1)
        self.assertIsInstance(obj1, (Doctor, Patient))
        obj2 = self.department1.get_person_by_id(2)
        self.assertIsNotNone(obj2)
        self.assertIsInstance(obj2, (Doctor, Patient))

    def test_update_person(self):
        """Test update information of a person"""
        with self.assertRaises(ValueError):
            self.department.update_person(4, "Uy", "Tran", 600, 10000)
            self.department.update_person(1, "Uy", "Tran", 600, 10000)
            self.assertTrue(self.read_mock.called)

    def test_to_dict(self):
        """Test to check to_dict() return an expected value"""
        self.D1 = self.department1.to_dict()
        self.D2 = {
            'name':
            'Surgery',
            'Patient': [{
                'first_name': 'Jose',
                'last_name': 'McDonald',
                'date_of_birth': datetime(1970, 12, 12, 0, 0),
                'address': '3432 Newtons, Richmond, BC',
                'is_released': False,
                'id': 1,
                'room_num': 590,
                'bill': 0
            }, {
                'first_name': 'Bill',
                'last_name': 'Stark',
                'date_of_birth': datetime(1960, 9, 2, 0, 0),
                'address': '1111 Columbia, New Westminster, BC',
                'is_released': True,
                'id': 2,
                'room_num': 589,
                'bill': 10000
            }],
            'Doctor': [{
                'first_name': 'George',
                'last_name': 'Bush',
                'date_of_birth': datetime(1982, 2, 28, 0, 0),
                'address': '97334 Oak Bridge , Vancouver, Vancouver, BC',
                'id': 2,
                'is_released': False,
                'office_num': 125,
                'income': 190000
            }]
        }
        self.assertDictEqual(self.D1, self.D2)
Example #48
0
    def test_days_taking_for_irrelevant_prescription(self):
        patient = Patient(prescriptions=[Prescription("Codeine", dispense_date=days_ago(days=2), days_supply=2)])
        assert patient.days_taking("Prozac") == set()
    def patient(self):
        self._canvas.destroy()
        Patient(self.root)
Example #50
0
import sqlite3
from patient import Patient
from datetime import date

zero = Patient('Surname', 'Name', str(date(2019, 11, 4)), 'Street', 185, 80)
first = Patient('Surname1', 'Name1', str(date(2019, 11, 4)), 'Street1', 171, 81)

conn = sqlite3.connect('hospital.db')
cursor = conn.cursor()

try:
    cursor.execute(
        """insert into Patients values
                    (:surname, :name, :birthDate, :address,
                    :height, :weight, 1)""", {
            "surname": zero.surname,
            "name": zero.name,
            "birthDate": zero.birth_date,
            "address": zero.address,
            "height": zero.height,
            "weight": zero.weight
        })
    cursor.execute(
        """insert into Patients values
                    (:surname, :name, :birthDate, :address,
                    :height, :weight, 2)""", {
            "surname": first.surname,
            "name": first.name,
            "birthDate": first.birth_date,
            "address": first.address,
from student import Student
from patient import Patient

new_student = Student()
new_student.first_name = "John"
new_student.last_name = "Doe"
new_student.cohort_number = 36
new_student.age = 42
# new_student.full_name = "Happy Days"
print(new_student)

cashew = Patient(
    "097-23-1003", "08/13/92", "7001294103",
    "Daniela", "Agnoletti", "500 Infinity Way")


# This should not change the state of the object
# cashew.social_security_number = "838-31-2256"

# Neither should this
# cashew.date_of_birth = "01-30-90"

# But printing either of them should work
print(cashew.social_security_number)
# "097-23-1003"

# # These two statements should output nothing
# print(cashew.first_name)
# print(cashew.last_name)

# # But this should output the full name
Example #52
0
 def _schedule(self):
     """Obtains patient info and schedules patient."""
     name = input("\nEnter the patient's name:")
     condition = self._getCondition()
     self._model.schedule(Patient(name, condition))
     print(name, "is added to the", condition, "list\n")
Example #53
0
# get training data from un-concussed individuals
n_keep = 1000

train_lists = [pid_concussion, pid_noConcussion]
train_bools = [True, False]
examples_lists = [[], []]
train_examples = []
labels = ["concussion", "noconcussion"]

# for lst, pat_list in zip([pid_noConcussion, pid_3stepProtocol, pid_testRetest, pid_concussion], [noCon_pats, step_pats, retest_pats, con_pats]):
# for lst, pat_list in zip([pid_noConcussion], [noCon_pats]):
i = 0
for pid_list, train_bool in zip(train_lists, train_bools):
    for pid in pid_list:
        print("Processing pid: {}".format(pid))
        p = Patient(pid, subfolder, load_session_raw=False, load_session_examples=True)
        # get examples from pre_test
        pre = post = None
        if p.pre_test is not None:
            pre = p.pre_test.load_examples(subfolder)
            if pre is not None:
                np.random.shuffle(pre)
        
        if (p.n_concussions == 0):
            # get examples from post_test
            if p.post_test is not None:
                post = p.post_test.load_examples(subfolder)
                if post is not None:
                    np.random.shuffle(post)
        else:
            if p.intermediate_tests[0] is not None: