def main():

    # Parse arguments
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("i", type=str, help="Quicklogic 'TechFile' file")
    parser.add_argument(
        "--stages",
        type=str,
        default="STREET",
        help="Comma-separated list of stage types to view (def. STREET)")

    args = parser.parse_args()

    # Read and parse the XML file
    xml_tree = ET.parse(args.i)
    xml_root = xml_tree.getroot()

    # Load data
    data = import_data(xml_root)
    switchbox_types = data["switchbox_types"]

    # Generate DOT files with switchbox visualizations
    for switchbox in switchbox_types.values():
        fname = "sbox_{}.dot".format(switchbox.type)
        with open(fname, "w") as fp:
            fp.write(switchbox_to_dot(switchbox, args.stages.split(",")))
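
# A standard entry-point guard (assumed; not shown in the original snippet):
if __name__ == "__main__":
    main()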
def std_mean_relationship():
    path = '/Users/veronikasamborska/Desktop/photometry_code/code/data'
    files = os.listdir(path)
    mean_signal_list = []
    std_list = []
    plt.figure()
    for file in files:
        if 'p8-VTA-2018-04-20-142431.ppd' in file:
            data = di.import_data(os.path.join(path, file))
            # Regress the motion channel (ADC2) onto the signal channel
            # (ADC1) and subtract the estimated motion component.
            OLS = LinearRegression()
            OLS.fit(data['ADC2_filt'][:, None], data['ADC1_filt'][:, None])
            estimated_motion = OLS.predict(
                data['ADC2_filt'][:, None]).squeeze()
            corrected_signal = data['ADC1_filt'] - estimated_motion
            mean_signal = np.mean(data['ADC1'])  # mean of the raw signal
            std_signal_corrected = np.std(corrected_signal)
            mean_signal_list.append(mean_signal)
            std_list.append(std_signal_corrected)
            plt.scatter(mean_signal, std_signal_corrected)
            plt.pause(0.05)
            plt.xlabel('Mean Signal')
            plt.ylabel('Standard Deviation of the Signal')
    z = np.polyfit(mean_signal_list, std_list, 1)
    p = np.poly1d(z)
    plt.plot(mean_signal_list, p(mean_signal_list), "r")
    plt.show()
def calcium():
    path = '/Users/veronikasamborska/Desktop/photometry_code/code/data'
    files = os.listdir(path)
    session_list = []
    std_list = []
    plt.figure()
    date_list = []
    session_list_old_LED = []
    date_list_old_LED = []
    std_list_old_LED = []
    for file in files:
        if 'p8' in file:
            if file in files_locked_days:
                data = di.import_data(os.path.join(path, file))
                if data['datetime'] <= LED_change_day:
                    date_int_old_LED = data['datetime'] - surgery_date_p8
                    std_old_LED = np.std(data['ADC1_filt'])
                    std_list_old_LED.append(std_old_LED)
                    session_list_old_LED.append(data['datetime_str'])
                    date_list_old_LED.append(date_int_old_LED.days)
                else:
                    date_int = data['datetime'] - surgery_date_p8
                    std = np.std(data['ADC1_filt'])
                    std_list.append(std)
                    session_list.append(data['datetime_str'])
                    date_list.append(date_int.days)
    date_list_old_LED.sort()
    date_list.sort()
    print(date_list)
    # Timestamps formatted '%Y-%m-%d %H:%M:%S' sort chronologically even as
    # strings, so argsort of the session names orders the stds by time.
    sorted_ind = np.argsort(session_list)
    np_std = np.asarray(std_list)
    newarray = np_std[sorted_ind]

    sorted_ind_old_LED = np.argsort(session_list_old_LED)
    np_std_old = np.asarray(std_list_old_LED)
    newarray_old_LED = np_std_old[sorted_ind_old_LED]
    #plt.xlim(min(date_list_old_LED), max(date_list))
    plt.scatter(date_list, newarray, color='red', label='New LED')
    plt.scatter(date_list_old_LED,
                newarray_old_LED,
                color='blue',
                label='Old LED')
    plt.legend()
    plt.xlabel('Days from Surgery')
    plt.ylabel('Standard Deviation of the Signal')
    plt.title('p8')
    plt.show()
########################
# Running the code
########################
# Initialize logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


# Define our global variables
headerFile = '/Users/alexandertitus/pinnacles/summaryAPI/python/data/header.csv'
detailFile = '/Users/alexandertitus/pinnacles/summaryAPI/python/data/detail.csv'
patientHeadIDColName = 'GECaseID'
patientDetailIDColName = 'SNACaseID'


# Import data
data = di.import_data(headerFile, detailFile)
header_data = data[0]
detail_data = data[1]

k = 8
# Run the VSM and extract results
# new_data = vdm.prepare_date_vsm(detail_data)
# new_data = vdm.prepare_patient_vsm(detail_data)

new_data = vdm.prepare_vsm(detail_data, 'date', 'CPT')
clusters = cluster.create_clusters(new_data, k=k, model='LSI')

for i in range(k):
    print("Set {}: {} {}".format(i, len(clusters[1][i]),
                                 len(set(clusters[0][i]))))
for i in range(k):
    pprint(set(clusters[0][i]))

# Plot the clusters
# Example #5
def prepData(filePath, test_rate):
    data = data_import.import_data(filePath, test_rate=test_rate, test=1)
    return data
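
# Usage sketch (the path and split rate below are illustrative assumptions):
#   data = prepData('data/input.csv', test_rate=0.2)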
# Example #6

files_rcon_locked = [
    'p8-VTA-2018-04-20-141632.ppd', 'p8-VTA-2018-04-25-135250.ppd',
    'p8-VTA-2018-04-27-170423.ppd', 'p8-VTA-2018-04-30-133859.ppd',
    'p6-VTA-2018-05-02-150558.ppd', 'p6-VTA-2018-05-01-161845.ppd',
    'p6-VTA-2018-04-30-141740.ppd', 'p6-VTA-2018-04-27-171711.ppd',
    'p6-VTA-2018-04-24-142118.ppd', 'p8-VTA-2018-05-04-115502.ppd',
    'p8-VTA-2018-05-03-140309.ppd', 'p8-VTA-2018-05-02-143100.ppd',
    'p8-VTA-2018-05-01-153359.ppd', 'p8-VTA-2018-04-30-133859.ppd'
]

data_dict = OrderedDict([
    #('isbs_lk100z', [di.import_data(file_name) for file_name in files_isbs_lk100z]),
    ('isbs_locked',
     [di.import_data(file_name) for file_name in files_isbs_locked]),
    ('isbs_unlckd',
     [di.import_data(file_name) for file_name in files_isbs_unlckd]),
    ('rdif_locked',
     [di.import_data(file_name) for file_name in files_rdif_locked]),
    ('rdif_unlckd',
     [di.import_data(file_name) for file_name in files_rdif_unlckd]),
    ('rcon_locked',
     [di.import_data(file_name) for file_name in files_rcon_locked]),
    ('rcon_unlckd',
     [di.import_data(file_name) for file_name in files_rcon_unlckd])
])
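
# Quick sanity check over the imported sessions (a sketch; each data_dict
# value is the list of imported sessions for that condition):
for condition, sessions in data_dict.items():
    print(condition, ':', len(sessions), 'sessions')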


def control_signal_CV_plot(fig_no=1):
    # The comprehension is truncated in the source; completing it over the
    # conditions in data_dict is assumed here.
    motion_signal_CV = [[an.motion_signal_CV(data) for data in condition]
                        for condition in data_dict.values()]
# Example #7
from keras.layers import Dense, Dropout
from keras import regularizers
from keras.models import Model, Sequential
import data_import
import pickle as pkl
import json
from keras.utils import plot_model
#data = data_import.import_data("~/Dropbox/missense_pred/data/Ben/inputs/input_data.HIS.csv",test = 1)

data = data_import.import_data(
    "~/Dropbox/missense_pred/data/Ben/input_data.HS.csv", test=1)

X_train = data[0]['X_train']
X_test = data[0]['X_test']
y_train = data[1]['y_train']
y_test = data[1]['y_test']

Model1 = Sequential()
Model1.add(Dense(40, input_shape=(46, ), activation='relu', name='inter1'))
Model1.add(Dense(46, activation='relu'))

Model1.compile(optimizer='adam', loss='mean_squared_error')
Model1.fit(X_train,
           X_train,
           epochs=100,
           batch_size=1000,
           validation_data=(X_test, X_test))

weight_1 = Model1.get_weights()[0]
with open("weight_1", 'wb') as f:
    pkl.dump(weight_1, f)
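
# Sketch: the dumped kernel can be reloaded later with pickle (assumes the
# "weight_1" file written above; Dense(40, input_shape=(46,)) gives a
# (46, 40) kernel matrix).
with open("weight_1", "rb") as f:
    encoder_kernel = pkl.load(f)
print(encoder_kernel.shape)  # (46, 40)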
from data_import import import_data
import pandas as pd
from matplotlib import pyplot as plt
import os

os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import plot_model, normalize
#Compile options
optimizer = 'rmsprop'
loss = 'binary_crossentropy'
metrics = ['accuracy']

#Import data
dataset, labels, features = import_data('Exercise1 - data.csv')
features = normalize(dataset.drop('Classification', axis=1).values,
                     axis=0,
                     order=2)

#Create the model, based on the 'MLP for binary classification' from https://keras.io/getting-started/sequential-model-guide/
model = Sequential()

model.add(Dense(64, input_dim=30, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

#Compile the model
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
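
# A minimal training sketch for the compiled model (epochs, batch size and
# validation split are illustrative assumptions, not from the original):
model.fit(features, labels, epochs=20, batch_size=32, validation_split=0.2)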
def main():

    # Parse arguments
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("--techfile",
                        type=str,
                        required=True,
                        help="Quicklogic 'TechFile' XML file")
    parser.add_argument("--fasm",
                        type=str,
                        default="default.fasm",
                        help="Output FASM file name")
    parser.add_argument("--device",
                        type=str,
                        choices=["eos-s3"],
                        default="eos-s3",
                        help="Device name to generate the FASM file for")
    parser.add_argument(
        "--dump-dot",
        action="store_true",
        help="Dump Graphviz .dot files for each routed switchbox type")
    parser.add_argument("--allow-routing-failures",
                        action="store_true",
                        help="Skip switchboxes that fail routing")

    args = parser.parse_args()

    # Read and parse the XML file
    xml_tree = ET.parse(args.techfile)
    xml_root = xml_tree.getroot()

    # Load data
    print("Loading data from the techfile...")
    data = import_data(xml_root)
    switchbox_types = data["switchbox_types"]
    switchbox_grid = data["switchbox_grid"]
    tile_types = data["tile_types"]
    tile_grid = data["tile_grid"]

    # Route switchboxes
    print("Making switchbox routes...")

    fasm = []
    fully_routed = 0
    partially_routed = 0

    def input_rank(pin):
        """
        Returns a rank of a switchbox input. Pins with the lowest rank should
        be expanded first.
        """
        if pin.name == "GND":
            return 0
        elif pin.name == "VCC":
            return 1
        elif pin.type not in [SwitchboxPinType.HOP, SwitchboxPinType.GCLK]:
            return 2
        elif pin.type == SwitchboxPinType.HOP:
            return 3
        elif pin.type == SwitchboxPinType.GCLK:
            return 4

        return 99

    # Process each switchbox type
    for switchbox in switchbox_types.values():
        print("", switchbox.type)

        # Identify all locations of the switchbox
        locs = [
            loc for loc, type in switchbox_grid.items()
            if type == switchbox.type
        ]

        # Initialize the builder
        builder = SwitchboxConfigBuilder(switchbox)

        # Sort the inputs according to their ranks.
        inputs = sorted(switchbox.inputs.values(), key=input_rank)

        # Propagate them
        for stage in ["STREET", "HIGHWAY"]:
            for pin in inputs:
                if pin.name in builder.stage_inputs(stage):
                    builder.propagate_input(stage, pin.name)

        # Check if all nodes are configured
        routing_failed = not builder.check_nodes()

        # Dump dot
        if args.dump_dot:
            dot = builder.dump_dot()
            fname = "defconfig_{}.dot".format(switchbox.type)
            with open(fname, "w") as fp:
                fp.write(dot)

        # Update stats; abort on a routing failure unless explicitly allowed
        if routing_failed:
            if not args.allow_routing_failures:
                exit(-1)
            partially_routed += len(locs)
        else:
            fully_routed += len(locs)

        # Emit FASM features for each of them
        for loc in locs:
            fasm.extend(builder.fasm_features(loc))

    print(" Total switchboxes: {}".format(len(switchbox_grid)))
    print(" Fully routed     : {}".format(fully_routed))
    print(" Partially routed : {}".format(partially_routed))

    # Power on all LOGIC cells
    for loc, tile in tile_grid.items():

        # Get the tile type object
        tile_type = tile_types[tile.type]

        # If this tile has a LOGIC cell then emit the FASM feature that
        # enables its power
        if "LOGIC" in tile_type.cells:
            feature = "X{}Y{}.LOGIC.LOGIC.Ipwr_gates.J_pwr_st".format(
                loc.x, loc.y)
            fasm.append(feature)

    # Write FASM
    print("Writing FASM file...")
    with open(args.fasm, "w") as fp:
        fp.write("\n".join(fasm))
#Classifier for dataset 2
import numpy as np
from data_import import import_data
from shuffle_in_unison import shuffle_in_unison
import os

os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD, adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical

#Import data
labels, images, dataset = import_data('Cancer labels.csv', 'cancer_images')
images = np.reshape(images, [20, 400, 640, 1])

labels = to_categorical(labels)

#Create model
#Based on VGG-like net
model = Sequential()

model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(400, 640, 1)))
model.add(MaxPooling2D(pool_size=(5, 5)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(5, 5)))
model.add(Dropout(0.25))
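
# The snippet is truncated here; a typical classification head for the
# VGG-like stack above might be (an assumed completion, not the original):
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(labels.shape[1], activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])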
# Example #11
lofz3 = set(gene for gene, score in lofz.items() if score >= 3)

geneset = HS_gene  #& lofz3

model = Sequential()
#model.add(Dropout(0.5,input_shape=(46,)))
model.add(Dense(40, input_shape=(46, ), activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.load_weights("SAE_weights.h5")

# ASD case
fpath = "~/Dropbox/missense_pred/data/Ben/case_control/case_control/case.anno.rare." + prefix + ".reformat.cnn.csv"
#fpath = '~/Dropbox/missense_pred/data/john/HIS_case.anno.rare.reformat.csv'
data = data_import.import_data(fpath)
X_train = data[0]['X_train']
X_test = data[0]['X_test']
y_train = data[1]['y_train']
y_test = data[1]['y_test']
df = pd.read_csv(fpath)
y_true = df.pop('target')
y_score = model.predict_proba(X_test, batch_size=20, verbose=1)
CASE = df.assign(SAE_prob=y_score)

#CHD case
fpath = "~/Dropbox/missense_pred/data/Ben/case_control/case_control/chd_yale.anno.rare." + prefix + ".reformat.cnn.csv"
data = data_import.import_data(fpath)
X_train = data[0]['X_train']
X_test = data[0]['X_test']
y_train = data[1]['y_train']
# Example #12
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')

    total = len(y_true)
    pos = sum(y_true)
    neg = total - pos

    plt.title(
        'Receiver operating characteristic of {}: {} positive, {} negative'.
        format(label, pos, neg))
    plt.legend(loc="lower right", fontsize='medium')
    plt.show()


fname2 = "/Users/bindy/Dropbox/missense_pred/data/Ben/metaSVM/metaSVM_test2.anno.rare.HIS.reformat.cnn.csv"
fname = '/Users/bindy/Dropbox/missense_pred/data/cancer_hotspots/cancer_sel.HIS.reformat.cnn.csv'
data = data_import.import_data(fname)
data2 = data_import.import_data(fname2)
X_train = data[0]['X_train']
X_test = data[0]['X_test']
y_train = data[1]['y_train']
y_test = data[1]['y_test']
df = pd.read_csv(fname)
y_true = df.pop('target')
y_score = model.predict_proba(X_test, batch_size=20, verbose=1)
df = df.assign(SAE_prob=y_score)
plot_roc(df, y_true, label="HIS_metaSVM_addtest2")
    date_stub = datetime.datetime.strptime(date_stub_string, "%Y-%m-%d")
    print("Converted date stub:", date_stub)
    current_date = date_stub
else:
    current_date = get_today_date()

print("--- 24/7 support - upcoming shift notifier ---")

print("Today is", WEEK_DAYS[current_date.weekday()],
      current_date.strftime("%Y-%m-%-d"))

# Check for None first: WEEK_DAYS.index(None) would raise a ValueError
if NOTIFICATION_WEEK_DAY is None or \
        current_date.weekday() == WEEK_DAYS.index(NOTIFICATION_WEEK_DAY):
    print("Sending notifications")
    # sys.exit(0)
    moc_info_df, engineer_df, moc_calendar_df, engineer_calendar_df = import_data(
    )
    next_week_engineers_schedule_df = get_next_week_engineers_schedule_df(
        current_date, engineer_calendar_df)
    next_week_moc_schedule_df = get_next_week_moc_schedule_df(
        current_date, moc_calendar_df)

    next_week_engineers_df, next_week_moc_df = get_next_week_engineers_and_moc(
        next_week_engineers_schedule_df, next_week_moc_schedule_df)

    email_list = send_notifications(next_week_engineers_schedule_df,
                                    next_week_moc_schedule_df,
                                    next_week_engineers_df, next_week_moc_df,
                                    current_date)
    if should_simulate_email_sending:
        simulate_sending_mails(email_list)
else: