Example #1
def __startRecordingAndStreaming():

    if isMonitoringWorking():
        return
    global __monitoringWorking
    __monitoringWorking = True

    LedChanger.lightPhotoLedOn()
    print("Monitoring has been started")
    try:
        with picamera.PiCamera() as camera:
            global __camera
            __camera = camera
            camera.resolution = (1640, 1232)
            # camera.framerate = 23

            import DataManager
            from DataManager import deviceName, videoDir
            DataManager.createVideoDirIfNotExists()
            videoPath = str(videoDir) + str(deviceName) + "_" + str(
                datetime.datetime.now()) + '.h264'
            print("a")
            camera.start_recording(videoPath, resize=(1024, 768))
            print("b")
            __currentMonitoringPeriodicTask.launchMonitoringPeriodicTask(
                camera, videoPath)
    except Exception as e:
        utils.printException(e)
        onMonitoringStopped()
    print("__startRecordingAndStreaming finished")
Example #2
# Assumed imports for this snippet; Logger, constant, DataManager and the
# module-level SSL context m_ctx come from the surrounding project.
import json
from urllib import request

def requestGetFromAliAPI(url, successBlock=None, failureBlock=None):
    global m_ctx

    bodys = {}      # unused placeholder for a request body
    method = 'GET'  # unused; urlopen issues a GET by default
    # Build and log the request URL
    Logger.log("now request.url = " + url)

    urlReq = request.Request(url)
    urlReq.add_header('Authorization', 'APPCODE ' + constant.AppCode)
    response = request.urlopen(urlReq, context=m_ctx)
    respData = response.read()
    # respData = "{\"showapi_res_code\":0,\"showapi_res_error\":\"\",\"showapi_res_body\":{\"ret_code\":0,\"list\":[{\"trade_money\":\"368947118.000\",\"diff_money\":\"-0.03\",\"open_price\":\"8.880\",\"code\":\"601006\",\"date\":\"2017-11-30\",\"min_price\":\"8.810\",\"market\":\"sh\",\"trade_num\":\"416807\",\"turnover\":\"0.280\",\"close_price\":\"8.850\",\"max_price\":\"8.910\",\"swing\":\"1.13\",\"diff_rate\":\"-0.34\"},{\"trade_money\":\"388937580.000\",\"diff_money\":\"-0.02\",\"open_price\":\"8.900\",\"code\":\"601006\",\"date\":\"2017-11-29\",\"min_price\":\"8.820\",\"market\":\"sh\",\"trade_num\":\"438484\",\"turnover\":\"0.295\",\"close_price\":\"8.880\",\"max_price\":\"8.920\",\"swing\":\"1.12\",\"diff_rate\":\"-0.22\"},{\"trade_money\":\"338012785.000\",\"diff_money\":\"-0.12\",\"open_price\":\"8.940\",\"code\":\"601006\",\"date\":\"2017-11-28\",\"min_price\":\"8.870\",\"market\":\"sh\",\"trade_num\":\"379584\",\"turnover\":\"0.255\",\"close_price\":\"8.900\",\"max_price\":\"8.970\",\"swing\":\"1.11\",\"diff_rate\":\"-1.33\"},{\"trade_money\":\"679468677.000\",\"diff_money\":\"0.0\",\"open_price\":\"8.940\",\"code\":\"601006\",\"date\":\"2017-11-27\",\"min_price\":\"8.790\",\"market\":\"sh\",\"trade_num\":\"762013\",\"turnover\":\"0.513\",\"close_price\":\"9.020\",\"max_price\":\"9.040\",\"swing\":\"2.77\",\"diff_rate\":\"0.0\"},{\"trade_money\":\"716821338.000\",\"diff_money\":\"-0.09\",\"open_price\":\"9.060\",\"code\":\"601006\",\"date\":\"2017-11-24\",\"min_price\":\"8.810\",\"market\":\"sh\",\"trade_num\":\"801930\",\"turnover\":\"0.539\",\"close_price\":\"9.020\",\"max_price\":\"9.100\",\"swing\":\"3.18\",\"diff_rate\":\"-0.99\"},{\"trade_money\":\"1061585849.000\",\"diff_money\":\"-0.27\",\"open_price\":\"9.400\",\"code\":\"601006\",\"date\":\"2017-11-23\",\"min_price\":\"9.030\",\"market\":\"sh\",\"trade_num\":\"1141401\",\"turnover\":\"0.768\",\"close_price\":\"9.110\",\"max_price\":\"9.490\",\"swing\":\"4.9\",\"diff_rate\":\"-2.88\"},{\"trade_money\":\"930883985.000\",\"diff_money\":\"0.09\",\"open_price\":\"9.310\",\"code\":\"601006\",\"date\":\"2017-11-22\",\"min_price\":\"9.210\",\"market\":\"sh\",\"trade_num\":\"1001580\",\"turnover\":\"0.674\",\"close_price\":\"9.380\",\"max_price\":\"9.390\",\"swing\":\"1.94\",\"diff_rate\":\"0.97\"},{\"trade_money\":\"1164999730.000\",\"diff_money\":\"0.26\",\"open_price\":\"9.000\",\"code\":\"601006\",\"date\":\"2017-11-21\",\"min_price\":\"8.960\",\"market\":\"sh\",\"trade_num\":\"1268321\",\"turnover\":\"0.853\",\"close_price\":\"9.290\",\"max_price\":\"9.350\",\"swing\":\"4.32\",\"diff_rate\":\"2.88\"},{\"trade_money\":\"471383497.000\",\"diff_money\":\"0.0\",\"open_price\":\"9.000\",\"code\":\"601006\",\"date\":\"2017-11-20\",\"min_price\":\"8.890\",\"market\":\"sh\",\"trade_num\":\"525579\",\"turnover\":\"0.354\",\"close_price\":\"9.030\",\"max_price\":\"9.060\",\"swing\":\"1.88\",\"diff_rate\":\"0.0\"},{\"trade_money\":\"971205411.000\",\"diff_money\":\"0.24\",\"open_price\":\"8.800\",\"code\":\"601006\",\"date\":\"2017-11-17\",\"min_price\":\"8.700\",\"market\":\"sh\",\"trade_num\":\"1093673\",\"turnover\":\"0.736\",\"close_price\":\"9.030\",\"max_price\":\"9.060\",\"swing\":\"4.1\",\"diff_rate\":\"2.73\"},{\"trade_money\":\"285205565.000\",\"diff_money\":\"-0.12\",\"open_price\":\"8.890\",\"code\":\"601006\",\"date\":\"2017-11-16\",\"min_price\":\"8.780\",\"market\":\"sh\",\"trade_num\":\"323445\",\"turnover\":\"0.218\",\"close_price\":\"8.790\",\"max_price\":\"8.890\",\"swing\":\"1.23\",\"diff_rate\":\"-1.35\"},{\"tra
de_money\":\"352421722.000\",\"diff_money\":\"-0.04\",\"open_price\":\"8.930\",\"code\":\"601006\",\"date\":\"2017-11-15\",\"min_price\":\"8.830\",\"market\":\"sh\",\"trade_num\":\"396352\",\"turnover\":\"0.267\",\"close_price\":\"8.910\",\"max_price\":\"8.960\",\"swing\":\"1.45\",\"diff_rate\":\"-0.45\"},{\"trade_money\":\"272978108.000\",\"diff_money\":\"-0.04\",\"open_price\":\"9.000\",\"code\":\"601006\",\"date\":\"2017-11-14\",\"min_price\":\"8.910\",\"market\":\"sh\",\"trade_num\":\"304990\",\"turnover\":\"0.205\",\"close_price\":\"8.950\",\"max_price\":\"9.010\",\"swing\":\"1.11\",\"diff_rate\":\"-0.44\"},{\"trade_money\":\"371140472.000\",\"diff_money\":\"0.15\",\"open_price\":\"8.840\",\"code\":\"601006\",\"date\":\"2017-11-13\",\"min_price\":\"8.820\",\"market\":\"sh\",\"trade_num\":\"415312\",\"turnover\":\"0.279\",\"close_price\":\"8.990\",\"max_price\":\"9.020\",\"swing\":\"2.26\",\"diff_rate\":\"1.7\"},{\"trade_money\":\"332588918.000\",\"diff_money\":\"-0.08\",\"open_price\":\"8.920\",\"code\":\"601006\",\"date\":\"2017-11-10\",\"min_price\":\"8.790\",\"market\":\"sh\",\"trade_num\":\"376272\",\"turnover\":\"0.253\",\"close_price\":\"8.840\",\"max_price\":\"8.930\",\"swing\":\"1.57\",\"diff_rate\":\"-0.9\"},{\"trade_money\":\"241490411.000\",\"diff_money\":\"0.04\",\"open_price\":\"8.870\",\"code\":\"601006\",\"date\":\"2017-11-09\",\"min_price\":\"8.850\",\"market\":\"sh\",\"trade_num\":\"271364\",\"turnover\":\"0.183\",\"close_price\":\"8.920\",\"max_price\":\"8.930\",\"swing\":\"0.9\",\"diff_rate\":\"0.45\"},{\"trade_money\":\"377205964.000\",\"diff_money\":\"0.02\",\"open_price\":\"8.860\",\"code\":\"601006\",\"date\":\"2017-11-08\",\"min_price\":\"8.830\",\"market\":\"sh\",\"trade_num\":\"424379\",\"turnover\":\"0.285\",\"close_price\":\"8.880\",\"max_price\":\"8.960\",\"swing\":\"1.47\",\"diff_rate\":\"0.23\"},{\"trade_money\":\"404532369.000\",\"diff_money\":\"0.03\",\"open_price\":\"8.820\",\"code\":\"601006\",\"date\":\"2017-11-07\",\"min_price\":\"8.780\",\"market\":\"sh\",\"trade_num\":\"457014\",\"turnover\":\"0.307\",\"close_price\":\"8.860\",\"max_price\":\"8.900\",\"swing\":\"1.36\",\"diff_rate\":\"0.34\"},{\"trade_money\":\"326183604.000\",\"diff_money\":\"-0.01\",\"open_price\":\"8.840\",\"code\":\"601006\",\"date\":\"2017-11-06\",\"min_price\":\"8.760\",\"market\":\"sh\",\"trade_num\":\"370228\",\"turnover\":\"0.249\",\"close_price\":\"8.830\",\"max_price\":\"8.860\",\"swing\":\"1.13\",\"diff_rate\":\"-0.11\"},{\"trade_money\":\"606428072.000\",\"diff_money\":\"-0.1\",\"open_price\":\"8.940\",\"code\":\"601006\",\"date\":\"2017-11-03\",\"min_price\":\"8.720\",\"market\":\"sh\",\"trade_num\":\"685665\",\"turnover\":\"0.461\",\"close_price\":\"8.840\",\"max_price\":\"8.950\",\"swing\":\"2.57\",\"diff_rate\":\"-1.12\"},{\"trade_money\":\"322562461.000\",\"diff_money\":\"0.03\",\"open_price\":\"8.920\",\"code\":\"601006\",\"date\":\"2017-11-02\",\"min_price\":\"8.840\",\"market\":\"sh\",\"trade_num\":\"361503\",\"turnover\":\"0.243\",\"close_price\":\"8.940\",\"max_price\":\"8.980\",\"swing\":\"1.57\",\"diff_rate\":\"0.34\"},{\"trade_money\":\"682116475.000\",\"diff_money\":\"-0.19\",\"open_price\":\"9.100\",\"code\":\"601006\",\"date\":\"2017-11-01\",\"min_price\":\"8.890\",\"market\":\"sh\",\"trade_num\":\"760051\",\"turnover\":\"0.511\",\"close_price\":\"8.910\",\"max_price\":\"9.100\",\"swing\":\"2.31\",\"diff_rate\":\"-2.09\"}]}}"
    if respData:
        DataManager.writeOriginalData(respData)
        Logger.logTip("connect success! Response data:\n %s" % respData)
        jsonAttrs = json.loads(respData)
        if jsonAttrs.get("showapi_res_code") == 0:
            Logger.logTip("getRequest success! url = [%s]" % url)
            bodyData = jsonAttrs.get("showapi_res_body")
            if successBlock:
                successBlock(bodyData)
        else:
            errorMsg = jsonAttrs.get("showapi_res_error")
            Logger.logError("URL response error code! ErrorMsg = %s" % errorMsg)
            if failureBlock:
                failureBlock(errorMsg)
    else:
        Logger.logError("Network connection error! url = %s" % url)
Example #3
def CPConvergenceTest():
    value = []
    error = []
    eventFiles = dm.GetFileNames(r'\Samples')  # get filenames
    eventFiles_CP = dm.GetFileNames(r'\Samples_CP')

    for i in range(len(eventFiles)):
        p = dm.AmpGendf(eventFiles[i], False)  # generate particle data
        pbar = dm.AmpGendf(eventFiles_CP[i], True)  # generate CP particle data

        C_T = kin.Scalar_TP(kin.Vector_3(p['p_3']), kin.Vector_3(p['p_4']),
                            kin.Vector_3(
                                p['p_1']))  # calculates scalar triple product
        C_Tbar = -kin.Scalar_TP(
            kin.Vector_3(pbar['p_3']), kin.Vector_3(pbar['p_4']),
            kin.Vector_3(pbar['p_1']))  # -sign for parity flip

        A_T = kin.TP_Amplitude(C_T)  # calculate parity asymmetries
        A_Tbar = kin.TP_Amplitude(C_Tbar)

        A_CP = kin.A_CP(A_T, A_Tbar)  # calculate A_CP
        value.append(A_CP[0])
        error.append(A_CP[1])

    pt.ErrorPlot([np.linspace(1, 10, len(eventFiles)), value],
                 axis=True,
                 y_error=error,
                 x_axis="Number of Events ($10^{5}$)",
                 y_axis="$\mathcal{A}_{CP}$")  # plots data
Example #4
def Seed_test():
    fileNames = dm.GetFileNames(r'\seed_test')  # get filenames
    events = fileNames[0:5]  # split the dataset in half
    events_CP = fileNames[5:10]  # make this half CP data

    value = []
    error = []
    for i in range(5):
        p = dm.AmpGendf(events[i], False)  # generate particle data
        pbar = dm.AmpGendf(events_CP[i], True)

        C_T = kin.Scalar_TP(kin.Vector_3(p['p_3']), kin.Vector_3(p['p_4']),
                            kin.Vector_3(
                                p['p_1']))  # calculates scalar triple product
        C_Tbar = -kin.Scalar_TP(kin.Vector_3(
            pbar['p_3']), kin.Vector_3(pbar['p_4']), kin.Vector_3(pbar['p_1']))

        A_T = kin.TP_Amplitude(C_T)  # calculate parity asymmetries
        A_Tbar = kin.TP_Amplitude(C_Tbar)

        A_CP = kin.A_CP(A_T, A_Tbar)  # calculate A_CP
        value.append(A_CP[0])
        error.append(A_CP[1])

    pt.ErrorPlot([np.linspace(1, 5, 5), value],
                 axis=True,
                 y_error=error,
                 x_axis="Iteration",
                 y_axis="$\mathcal{A}_{CP}$")  # plots data
Example #5
    def __init__(self, trackFile, truthFile):
        self.trackManager = dm.DataManager(trackFile, hasId=True)
        self.truthManager = dm.DataManager(truthFile, hasId=True)
        self.IMPOSSIBLE_SCORE = 1.0E10
        self.NEW_TRACK_SCORE = 10.0
        self.ASSOC_GATE = 2.0
        self.initData()
Example #6
class OtherStuffWidget(QWidget):
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.initUI()

        self.d_m = DataManager()
        self.d_m.add_event_handler(EventType.pose, self.newPoseData)

    def initUI(self):
        self.frame = QFrame(self)
        self.frame.setStyleSheet('background-color: white;')

        self.gestures = []
        self.files = [
            "images/make_fist.png", "images/wave_right.png",
            "images/wave_left.png", "images/spread_fingers.png",
            "images/unlock_gesture.png"
        ]
        for i in range(5):
            g = QLabel(self)
            g.setPixmap(QPixmap(self.files[i]))
            g.setScaledContents(True)
            self.gestures.append(g)

    def setGeometry(self, *__args):
        super().setGeometry(__args[0], __args[1], __args[2], __args[3])
        self.frame.resize(__args[2], __args[3])

        space = 10
        x = space
        y = space
        # Two columns across the width; shrink until three rows also fit the
        # height, keeping the labels square (width tied to height). Integer
        # division keeps the coordinates ints, as setGeometry expects.
        w = (__args[2] - space * 3) // 2
        h = w
        if h * 3 + space * 4 > __args[3]:
            h = (__args[3] - space * 4) // 3
        w = h

        for i in range(5):
            g = self.gestures[i]
            g.setGeometry(x, y, w, h)
            if (i + 1) % 2 == 0:
                x = space
                y = y + space + h
            else:
                x = x + space + w
            g.setStyleSheet("border-radius: " + str(int(w / 2)) +
                            "px; background-color: white;")

    def newPoseData(self, event):
        data = event["data"]
        pose = int(data["pose"])
        if pose == 0:
            for g in self.gestures:
                g.setStyleSheet(g.styleSheet().replace("yellow", "white"))
        else:
            g = self.gestures[pose - 1]
            g.setStyleSheet(g.styleSheet().replace("white", "yellow"))
Example #7
def evaluate(data_set, checkpoint_dir='tmp/train_data'):
    with tf.Graph().as_default():
        # Don't specify the number of epochs for the validation set, otherwise
        # it limits the training duration: the validation set is 10 times
        # smaller than the training set.
        # images, labels = read_data.inputs(data_set=data_set, batch_size=BATCH_SIZE, num_epochs=None)
        if data_set == "train":
            images, labels = DataManager.read_tfr_queue(train_cnn.DATA_SOURCE_TRAIN, BATCH_SIZE)
        else:
            images, labels = DataManager.read_tfr_queue(DataManager.TFR_SAVE_DIR + train_cnn.DATA_SOURCE_VALIDATION, BATCH_SIZE)
        logits = model_cnn.inference(images)
        accuracy_curr_batch = model_cnn.evaluation(logits, labels)

        # Restore moving averages of the trained variables
        mov_avg_obj = tf.train.ExponentialMovingAverage(model_cnn.MOVING_AVERAGE_DECAY)
        variables_to_restore = mov_avg_obj.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print('No checkpoint file found at %s' % checkpoint_dir)
                return

            coord = tf.train.Coordinator()

            try:
                threads = []
                for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                    threads.extend(qr.create_threads(sess, coord, daemon=True, start=True))

                num_iter = int(math.ceil(NUM_VALIDATION_EXAMPLES / BATCH_SIZE))
                step = 0
                acc_full_epoch = 0
                while step < num_iter and not coord.should_stop():
                    acc_batch_val = sess.run(accuracy_curr_batch)
                    acc_full_epoch += acc_batch_val
                    step += 1

                acc_full_epoch /= num_iter
                tf.summary.scalar('validation_accuracy', acc_full_epoch)
                summary_op = tf.summary.merge_all()
                #summary_writer = tf.train.SummaryWriter(EVAL_DATA_DIR)
                summary_writer = tf.summary.FileWriter(EVAL_DATA_DIR, sess.graph)
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

                print('Accuracy on full %s dataset = %.1f' % (data_set, acc_full_epoch))


            except Exception as e:
                coord.request_stop(e)

            coord.request_stop()

            coord.join(threads)
Example #8
    def LoadTestData(self, NumberOfImagesToLoad, TestWriteToFileFeature):
        dm = DataManager.DataManager(True)

        # Locate the test folder where we store the test images.
        ParentTestFolderPath = "C:\\FirebaseTestImages"
        ChildTestFolderPath = "C:\\FirebaseTestImages\\Images"

        # Retrieve all the contents of the test directory
        parentFolderFilesList = os.listdir(ParentTestFolderPath)
        filename = parentFolderFilesList[0]
        for a in range(NumberOfImagesToLoad):
            # Rename the test image file and move it to the Images folder.
            oldFileName = "{}\\{}".format(ParentTestFolderPath, filename)
            newFileName = "{}.png".format(self.GetCurrentDateTimeStamp())
            newFileLocation = "{}\\{}".format(ChildTestFolderPath, newFileName)
            fbNewFileLocation = "images/{}".format(newFileName)
            #Copy the file to child folder, TestImages
            shutil.copy(oldFileName, "{}\\{}".format(ChildTestFolderPath,
                                                     newFileName))
            print("New File Copied: {}\\{}".format(ChildTestFolderPath,
                                                   newFileName))

            # Add the new image to images folder in Firebase
            storage = self.firebase.storage()
            result = storage.child(fbNewFileLocation).put(newFileLocation)

            # Get the URL of the image newly added to Firebase
            userToken = storage.credentials.access_token
            firebaseImageURL = storage.child(fbNewFileLocation).get_url(
                userToken)

            # Retrieve latitude and longitude to add to our test data.
            g = geocoder.google('Bluffton, SC')
            latitude = g.latlng[0]
            longitude = g.latlng[1]

            # Add test data to the YardsTasks table
            testData = {
                "Latitude": latitude,
                "Longitude": longitude,
                "CreatedDate": dm.GenerateDateTimeStamp(),
                "ModifiedDate": dm.GenerateDateTimeStamp(),
                "UserID": self.testUserID,
                "ImageURL": firebaseImageURL,
                "ImageName": newFileName,
                "Tags": "SOD",
                "ImageProcessed": 0,
                "TaskComplete": 0,
                "ImageClassified": ''
            }

            db = self.firebase.database()
            result = db.child("YardTasks").push(testData)

            if (TestWriteToFileFeature):
                dm = DataManager.DataManager(True)
                dm.WriteTasksToFile(result['name'], newFileName)

            time.sleep(3)
Example #9
def onSuccessRequest(bodyData):
    Logger.log("is onSuccessRequest:: ---- %s" % str(bodyData))
    if bodyData is not None and bodyData.get("list") is not None:
        listData = bodyData.get("list")
        Logger.log("item.count = %s" % len(listData))
        if len(listData) == 0:
            return
        # for classifyList in listData:
        DataManager.saveJsonDataToFile(m_csvFileName, listData)
Example #10
    def __init__(self, parent, width=None, height=None):
        OwnFrame.__init__(self, parent, width, height)
        self.frame = super().getFrame()
        self.dataManager = DataManager()

        self._arrangeUI()
        self._retranslateAll()

        self._textInputRestrict()
        self._arrangeDataInWidgets()
Example #11
def main():
    """Scrape the book list and persist each entry to CSV."""
    books = scrap()

    for k in range(len(books)):
        DataManager.managecsv(books[str(k)])
Example #12
def makeHeartbeatCall():
    from startServer import app
    with app.test_request_context():
        try:
            res = pyrequests.post(DataManager.getHeartbeatEndpoint(), headers=jsonHeaders,
                                  data=DataManager.getHeartbeatJson())
            if res.status_code != 200:
                LedChanger.lightErrorLedOn()
        except Exception as e:
            LedChanger.lightErrorLedOn()
            utils.printException(e)
Example #13
    def setup(self):
        try:
            self.prologConnector = DataManager(self.row, self.column,
                                               self.grid)
            self.prologConnector.setup()
            self.availableSlot = self.prologConnector.getLenPark()
        except IndexError:
            message = "The file you are trying to load has an invalid dimension.\nValid dimension is 25x25"
            messagebox.showerror("Invalid Dimension", message)
            m = Menu()
            m.main_loop()
Example #14
def start_Process():
    user_reply = input(colored.yellow('Save data to file? (y/n): '))

    if user_reply in ('y', 'yes'):
        keep_time = input(colored.yellow('Set Time Limit for Data Collection? (y/n): '))

        if keep_time.lower() in ('y', 'yes'):
            DataManager.write_tofile(0, True, True)
        else:
            size = input(colored.yellow('File Size (number of lines): '))
            DataManager.write_tofile(int(size), False, True)

    elif user_reply in ('n', 'no'):
        keep_time = input(colored.yellow('Set Time Limit for Data Collection? (y/n): '))

        if keep_time.lower() in ('y', 'yes'):
            DataManager.write_tofile(0, True, False)
        else:
            size = input(colored.yellow('File Size (number of lines): '))
            DataManager.write_tofile(int(size), False, False)

    else:
        puts(colored.red('---Invalid Input---'))
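The repeated yes/no parsing above could be factored into one small helper in the same clint-based prompt style; a sketch (the helper name is mine, not from the project):

    def ask_yes_no(prompt):
        # True for 'y'/'yes' in any case, False otherwise
        return input(colored.yellow(prompt)).strip().lower() in ('y', 'yes')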
Example #15
    def execute(self, context):
        self.report({'INFO'}, "Training model %s..." %
                    context.scene.speech2anim_data.training_videos_path)
        os.chdir(
            bpy.utils.user_resource("SCRIPTS", "addons") + config.ADDON_PATH +
            '/src')
        reloadConfig(context)
        # Clean previous output
        wops.rmdir(
            config.TEMPDATA_OUTPUT[3:],
            wops.clear(
                bpy.utils.user_resource("SCRIPTS", "addons") +
                config.ADDON_PATH))
        wops.mkdir(
            config.TEMPDATA_OUTPUT[3:],
            wops.clear(
                bpy.utils.user_resource("SCRIPTS", "addons") +
                config.ADDON_PATH))

        d = context.scene.speech2anim_data
        DataManager.Train(d.training_videos_path, d.training_model_path)

        paths = DataManager.getTrainingVideoPaths(d.training_videos_path)
        #TODO: refactor
        #for every video in the training folder
        for path in paths:
            #get the name
            name = path.split('/')[-1].split('.')[0]
            exists = False
            #if we don't have it in the list, add it
            for p in d.training_videos_list:
                if p.name == name:
                    exists = True

            if not exists:
                item = d.training_videos_list.add()
                item.path = path
                item.name = name

        # iterate in reverse so removing items doesn't shift unvisited indices
        for i in reversed(range(len(d.training_videos_list))):
            p = d.training_videos_list[i]
            exists = False
            for path in paths:
                name = path.split('/')[-1].split('.')[0]
                if p.name == name:
                    exists = True
            if not exists:
                d.training_videos_list.remove(i)

        return {'FINISHED'}
Example #16
def thread_handle_sending_and_deleting_image(imagePath):
    try:
        with open(imagePath, 'rb') as img:
            imageBasename = os.path.basename(imagePath)
            files = {'img': (imageBasename, img, 'multipart/form-data')}
            with pyrequests.Session() as s:
                print("DataSender, starting to post image to server: " + str(imagePath))
                r = s.post(DataManager.getPhotoReceiveEndpoint(), files=files)
                print("DataManager.getPhotoReceiveEndpoint(), status code: " + str(r.status_code))
                if r.status_code == 200:
                    DataManager.deleteFile(imagePath)
    except Exception as e:
        utils.printException(e)

    DataManager.makeStorageCheck()
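The function name suggests it runs on a worker thread; a plausible call-site sketch (the caller and the image path shown here are assumptions, not project code):

    import threading

    t = threading.Thread(target=thread_handle_sending_and_deleting_image,
                         args=('/home/pi/photos/photo_0001.jpg',))
    t.daemon = True
    t.start()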
Example #17
    def init(self, pDataPath):
        self._dataManager = DataManager(pDataPath, self._threshold,
                                        self._limit, self._system)
        self._dataManager.readData()

        self._trackers = []
        self._printer = BoxPrinter(self._paletteWidth, self._thicknessLine)

        self._idLKEnumerator = 1
        self._idCSRTEnumerator = 1

        self._printStack = {}

        self._sortActivated = None
        self._centroidActivated = None
Example #18
def PrepareData(name, cut):
    p, pbar, weights, weightsbar = dm.ReadRealData(
        name, cut
    )  # get the particle dictionaries and weights, split into regular and conjugate sets
    p = BoostIntoRest(p)  # Boosts particles into the COM frame
    pbar = BoostIntoRest(pbar)
    return p, pbar, weights, weightsbar
Example #19
def GetData():
    X = FM.FeaturesData()
    y = DM.CreateSalesFrame()

    for i in y.index:

        if y['Week Number'][i] < 36 and y['Year'][i] == 2012:
            y = y.drop([i])

        elif y['Week Number'][i] > 44 and y['Year'][i] == 2019:
            y = y.drop([i])

    y = y.drop(columns=['Year', 'Week Number'])

    # Create training and testing sets in the shape the sklearn API expects
    X_train, X_test, y_train, y_test = ms.train_test_split(X,
                                                           y,
                                                           test_size=0.20,
                                                           random_state=0,
                                                           shuffle=False)

    # Standardize features: fit the scaler on the training set only, then
    # apply the same transform to the test set (matters for scale-sensitive
    # models such as SVMs or k-NN)
    sc_X = StandardScaler()
    X_train = sc_X.fit_transform(X_train)
    X_test = sc_X.transform(X_test)

    return X_train, X_test, y_train, y_test
Example #20
    def test3(self):
        self.dataManagerTest = DM.DataManager(
            self.params['ModelParams']['dirTest'],
            self.params['ModelParams']['dirResult3'],
            self.params['DataManagerParams'])
        self.dataManagerTest.loadTestData()

        net = caffe.Net(
            self.params['ModelParams']['prototxtTest'],
            os.path.join(
                self.params['ModelParams']['dirSnapshotsONOFF'],
                "_iter_" + str(self.params['ModelParams']['snapshotONOFF']) +
                ".caffemodel"), caffe.TEST)

        numpyImages = self.dataManagerTest.getNumpyImages()
        #        originNumpy = self.dataManagerTest.getNumpyImages()
        for key in numpyImages:
            mean = np.mean(numpyImages[key][numpyImages[key] > 0])
            std = np.std(numpyImages[key][numpyImages[key] > 0])

            numpyImages[key] -= mean
            numpyImages[key] /= std

        results = dict()

        for key in numpyImages:

            btch = np.reshape(numpyImages[key], [
                1, 1, numpyImages[key].shape[0], numpyImages[key].shape[1],
                numpyImages[key].shape[2]
            ])
            net.blobs['data'].data[...] = btch
            print numpyImages[key].shape
            out = net.forward()
            l = out["labelmap"]
            print l.shape
            labelmap = np.squeeze(l[0, 1, :, :, :])
            res = np.squeeze(labelmap)
            res = np.transpose(res, [2, 1, 0])
            res = np.transpose(res, [1, 0, 2])
            filename, ext = splitext(key)

            writer = sitk.ImageFileWriter()
            writer.SetFileName(
                os.path.join(self.params['ModelParams']['dirResult3'],
                             filename + "_rotate" + ext))
            im2 = sitk.GetImageFromArray(np.squeeze(res), isVector=False)
            im2 = sitk.Cast(sitk.RescaleIntensity(im2), sitk.sitkUInt8)
            writer.Execute(im2)
Example #21
    def run(self):

        dataManagerTest = DM.DataManager(
            vnet_config.params['ModelParams']['dirTest'],
            vnet_config.params['ModelParams']['dirResult'],
            vnet_config.params['DataManagerParams'])
        #are you serious @fausto?????
        dataManagerTest.createImageFileList()
        dataManagerTest.loadImages()
        dataManagerTest.createGTFileList()
        dataManagerTest.loadGT()

        volumes = dataManagerTest.getNumpyImages()
        labels = dataManagerTest.getNumpyGT()

        for key in volumes:
            image_num = re.findall(r"\d+", key)[0]
            print key
            print image_num
            yield (image_num, volumes[key],
                   labels['label' + str(image_num) + '.nii'])
Example #22
    def __init__(self, parent=None):
        super().__init__(parent=parent)

        self.data_manager = DataManager()
        self.data_manager.myo.interval.connect(self.interval)

        self.data_manager.add_event_handler(EventType.connected, self.connectionChanged)
        self.data_manager.add_event_handler(EventType.disconnected, self.connectionChanged)
        self.data_manager.add_event_handler(EventType.rssi, self.newRssiData)
        self.data_manager.add_event_handler(EventType.battery_level, self.newBatteryData)

        self.timer = QTimer()
        self.timer.timeout.connect(self.updateBattery)
        self.last_battery = -1

        self.initUI()
Example #23
def makeNgrokAddressesCall():
    from startServer import app
    with app.test_request_context():
        try:
            res = pyrequests.post(DataManager.getNgrokAddressesEndpoint(), headers=jsonHeaders,
                                  data=DataManager.getNgrokAddressesAsJson(), timeout=10)
            if res.status_code != 200:
                if res.status_code == 404:  # Hotfix!!
                    return True
                LedChanger.lightErrorLedOn()
                return False
            return True
        except Exception as e:
            LedChanger.lightErrorLedOn()
            utils.printException(e)
            return False
Example #24
    def processScannedImages(self):
        newImagesForTrain = []
        for image in self.image_processing_list:
            pageNum = Check_image_page(image.imagePath)
            newImagesForTrain = newImagesForTrain + ExportHandriteLinesFromScannedDoc(image, pageNum)
        numS, numE = DataManager.Insert_to_database(newImagesForTrain)
        return (numS, numE)
Example #25
def MultiSampleDalitzParameters(particles, CP=False, splitNum=100):
    data = dm.SplitEvents(particles,
                          splitNum)  # splits events into smaller sets
    parameters = []
    progress = 0
    # Calculate CM variables and C_T for each subset
    for d in data:
        progress += 1
        print("\r" + str(round(progress / len(data) * 100, 2)), end="")
        params = DalitzParameters(d,
                                  CP)  # calculate statistics for the data set
        parameters.append(params)

    new_list = []
    # Merge each CM variable and C_T calculated for each data set
    for i in range(6):
        subset = []
        for j in range(len(parameters)):
            subset.append(parameters[j][i])
        new_list.append(subset)

    final_data = []
    # Put the calculated values into a single list
    for i in range(6):
        final_data.append(np.concatenate(new_list[i]))

    return final_data
Example #26
def MultiSampleDalitzParameters(particles, split=10):
    data = dm.SplitEvents(particles, split)  # splits events into smaller sets
    parameters = []
    progress = 0

    # Calculate CM variables for each subset
    for d in data:
        progress += 1
        print(progress / len(data) * 100)
        params = DalitzParameters(d)  # calculate statistics for the data set
        parameters.append(params)

    new_list = []
    # Merge each CM variable and C_T calculated for each data set
    for i in range(5):
        subset = []
        for j in range(len(parameters)):
            subset.append(parameters[j][i])
        new_list.append(subset)

    final_data = []
    # Put the calculated values into a single list
    for i in range(5):
        subset = np.array(new_list[i])  # converts the list into an array
        subset = list(subset.flatten('F'))  # flattens column-wise: columns are different CM variables for different C_T
        lower = subset[:int(len(subset) / 2)]  # gets C_T < 0 states
        lower = np.concatenate(lower).ravel()  # merge columns into one column
        upper = subset[int(len(subset) / 2):]  # gets C_T > 0 states
        upper = np.concatenate(upper).ravel()
        subset = [lower, upper]  # lower and upper values for the single CM variable
        final_data.append(subset)
    return final_data
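As a quick illustration of the column-major flatten used above (values are illustrative):

    import numpy as np

    a = np.array([[1, 2], [3, 4]])
    print(a.flatten('F'))  # -> [1 3 2 4]: walks columns first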
Example #27
    def test(self, snapnumber):
        # produce the results of the testing data
        torch.cuda.set_device(self.params['ModelParams']['device'])
        self.dataManagerTesting = DMoriginal.DataManager(
            self.params['ModelParams']['dirTest'],
            self.params['ModelParams']['dirResult'],
            self.params['DataManagerParams'],
            self.params['TestParams']['ProbabilityMap'])
        self.dataManagerTesting.loadTestData()

        #model = resnet3D.resnet34(nll = False)
        model = vnet.VNet(nll=False)

        prefix_save = os.path.join(self.params['ModelParams']['dirSnapshots'],
                                   self.params['ModelParams']['tailSnapshots'])
        name = prefix_save + str(snapnumber) + '_' + "checkpoint.pth.tar"
        checkpoint = torch.load(name)
        # load the snapshot into the model
        model.load_state_dict(checkpoint['state_dict'])
        model.cuda()
        # produce the segmentation results
        results = self.getTestResultImages(
            model, self.params['TestParams']['ProbabilityMap'])

        for key in results:
            self.dataManagerTesting.writeResultsFromNumpyLabel(
                results[key], key)
Example #28
    def __splitCurrentRecording(self):
        try:
            import DataManager
            from DataManager import deviceName, videoDir
            DataManager.createVideoDirIfNotExists()
            _video_path = str(videoDir) + str(deviceName) + "_" + str(datetime.datetime.now()).replace(" ", "_") + '.h264'

            print("c")
            self.__camera.split_recording(_video_path)
            print("d")
            path_to_return = self.__previous_monitoring_video_path
            self.__previous_monitoring_video_path = _video_path
            return path_to_return
        except Exception as e:
            utils.printException(e)
            return None
Example #29
def leer_entrenamiento():
    training, header = dm.read_csv()
    os.system("cls")
    print("Read", len(training), "training records.\n")
    os.system("pause")
    creador_de_arboles(training, header)
Example #30
def leer_test(tree):
    testing, header = dm.read_csv()
    os.system("cls")
    print("Read", len(testing), "test records.\n")
    os.system("pause")
    menu_predicciones(tree, testing, header)
Example #31
File: VNet.py Project: yao-matrix/VNet
    def test(self):
        self.dataManagerTest = DM.DataManager(self.params['ModelParams']['dirTest'], self.params['ModelParams']['dirResult'], self.params['DataManagerParams'])
        self.dataManagerTest.loadTestData()

        net = caffe.Net(self.params['ModelParams']['prototxtTest'],
                        os.path.join(self.params['ModelParams']['dirSnapshots'], "_iter_" + str(self.params['ModelParams']['snapshot']) + ".caffemodel"),
                        caffe.TEST)

        numpyImages = self.dataManagerTest.getNumpyImages()

        for key in numpyImages:
            mean = np.mean(numpyImages[key][numpyImages[key] > 0])
            std = np.std(numpyImages[key][numpyImages[key] > 0])

            numpyImages[key] -= mean
            numpyImages[key] /= std

        results = dict()

        for key in numpyImages:
            btch = np.reshape(numpyImages[key], [1, 1, numpyImages[key].shape[0], numpyImages[key].shape[1], numpyImages[key].shape[2]])

            net.blobs['data'].data[...] = btch

            out = net.forward()
            l = out["labelmap"]
            labelmap = np.squeeze(l[0, 1, :, :, :])

            results[key] = np.squeeze(labelmap)

            self.dataManagerTest.writeResultsFromNumpyLabel(np.squeeze(labelmap), key)
Example #32
    def update_context(self, action='APPEND'):
        """
        Context Broker updateContext function
            :param action: update context action ['APPEND', 'UPDATE', 'DELETE']
            :rtype : requests.models.Response
        """
        if action not in ['APPEND', 'UPDATE', 'DELETE']:
            msg = "ContextBroker.update_context():The action passed to the function was not valid"
            DM.data_manager_error(msg)

        if len(self.entity.get_entity_list()) == 0:
            msg = "ContextBroker.update_context(): Empty entity_list was passed to the function"
            DM.data_manager_error(msg)

        payload = {'contextElements': self.entity.get_entity_list(),
                   'updateAction': action}

        data = json.dumps(payload)
        url = self.CBurl+'/v1/updateContext'
        response = self.get_response(data, url)

        if response.status_code == 401:
            msg = "ContextBroker.update_context(): User token not authorized."
            DM.data_manager_error(msg)

        self.clean_all()

        return response
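For reference, the payload this method posts to /v1/updateContext has the usual NGSIv1 shape; a representative value (the entity content is illustrative, not from this project):

    payload = {
        'contextElements': [{
            'type': 'Room', 'isPattern': 'false', 'id': 'Room1',
            'attributes': [
                {'name': 'temperature', 'type': 'float', 'value': '23'}
            ]
        }],
        'updateAction': 'APPEND'
    }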
Example #33
    def get_auth_token(self):
        """
        Returns the IDM token.
            :rtype : unicode
        """
        try:
            file_path = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))

            if not os.path.exists('%s/auth/auth.dat' % file_path):
                if not os.path.exists('%s/auth' % file_path):
                    os.mkdir('%s/auth' % file_path)
                # the with-block closes the file; no explicit close() needed
                with open('%s/auth/auth.dat' % file_path, 'w') as json_file:
                    j_data = json.dumps({'token': ''})
                    json_file.write(j_data)

            with open('%s/auth/auth.dat' % file_path, 'r') as json_file:
                self.token = json.loads(json_file.read())['token']

        except Exception as e:
            msg = "OrionAction.get_auth_token(): %s" % e
            DM.data_manager_error(msg)
Example #34
    def get_response(self, data, url):
        """
        Context Broker request
            :param data:
            :param url:
            :rtype : requests.models.Response
        """
        try:
            if self.orion:
                headers = {'Content-Type': 'application/json', "X-Auth-Token": self.token, 'Accept': 'application/json'}
            else:
                headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

            if self.tenant != '':
                headers['Fiware-Service'] = self.tenant
            if self.service_path != '':
                headers['Fiware-ServicePath'] = self.service_path

            response = requests.post(url, headers=headers, data=data)
            return response

        except requests.RequestException as e:
            msg = "ContextBroker.get_response(): %s" % e
            DM.data_manager_error(msg)
Example #35
File: test.py Project: coderChase/jxcnv
from DataManager import *
from hmm.Model import *
from hmm.ModelParams import *
import numpy as np

if __name__ == '__main__' :
    datafile = '8_DATA.PCA_normalized.filtered.sample_zscores.RD.txt'
    #datafile = 'data'
    outputfile = 'output'
    paramsfile = 'params.txt'

    print 'Loading data file...'
    dataloader = DataManager(datafile)
    params = dataloader.getParams(paramsfile)
    dataloader.skipHeadline()
    sample = dataloader.getNextSample()
    targets_list = dataloader.getTargetsList()
    output = file(outputfile, 'w')
    while sample :
        # target_index is used to split the observations sequence
        target_index_begin = 0
        target_index_end = 0
        temp = 1
        for targets in targets_list:
            print 'Running HMM for sample[' + sample['sample_id'] + ']: ',
            print 'chr' + targets[0]._chr + ' [' + str(temp) + '\\' + str(len(targets_list)) + ']'
            temp += 1
            target_index_end = target_index_begin + len(targets)

            modelParams = ModelParams(params, targets)
            # the sample's 'observations' are split here
Example #36
File: jxcnv.py Project: coderChase/jxcnv
def discover(args) :
    datafile = args.datafile
    outputfile = args.output
    paramsfile = args.params
    sample_req = args.sample
    sample_flag = False #used to check whether sample_req exists

    print 'Loading data file...'
    dataloader = DataManager(datafile)
    params = dataloader.getParams(paramsfile)
    dataloader.skipHeadline()
    sample = dataloader.getNextSample()
    targets_list = dataloader.getTargetsList()
    output = file(outputfile, 'w')
    output.write('SAMPLE_ID\tCNV\tFULL_INTERVAL\tINDEX\tINTERVAL\tREAD_DEPTH\n')
    while sample :
        if sample_req == '' or (sample_req != '' and sample['sample_id'] == sample_req):
            sample_flag = True
            # target_index is used to split the observations sequence
            target_index_begin = 0
            target_index_end = 0
            temp = 1
            for targets in targets_list:
                print 'Running HMM for sample[' + sample['sample_id'] + ']: ',
                print 'chr' + targets[0]._chr + ' [' + str(temp) + '\\' + str(len(targets_list)) + ']'
                temp += 1
                target_index_end = target_index_begin + len(targets)

                modelParams = ModelParams(params, targets)
                # the sample's 'observations' are split here
                model = Model(modelParams, sample['observations'][target_index_begin:target_index_end])
                pathlist = model.forwardBackward_Viterbi()
                dataloader.outputCNV(output, sample['sample_id'], targets, pathlist, sample['observations'][target_index_begin:target_index_end])
                target_index_begin = target_index_end
        sample = dataloader.getNextSample()

    output.close()
    dataloader.closeFile()

    if not sample_flag:
        print 'Could not find the sample_id specified.'
Example #37
def discover(args) :
    paramsfile = args.params
    sample_req = args.sample
    hetsnp = args.hetsnp
    tagsnp = args.tagsnp
    vcf_file = args.vcf

    hetsnp = hetsnp in ('True', 'TRUE')
    tagsnp = tagsnp in ('True', 'TRUE')

    datafile = args.rpkm_matrix
    f_dir = os.path.dirname(datafile)
    if f_dir != '':
        f_dir = f_dir + '/'

    if args.output:
        outputfile = f_dir + str(args.output)

    tagsnp_file = args.tagsnp_file
    mode = args.mode

    sample_flag = False #used to check whether sample_req exists

    # Build a reference set
    if mode in ('single', 'baseline', 'reference', 'ref'):
        print 'Building the reference dataset...'
        dataloader = DataManager(datafile)
        samples_np = dataloader.getAllSamples()
        dataloader.closeFile()
        print 'Baseline is Done.'

    print 'Loading data file...',
    dataloader = DataManager(datafile)
    print 'Done!'
    print 'Loading paramters...',
    params = dataloader.getParams(paramsfile)
    print 'Done!'
    dataloader.skipHeadline()
    sample = dataloader.getNextSample()

    targets_list = dataloader.getTargetsList()
    output_aux = file(outputfile+'.aux', 'w')
    output_aux.write('SAMPLE_ID\tCNV_TYPE\tFULL_INTERVAL\tINDEX\tINTERVAL\tREAD_DEPTH\n')
    output = file(outputfile,'w')
    output.write('SAMPLE_ID\tCNV_TYPE\tINTERVAL\tCHROMOSOME\tSTART\tSTOP\tLENGTH\n')

    if (hetsnp or tagsnp) and vcf_file == '':
        print 'Error: please indicate a vcf file!'
        sys.exit(0)

    if vcf_file != '':
        vcf_reader = VCFReader(vcf_file)
    else:
        vcf_reader = False

    if tagsnp:
        print 'Loading tagSNP information ...',
        cnp_dict = vcf_reader.loadTagSNP(tagsnp_file)
        print 'Done!'

    while sample :
        if sample_req == '' or (sample_req != '' and sample['sample_id'] == sample_req):
            sample_flag = True
            print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), sample_req, '......'

            #Renjie added: To check whether the VCF contains sample_req.
            vcf_checker = vcf.Reader(open(vcf_file,'r'))
            if sample['sample_id'] in vcf_checker.samples:
                sample_in_VCF = True
            elif sample_req in vcf_checker.samples:
                sample_in_VCF = True
            else:
                print 'No sample %s in VCF file.'%sample_req
                sample_in_VCF = False

            if hetsnp and sample_in_VCF :
                print 'Parsing SNV information from VCF file for: ' + sample['sample_id']
                snp_info = vcf_reader.getSNPInfo(sample['sample_id'], targets_list)

            if tagsnp and sample_in_VCF:
                print 'Analysing tagSNP information from tagSNP database for: ' + sample['sample_id'],
                cnp_list = vcf_reader.findTagSNPForSample(sample['sample_pop'], sample['sample_id'], cnp_dict)
                tagsnp_info_list = vcf_reader.findExonWithTagSNP(cnp_list, targets_list, overlap_threshold=0.5)
                print len(tagsnp_info_list)

            # estimate NB parameters from sample['observations']
            sample_observations = []
            remove_list = []
            sample['observations'] = [float(x) for x in sample['observations']]

            # slicing: target_index is used to split the observations sequence
            target_index_begin = 0
            target_index_end = 0
            temp = 1

            sample_observations_list = []
            snp_info_list = []

            for i, targets in enumerate(targets_list):
                target_index_end = target_index_begin + len(targets)
                if hetsnp and sample_in_VCF:
                    snp_info_list.append(snp_info[target_index_begin:target_index_end])
                sample_observations_list.append(sample['observations'][target_index_begin:target_index_end])

                target_index_begin = target_index_end

            # Filtering:
            if mode in ('svd', 'SVD', 'pooled', 'pooled-sample'):
                for i in range(len(sample_observations_list)):
                    sample_observations_list[i] = ndarray.tolist(stats.zscore(sample_observations_list[i]))

            elif mode in ('baseline', 'reference', 'single', 'single-sample'):
                # filter out targets whose observation is NaN

                for i in range(len(targets_list)):
                    rem_index = []
                    for j in range(len(targets_list[i])):
                        value = sample_observations_list[i][j]
                        if np.isnan(float(value)):
                            rem_index.append(j)
                    #filter target_list, snp_list and observation_list    
                    targets_list[i] = jf.filter_list_by_list(targets_list[i], rem_index)
                    sample_observations_list[i] = jf.filter_list_by_list(sample_observations_list[i], rem_index)
                    if hetsnp and sample_in_VCF:
                        snp_info_list[i] = jf.filter_list_by_list(snp_info_list[i], rem_index)
                    if tagsnp and sample_in_VCF:
                        tagsnp_info_list[i] = jf.filter_list_by_list(tagsnp_info_list[i], rem_index)

                # Parameter estimation
                observations_all_list = []
                for i in range(len(sample_observations_list)):
                    observations_all_list.extend(sample_observations_list[i])

                parameterLoader = ParameterEstimation(observations_all_list)
                parameterList = parameterLoader.fit(observations_all_list, 0.01, 0.99)
                print "Estimated Parameters: ", parameterList
                params.append(parameterList[0])  # mu
                params.append(parameterList[1])  # sd
                    
            for i, targets in enumerate(targets_list):
                print 'Running HMM for sample[' + sample['sample_id'] + ']: ',
                print 'chr' + targets[0]._chr + ' [' + str(temp) + '|' + str(len(targets_list)) + ']'
                temp += 1

                # Run the HMM
                if not hetsnp and not tagsnp:
                    modelParams = ModelParams(mode, params, targets, het_nums=0, tagsnp=0)
                elif sample_in_VCF and hetsnp and not tagsnp:
                    modelParams = ModelParams(mode, params, targets, snp_info_list[i], tagsnp=0)
                elif sample_in_VCF and not hetsnp and tagsnp:
                    modelParams = ModelParams(mode, params, targets, het_nums=0, tagsnp=tagsnp_info_list[i])
                elif sample_in_VCF and hetsnp and tagsnp:
                    modelParams = ModelParams(mode, params, targets, snp_info_list[i], tagsnp_info_list[i])
                elif not sample_in_VCF and hetsnp and tagsnp:
                    modelParams = ModelParams(mode, params, targets, het_nums=0, tagsnp=0)
                else:
                    pdb.set_trace()
	
                model = Model(mode, modelParams, sample_observations_list[i])
                pathlist = list()

                if vcf_reader and sample_in_VCF:
                    pathlist = model.forwardBackward_Viterbi(mode, if_snp=True)
                else:
                    pathlist = model.forwardBackward_Viterbi(mode, if_snp=False)
                dataloader.outputCNVaux(output_aux, sample['sample_id'], targets, pathlist, sample_observations_list[i])
                dataloader.outputCNV(output, sample['sample_id'], targets, pathlist, sample_observations_list[i])

        sample = dataloader.getNextSample()

    output.close()
    output_aux.close()
    dataloader.closeFile()

    if not sample_flag:
        print 'Could not find the sample_id specified.'
Example #38
File: main.py Project: rex8312/Test0710
def game():
    tick = 0
    state = [[(list(), list()) for x in range(3)] for y in range(3)]

    red_hq = HQ(TEAM.RED, 1, 0)
    blue_hq = HQ(TEAM.BLUE, 1, 2)

    state[0][1][TEAM.RED].append(red_hq)
    state[2][1][TEAM.BLUE].append(blue_hq)

    red_player_ai = RedPlayerAI(red_hq)
    blue_player_ai = BluePlayerAI(blue_hq)

    if VISUAL:
        gs = pl.GridSpec(5, 1)
        state_view = pl.subplot(gs[:3, :])
        assets_view = pl.subplot(gs[3, :])
        progress_view = pl.subplot(gs[4, :])

    data_manager = DataManager()
    data_manager.reset()
    model = data_manager.get_model()
    blue_player_ai.model = model

    result = RESULT.DRAW
    while True:
        if red_player_ai.hq.hp <= 0 and blue_player_ai.hq.hp <= 0:
            print "DRAW"
            data_manager.add_win(RESULT.DRAW)
            break
        elif red_player_ai.hq.hp <= 0:
            print "BLUE WIN"
            result = RESULT.BLUE_WIN
            data_manager.add_win(RESULT.BLUE_WIN)
            break
        elif blue_player_ai.hq.hp <= 0:
            print "RED_WIN"
            result = RESULT.RED_WIN
            data_manager.add_win(RESULT.RED_WIN)
            break

        # print 'R', red_player_ai.money, red_hq.hp
        # print 'B', blue_player_ai.money, blue_hq.hp
        state = blue_player_ai.act(state, q=data_manager.transform(state, red_player_ai, blue_player_ai))
        state = red_player_ai.act(state)

        state = group_ai(state)
        state = update(state)
        red_player_ai.money += 1
        blue_player_ai.money += 1
        red_assets, blue_assets = data_manager.evaluate_state(state, red_player_ai, blue_player_ai)
        data_manager.add_sa(tick, red_player_ai, blue_player_ai, state, red_assets, blue_assets)
        state = update(state)
        if VISUAL:
            draw(state_view, assets_view, progress_view, state, data_manager)
        tick += 1
        # print

    if VISUAL:
        pl.pause(3)
    data_manager.save()
    return result