Example #1
    def upload_file(self, url, update=False, local_path=None,
                    size=0, threaded=False):
        """Adds the file to the OSF project.
        If containing folder doesn't exist then it will be created recursively

        update is used if the file already exists but needs updating (version
        will be incremented).
        """
        if threaded:
            if self.uploader is None or \
                    self.uploader.status != NOT_STARTED:  # can't re-use
                self.uploader = PushPullThread(
                    session=self, kind='push',
                    finished_callback=self.finished_uploads)
            self.uploader.add_asset(url, local_path, size)
        else:
            with open(local_path, 'rb') as f:
                reply = self.put(url, data=f, timeout=10.0)
            with open(local_path, 'rb') as f:
                local_md5 = hashlib.md5(f.read()).hexdigest()
            if reply.status_code not in [200, 201]:
                raise exceptions.HTTPSError(
                    "URL:{}\nreply:{}"
                    .format(url, json.dumps(reply.json(), indent=2)))
            node = FileNode(self, reply.json()['data'])
            if local_md5 != node.json['attributes']['extra']['hashes']['md5']:
                raise exceptions.OSFError(
                    "Uploaded file did not match the expected MD5 hash. "
                    "Maybe it didn't fully upload?")
            logging.info("Uploaded (unthreaded): {}".format(local_path))
            return node
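
A minimal usage sketch for the method above, assuming `session` is an instance of the class that defines upload_file; the URL and local path are placeholder values.

# hypothetical values; a real upload URL comes from the OSF links of a folder or file
url = "https://files.osf.io/v1/resources/abcde/providers/osfstorage/?kind=file&name=data.csv"

# unthreaded: blocks until done, returns a FileNode, raises on HTTP or MD5 errors
node = session.upload_file(url, local_path="data.csv", size=1024, threaded=False)

# threaded: queues the transfer on a PushPullThread and returns immediately;
# completion is signalled via the finished_uploads callback
session.upload_file(url, local_path="data.csv", size=1024, threaded=True)
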
Example #2
    def setCurrent(self, calibration=-1):
        """
        Sets the current calibration for this monitor.
        Note that a single file can hold multiple calibrations each
        stored under a different key (the date it was taken)

        The argument is either a string (naming the calib) or an integer
        **eg**:

            ``myMon.setCurrent('mainCalib')``
            fetches the calibration named mainCalib
            ``calibName = myMon.setCurrent(0)``
            fetches the first calibration (alphabetically) for this monitor
            ``calibName = myMon.setCurrent(-1)``
            fetches the last alphabetical calib for this monitor (this is default)
            If default names are used for calibs (ie date/time stamp) then
            this will import the most recent.
        """
        #find the appropriate file
        #get desired calibration name if necess
        if isinstance(calibration, str) and (calibration in self.calibNames):
            self.currentCalibName = calibration
        elif type(calibration) == int and calibration < len(self.calibNames):
            self.currentCalibName = self.calibNames[calibration]
        else:
            print("No record of that calibration")
            return False
            return False

        self.currentCalib = self.calibs[self.currentCalibName]      #do the import
        if self.autoLog:
            logging.info("Loaded calibration from:%s" %self.currentCalibName)

        return self.currentCalibName
Example #3
 def mode(self, value):
     if value in [None, '']:
         self.__dict__['mode'] = ''
     elif ('mode' in self.__dict__) and value==self.mode:
         return #nothing to do here. Move along please
     elif value=='status':
         self.sendMessage('$statusScreen\r')
         self.__dict__['mode'] = 'status'
     elif 'storage' in value.lower():
         self.sendMessage('$USB_massStorage\r')
         self.__dict__['mode'] = 'massStorage'
         logging.info('Switched %s to %s mode' %(self.info['ProductType'], self.__dict__['mode']))
     elif value.startswith('bits'):
         self.sendMessage('$BitsPlusPlus\r')
         self.__dict__['mode'] = 'bits++'
         self.setLUT()
         logging.info('Switched %s to %s mode' %(self.info['ProductType'], self.__dict__['mode']))
     elif value.startswith('mono'):
         self.sendMessage('$monoPlusPlus\r')
         self.__dict__['mode'] = 'mono++'
         logging.info('Switched %s to %s mode' %(self.info['ProductType'], self.__dict__['mode']))
     elif value.startswith('colo'):
         self.sendMessage('$colorPlusPlus\r')
         self.__dict__['mode'] = 'color++'
         logging.info('Switched %s to %s mode' %(self.info['ProductType'], self.__dict__['mode']))
     elif value.startswith('auto'):
         self.sendMessage('$autoPlusPlus\r')
         self.__dict__['mode'] = 'auto++'
         logging.info('Switched %s to %s mode' %(self.info['ProductType'], self.__dict__['mode']))
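
The setter above writes to self.__dict__['mode'] directly, the usual trick to avoid re-entering the property when mode is exposed via a property/attributeSetter (the decorator is not shown here). A sketch of driving it, assuming `device` is an already-constructed instance of the class that defines this setter:

device.mode = 'bits++'   # sends '$BitsPlusPlus\r', reloads the LUT and logs the switch
device.mode = 'mono++'   # sends '$monoPlusPlus\r'
device.mode = 'status'   # sends '$statusScreen\r'
device.mode = ''         # cleared locally; nothing is sent for None or ''
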
Example #4
 def add_file(self, asset, update=False, new_path=None, threaded=False):
     """Adds the file to the OSF project.
     If containing folder doesn't exist then it will be created recursively
     update is used if the file already exists but needs updating (version
     will be incremented).
     """
     # get the url and local path
     local_path = asset['full_path']
     if new_path is None:
         new_path = asset['path']
     if update:
         url_upload = asset['links']['upload']
         logging.info("Updating file : {}".format(asset['path']))
     else:
         container, name = os.path.split(new_path)
         folder_asset = self.add_container(container)
         url_upload = folder_asset['links']['upload']
         if not url_upload.endswith("?kind=file"):
             url_upload += "?kind=file"
         url_upload += "&name={}".format(name)
         # do the upload
         logging.info("Uploading file {} to container:{}"
                      .format(name, folder_asset['path']))
     if 'size' in asset:
         size = asset['size']
     else:
         size = 0
     self.session.upload_file(url=url_upload, local_path=local_path,
                              size=size, threaded=threaded)
Example #5
    def updatePthFile(self, oldName, newName):
        """Searches site-packages for .pth files and replaces any instance of
        `oldName` with `newName`, where the names likely have the form PsychoPy-1.60.04
        """
        from distutils.sysconfig import get_python_lib

        siteDir = get_python_lib()
        pthFiles = glob.glob(os.path.join(siteDir, "*.pth"))
        enclosingSiteDir = os.path.split(siteDir)[0]  # sometimes the site-packages dir isn't where the pth files are kept?
        pthFiles.extend(glob.glob(os.path.join(enclosingSiteDir, "*.pth")))
        nUpdates = 0  # no paths updated
        info = ""
        for filename in pthFiles:
            lines = open(filename, "r").readlines()
            needSave = False
            for lineN, line in enumerate(lines):
                if oldName in line:
                    lines[lineN] = line.replace(oldName, newName)
                    needSave = True
            if needSave:
                try:
                    f = open(filename, "w")
                    f.writelines(lines)
                    f.close()
                    nUpdates += 1
                    logging.info("Updated PsychoPy path in %s" % filename)
                except Exception:
                    info += "Failed to update PsychoPy path in %s" % filename
                    return -1, info
        return nUpdates, info
Example #6
    def __init__(self, files, threads=3, verbose=False):
        """Like `Speech2Text()`, but takes a list of sound files or a directory name to search
        for matching sound files, and returns a list of `(filename, response)` tuples.
        `response`'s are described in `Speech2Text.getResponse()`.

        Can use up to 5 concurrent threads. Intended for
        post-experiment processing of multiple files, in which waiting for a slow response
        is not a problem (better to get the data).

        If `files` is a string, it will be used as a directory name for glob
        (matching all `*.wav`, `*.flac`, and `*.spx` files).
        There's currently no re-try on http error."""
        list.__init__(self)  # [ (file1, resp1), (file2, resp2), ...]
        maxThreads = min(threads, 5)  # I get http errors with 6
        self.timeout = 30
        if type(files) == str and os.path.isdir(files):
            f = glob.glob(os.path.join(files, '*.wav'))
            f += glob.glob(os.path.join(files, '*.flac'))
            f += glob.glob(os.path.join(files, '*.spx'))
            fileList = f
        else:
            fileList = list(files)
        web.requireInternetAccess()  # needed to access google's speech API
        for i, filename in enumerate(fileList):
            gs = Speech2Text(filename, level=5)
            self.append((filename, gs.getThread()))  # tuple
            if verbose:
                logging.info("%i %s" % (i, filename))
            while self._activeCount() >= maxThreads:
                core.wait(.1, 0)  # idle at max count
Example #7
def rush(value=True):
    """Raise the priority of the current thread/process 
    Win32 and OS X only so far - on linux use os.nice(niceIncrement)
    
    Set with rush(True) or rush(False)
    
    Beware and don't take priority until after debugging your code
    and ensuring you have a way out (e.g. an escape sequence of
    keys within the display loop). Otherwise you could end up locked
    out and having to reboot!
    """
    if importCtypesFailed: return False
    
    if value:
        bus = getBusFreq()
        extendedPolicy=_timeConstraintThreadPolicy()
        extendedPolicy.period=bus/160 #number of cycles in hz (make higher than frame rate)
        extendedPolicy.computation=bus/320#half of that period
        extendedPolicy.constrain= bus/640#max period that they should be carried out in
        extendedPolicy.preemptible=1
        extendedPolicy=getThreadPolicy(getDefault=True, flavour=THREAD_TIME_CONSTRAINT_POLICY)
        err=cocoa.thread_policy_set(cocoa.mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, 
            ctypes.byref(extendedPolicy), #send the address of the struct
            THREAD_TIME_CONSTRAINT_POLICY_COUNT)
        if err!=KERN_SUCCESS:
            logging.error('Failed to set darwin thread policy, with thread_policy_set')
        else:
            logging.info('Successfully set darwin thread to realtime')
    else:
        #revert to default policy
        extendedPolicy=getThreadPolicy(getDefault=True, flavour=THREAD_STANDARD_POLICY)
        err=cocoa.thread_policy_set(cocoa.mach_thread_self(), THREAD_STANDARD_POLICY, 
            ctypes.byref(extendedPolicy), #send the address of the struct
            THREAD_STANDARD_POLICY_COUNT)
    return True
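
A minimal sketch of how rush() is meant to be used, bracketing only the time-critical display loop; the window, stimulus and nFrames names are placeholders:

rush(True)                       # raise thread priority just before the critical loop
for frameN in range(nFrames):    # nFrames assumed to be defined by the experiment
    stim.draw()
    win.flip()
rush(False)                      # drop back to the standard scheduling policy afterwards
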
Example #8
 def __init__(self,
              visible=True,
              newPos=None,
              win=None):
     super(Mouse, self).__init__()
     self.visible = visible
     self.lastPos = None
     self.prevPos = None  # used for motion detection and timing
     if win:
         self.win = win
     else:
         try:
             # to avoid circular imports, core.openWindows is defined
             # by visual.py and updated in core namespace;
             # it's circular to "import visual" here in event
             self.win = psychopy.core.openWindows[0]()
             logging.info('Mouse: using default window')
         except (NameError, IndexError):
             logging.error('Mouse: failed to get a default visual.Window'
                           ' (need to create one first)')
             self.win = None
     # for builder: set status to STARTED, NOT_STARTED etc
     self.status = None
     self.mouseClock = psychopy.core.Clock()
     self.movedistance = 0.0
     # if pygame isn't initialised then we must use pyglet
     global usePygame
     if havePygame and not pygame.display.get_init():
         usePygame = False
     if not usePygame:
         global mouseButtons
         mouseButtons = [0, 0, 0]
     self.setVisible(visible)
     if newPos is not None:
         self.setPos(newPos)
Example #9
 def token(self, token, save=None):
     """Set the token for this session and check that it works for auth
     """
     self.__dict__['token'] = token
     if token is None:
         headers = {}
     else:
         headers = {
             'Authorization': 'Bearer {}'.format(token),
         }
     self.headers.update(headers)
     # then populate self.userID and self.userName
     resp = self.get(constants.API_BASE+"/users/me/", timeout=10.0)
     if resp.status_code != 200:
         raise exceptions.AuthError("Invalid credentials trying to get "
                                    "user data:\n{}".format(resp.json()))
     else:
         logging.info("Successful authentication with token")
     json_resp = resp.json()
     self.authenticated = True
     data = json_resp['data']
     self.user_id = data['id']
     self.user_full_name = data['attributes']['full_name']
     # update stored tokens
     if save is None:
         save = self.remember_me
     if save and self.username is not None:
         tokens = TokenStorage()
         tokens[self.username] = token
         tokens.save()
Example #10
def _checkout(requestedVersion):
    """Look for a Maj.min.patch requested version, download (fetch) if needed.
    """
    # Check tag of repo
    if currentTag() == requestedVersion:
        return requestedVersion

    # See if the tag already exists in repos
    if requestedVersion not in _localVersions(forceCheck=True):
        # Grab new tags
        msg = _translate("Couldn't find version {} locally. Trying github...")
        logging.info(msg.format(requestedVersion))
        subprocess.check_output('git fetch github'.split())
        # is requested here now? forceCheck to refresh cache
        if requestedVersion not in _localVersions(forceCheck=True):
            msg = _translate("{} is not currently available.")
            logging.error(msg.format(requestedVersion))
            return ''

    # Checkout the requested tag
    cmd = ['git', 'checkout', requestedVersion]
    out = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                  cwd=VERSIONSDIR)
    logging.debug(out)
    logging.exp('Success:  ' + ' '.join(cmd))
    return requestedVersion
Example #11
    def doGammaFits(self, levels, lums):
        linMethod = self.currentMon.getLinearizeMethod()

        if linMethod==4:
            logging.info('Fitting gamma equation (%i) to luminance data' % linMethod)
            currentCal = numpy.ones([4,6],'f')*numpy.nan
            for gun in [0,1,2,3]:
                gamCalc = monitors.GammaCalculator(levels, lums[gun,:], eq=linMethod)
                currentCal[gun,0]=gamCalc.min#min
                currentCal[gun,1]=gamCalc.max#max
                currentCal[gun,2]=gamCalc.gamma#gamma
                currentCal[gun,3]=gamCalc.a#a
                currentCal[gun,4]=gamCalc.b#b
                currentCal[gun,5]=gamCalc.k#k
        else:
            currentCal = numpy.ones([4,3],'f')*numpy.nan
            logging.info('Fitting gamma equation (%i) to luminance data' % linMethod)
            for gun in [0,1,2,3]:
                gamCalc = monitors.GammaCalculator(levels, lums[gun,:], eq=linMethod)
                currentCal[gun,0]=lums[gun,0]#min
                currentCal[gun,1]=lums[gun,-1]#max
                currentCal[gun,2]=gamCalc.gamma#gamma

        self.gammaGrid.setData(currentCal)
        self.currentMon.setGammaGrid(currentCal)
        self.unSavedMonitor=True
Example #12
def get_screen_res():
    """
    Check the current screen resolution. Raises OSError if the OS can't be recognised.

    :return: OrderedDict with 'width' and 'height' keys (the screen resolution in pixels).
    """
    import platform

    system = platform.system()
    if 'Linux' in system:
        import subprocess
        import re

        output = subprocess.Popen('xrandr | grep "\*" | cut -d" " -f4', shell=True, stdout=subprocess.PIPE)
        output = output.communicate()[0].decode().strip()
        valid_res = lambda x: re.match(r'^\d{3,4}x\d{3,4}$', x)
        if not valid_res(output):
            output = subprocess.Popen('xdpyinfo | grep dimensions | cut -d" " -f7', shell=True, stdout=subprocess.PIPE)
            output = output.communicate()[0].decode().strip()
        if not valid_res(output):
            logging.error('OS ERROR - no way to determine screen res')
            raise OSError(
                "Humanity needs more time to come up with an efficient way of checking the screen resolution of your hamster")
        width, height = map(int, output.split('x'))
    elif 'Windows' in system:
        from win32api import GetSystemMetrics

        width = int(GetSystemMetrics(0))
        height = int(GetSystemMetrics(1))
    else:  # can't recognise OS
        logging.error('OS ERROR - no way to determine screen res')
        raise OSError("get_screen_res function can't recognise your OS")
    logging.info('Screen res set as: {}x{}'.format(width, height))
    return OrderedDict(width=width, height=height)
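
A usage sketch, assuming get_screen_res() is importable and a PsychoPy window is built from the detected resolution:

from psychopy import visual

res = get_screen_res()     # OrderedDict(width=..., height=...)
win = visual.Window(size=(res['width'], res['height']), fullscr=True, units='pix')
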
Example #13
    def saveAsPickle(self, fileName, fileCollisionMethod='rename'):
        """Basically just saves a copy of the handler (with data) to a
        pickle file.

        This can be reloaded if necessary and further analyses carried out.

        :Parameters:

            fileCollisionMethod: Collision method passed to
            :func:`~psychopy.tools.fileerrortools.handleFileCollision`
        """
        if self.thisTrialN < 1 and self.thisRepN < 1:
            # if both are < 1 we haven't started
            if self.autoLog:
                logging.info('.saveAsPickle() called but no trials completed.'
                             ' Nothing saved')
            return -1
        # otherwise use default location
        if not fileName.endswith('.psydat'):
            fileName += '.psydat'

        f = openOutputFile(fileName, append=False,
                           fileCollisionMethod=fileCollisionMethod)
        pickle.dump(self, f)
        f.close()
        logging.info('saved data to %s' % f.name)
Example #14
    def __init__(self, name,
        width=None,
        distance=None,
        gamma=None,
        notes=None,
        useBits=None,
        verbose=True,
        currentCalib=None,
        ):
        """
        """

        #make sure that all necessary settings have some value
        self.__type__ = 'psychoMonitor'
        self.name = name
        self.currentCalib = currentCalib or {}  # avoid sharing a mutable default dict
        self.currentCalibName = strFromDate(time.localtime())
        self.calibs = {}
        self.calibNames = []
        self._gammaInterpolator=None
        self._gammaInterpolator2=None
        self._loadAll()
        if len(self.calibNames)>0:
            self.setCurrent(-1) #will fetch previous vals if monitor exists
        else:
            self.newCalib()

        logging.info(self.calibNames)

        #override current monitor settings with the vals given
        if width: self.setWidth(width)
        if distance: self.setDistance(distance)
        if gamma: self.setGamma(gamma)
        if notes: self.setNotes(notes)
        if useBits is not None: self.setUseBits(useBits)
Example #15
    def saveAsWideText(self, fileName, delim=None,
                       matrixOnly=False,
                       appendFile=False,
                       encoding='utf-8',
                       fileCollisionMethod='rename'):
        """Saves a long, wide-format text file, with one line representing
        the attributes and data for a single trial. Suitable for analysis
        in R and SPSS.

        If `appendFile=True` then the data will be added to the bottom of
        an existing file. Otherwise, if the file exists already it will
        be overwritten

        If `matrixOnly=True` then the file will not contain a header row,
        which can be handy if you want to append data to an existing file
        of the same format.

        encoding:
            The encoding to use when saving the file. Defaults to `utf-8`.

        fileCollisionMethod:
            Collision method passed to
            :func:`~psychopy.tools.fileerrortools.handleFileCollision`

        """
        # set default delimiter if none given
        if delim is None:
            delim = genDelimiter(fileName)

        # create the file or send to stdout
        f = openOutputFile(
            fileName, append=appendFile, delim=delim,
            fileCollisionMethod=fileCollisionMethod, encoding=encoding)

        names = self._getAllParamNames()
        names.extend(self.dataNames)
        # names from the extraInfo dictionary
        names.extend(self._getExtraInfo()[0])
        # write a header line
        if not matrixOnly:
            for heading in names:
                f.write(u'%s%s' % (heading, delim))
            f.write('\n')
        # write the data for each entry

        for entry in self.entries:
            for name in names:
                if name in entry:
                    ename = str(entry[name])
                    if ',' in ename or '\n' in ename:
                        fmt = u'"%s"%s'
                    else:
                        fmt = u'%s%s'
                    f.write(fmt % (entry[name], delim))
                else:
                    f.write(delim)
            f.write('\n')
        if f != sys.stdout:
            f.close()
        logging.info('saved data to %r' % f.name)
Example #16
 def apply_mv_remote(self, asset, new_path, threaded=False):
     proj = self.proj()
     new_folder, new_name = os.path.split(new_path)
     proj.osf.rename_file(asset, new_path)
     logging.info("Sync.Changes request: Move file remote: {} -> {}"
                  .format(asset['path'], new_path))
     return 1
Example #17
    def __init__(self,
                 port=None,
                 sendBreak=False,
                 smoothing=False,
                 bufferSize=262144):
        # if we're trying to send the break signal then presumably the device
        # is sleeping
        if sendBreak:
            checkAwake = False
        else:
            checkAwake = True
        # run initialisation; parity = enable parity checking
        super(BlackBoxToolkit, self).__init__(port,
                                              baudrate=230400, eol="\r\n",
                                              parity='N',
                                              pauseDuration=1.0,  # 1 second pause!! slow device
                                              checkAwake=checkAwake)
        if sendBreak:
            self.sendBreak()
            time.sleep(3.0)  # give time to reset

        if smoothing == False:
            # For use with CRT monitors which require smoothing. LCD monitors do not.
            # Remove smoothing for optos, but keep mic smoothing - refer to BBTK handbook re: mic smoothing latency
            # Important to remove smoothing for optos, as smoothing adds 20ms delay to timing.
            logging.info("Opto sensor smoothing removed.  Mic1 and Mic2 smoothing still active.")
            self.setSmoothing('11000000')
            self.pause()

        try: # set buffer size - can make proportional to size of data (32 bytes per line * events)+1000
            self.com.set_buffer_size(bufferSize)
        except Exception:
            logging.warning("Could not set buffer size. The default buffer size for Windows is 4096 bytes.")
Example #18
    def create_project(self, title, descr="", tags=[], public=False,
                       category='project'):
        url = "{}/nodes/".format(constants.API_BASE, self.user_id)
        if type(tags) != list:  # given a string so convert to list
            tags = tags.split(",")

        body = {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'category': category,
                    'description': descr,
                    'tags': tags,
                    'public': public,
                }
            }
        }
        reply = self.post(
            url,
            data=json.dumps(body),
            headers=self.headers,
            timeout=10.0)
        if reply.status_code not in [200, 201]:
            raise exceptions.OSFError("Failed to create project at:\n  {}"
                                      .format(url))
        project_node = OSFProject(session=self, id=reply.json()['data'])
        logging.info("Successfully created project {}".format(project_node.id))
        return project_node
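
A sketch of calling the method above on an authenticated session; the title, description and tags are placeholder values:

proj = session.create_project("Pilot study", descr="First pass at the task",
                              tags=["pilot", "attention"], public=False)
logging.info("New project id: {}".format(proj.id))
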
Example #19
def _checkoutRequested(requestedVersion):
    """Look for a tag matching the request, return it if found
    or return None for the search.
    """
    # Check tag of repo
    if getCurrentTag() == requestedVersion:  # nothing to do!
        return 1

    # See if the tag already exists in repos (no need for internet)
    cmd = 'git tag'.split()
    versions = subprocess.check_output(cmd, cwd=VERSIONSDIR).decode()
    if requestedVersion not in versions:
        # Grab new tags
        msg = "Couldn't find version %r locally. Trying github..."
        logging.info(msg % requestedVersion)
        cmd = 'git fetch github'.split()
        out = subprocess.check_output(cmd)
        # after fetching from github check if it's there now!
        versions = subprocess.check_output(['git', 'tag'], cwd=VERSIONSDIR).decode()
        if requestedVersion not in versions:
            msg = "%r is not a valid version. Please choose one of:  %r"
            logging.error(msg % (requestedVersion, versions.split()))
            return 0

    # Checkout the requested tag
    cmd = 'git checkout %s' % requestedVersion
    out = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT,
                                  cwd=VERSIONSDIR)
    logging.debug(out)
    return 1
Example #20
    def __init__(self, port, verbose=None):
        super(PR650, self).__init__()
        if type(port) in (int, float):
            # add one so that port 1=COM1
            self.portNumber = port
            self.portString = 'COM%i' % self.portNumber
        else:
            self.portString = port
            self.portNumber = None
        self.isOpen = 0
        self.lastQual = 0
        self.type = 'PR650'
        self.com = False
        self.OK = True  # until we fail

        self.codes = {'OK': '000\r\n',  # this is returned after measure
                      '18': 'Light Low',  # returned at beginning of data
                      '10': 'Light Low',
                      '00': 'OK'}

        # try to open the port
        _linux = sys.platform.startswith('linux')
        if sys.platform in ('darwin', 'win32') or _linux:
            try:
                self.com = serial.Serial(self.portString)
            except Exception:
                msg = ("Couldn't connect to port %s. Is it being used by"
                       " another program?")
                self._error(msg % self.portString)
        else:
            msg = "I don't know how to handle serial ports on %s"
            self._error(msg % sys.platform)
        # setup the params for PR650 comms
        if self.OK:
            self.com.baudrate = 9600
            self.com.parity = 'N'  # none
            self.com.stopbits = 1
            try:
                # Pyserial >=2.6 throws an exception when trying to open a
                # serial port that is already open. Catching that exception
                # is not an option here because PySerial only defines a
                # single exception type (SerialException)
                if not self.com.isOpen():
                    self.com.open()
            except Exception:
                msg = "Opened serial port %s, but couldn't connect to PR650"
                self._error(msg % self.portString)
            else:
                self.isOpen = 1
        if self.OK:
            logging.info("Successfully opened %s" % self.portString)
            time.sleep(0.1)  # wait while establish connection
            # turn on the backlight as feedback
            reply = self.sendMessage(b'b1\n')
            if reply != self.codes['OK']:
                self._error("PR650 isn't communicating")

        if self.OK:
            # set command to make sure using right units etc...
            reply = self.sendMessage(b's01,,,,,,01,1')
Example #21
def _versionFilter(versions, wxVersion):
    """Returns all versions that are compatible with the Python and WX running PsychoPy

    Parameters
    ----------
    versions: list
        All available (valid) selections for the version to be chosen
    wxVersion: str or None
        The wx version currently in use (None if wx is not available)

    Returns
    -------
    list
        All valid selections for the version to be chosen that are compatible with Python version used
    """

    # Get Python 3 Compatibility
    if constants.PY3:
        msg = _translate("Filtering versions of PsychoPy only compatible with Python 3.")
        logging.info(msg)
        versions = [ver for ver in versions if ver == 'latest' or parse_version(ver) >= parse_version('1.90')]

    # Get WX Compatibility
    compatibleWX = '4.0'
    if wxVersion is not None and parse_version(wxVersion) >= parse_version(compatibleWX):
        msg = _translate("wx version: {}. Filtering versions of "
                         "PsychoPy only compatible with wx >= version {}".format(wxVersion,
                                                                              compatibleWX))
        logging.info(msg)
        return [ver for ver in versions if ver == 'latest' or parse_version(ver) > parse_version('1.85.04')]
    return versions
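
A small illustration of the two filtering rules above, assuming parse_version comes from pkg_resources (as used in the function) and using a made-up list of version strings:

from pkg_resources import parse_version

versions = ['latest', '3.0.5', '1.90.2', '1.85.04', '1.83.01']

# Python 3 rule: keep 'latest' and anything >= 1.90
py3OK = [v for v in versions if v == 'latest' or parse_version(v) >= parse_version('1.90')]
# -> ['latest', '3.0.5', '1.90.2']

# wx >= 4.0 rule: keep 'latest' and anything newer than 1.85.04
wxOK = [v for v in versions if v == 'latest' or parse_version(v) > parse_version('1.85.04')]
# -> ['latest', '3.0.5', '1.90.2']
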
Example #22
def timingCheckAndLog(ts,trialN):
    #check for timing problems and log them
    #ts is a list of the times of the clock after each frame
    interframeIntervs = np.diff(ts)*1000
    #print '   interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where(interframeIntervs > longFrameLimit)[0]  # frames that exceeded the tolerated duration
    numCasesInterframeLong = len(idxsInterframeLong)
    if numCasesInterframeLong > 0 and (not demo):
        longFramesStr = 'ERROR,' + str(numCasesInterframeLong) + ' frames were longer than ' + str(longFrameLimit) + ' ms'
        if demo:
            longFramesStr += ' not printing them all because in demo mode'
        else:
            longFramesStr += (' apparently screen refreshes skipped, interframe durs were:' +
                              str(np.around(interframeIntervs[idxsInterframeLong], 1)) +
                              ' and these were the frames: ' + str(idxsInterframeLong))
        if longFramesStr is not None:
            logging.error('trialnum=' + str(trialN) + ' ' + longFramesStr)
            if not demo:
                flankingAlso = list()
                for idx in idxsInterframeLong:  # also log timing of the frame before and after each long frame
                    if idx - 1 >= 0:
                        flankingAlso.append(idx - 1)
                    else:
                        flankingAlso.append(np.nan)
                    flankingAlso.append(idx)
                    if idx + 1 < len(interframeIntervs):
                        flankingAlso.append(idx + 1)
                    else:
                        flankingAlso.append(np.nan)
                flankingAlso = np.array(flankingAlso)
                flankingAlso = flankingAlso[~np.isnan(flankingAlso)]  # remove nan values
                flankingAlso = flankingAlso.astype(int)  # cast as integers, so they can be used as subscripts
                # logged as INFO rather than ERROR: the previous message already flags the problem,
                # so this won't fill the console when the console level is WARNING or higher
                logging.info('flankers also=' + str(np.around(interframeIntervs[flankingAlso], 1)))
    return numCasesInterframeLong
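
A sketch of how a function like this is typically fed, assuming the module-level refreshRate and demo globals that the body relies on, plus a PsychoPy window, stimulus and clock (all placeholder names):

refreshRate = 60.0               # assumed globals used by timingCheckAndLog()
demo = False

trialClock = core.Clock()
ts = []
for frameN in range(nFramesPerTrial):   # nFramesPerTrial assumed defined elsewhere
    stim.draw()
    myWin.flip()
    ts.append(trialClock.getTime())     # clock time after each frame
nLong = timingCheckAndLog(ts, trialN=0)
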
Example #23
    def authenticate(self, username, password=None, otp=None):
        """Authenticate according to username and password (if needed).

        If the username has been used already to create a token then that
        token will be reused (and no password is required). If not then the
        password will be sent (using https) and an auth token will be stored.
        """
        # try fetching a token first
        tokens = TokenStorage()
        if username in tokens:
            logging.info("Found previous auth token for {}".format(username))
            try:
                self.token = tokens[username]
                return 1
            except AuthError:
                if password is None:
                    raise AuthError("User token didn't work and no password")
        elif password is None:
            raise AuthError("No auth token found and no password given")
        token_url = constants.API_BASE+'/tokens/'
        token_request_body = {
            'data': {
                'type': 'tokens',
                'attributes': {
                    'name': '{} - {}'.format(
                        constants.PROJECT_NAME, datetime.date.today()),
                    'scopes': constants.APPLICATION_SCOPES
                }
            }
        }
        headers = {'content-type': 'application/json'}

        if otp is not None:
            headers['X-OSF-OTP'] = otp
        resp = self.post(
            token_url,
            headers=headers,
            data=json.dumps(token_request_body),
            auth=(username, password)
            )
        if resp.status_code in (401, 403):
            # If login failed because of a missing two-factor authentication
            # code, notify the user to try again
            # This header appears for basic auth requests, and only when a
            # valid password is provided
            otp_val = resp.headers.get('X-OSF-OTP', '')
            if otp_val.startswith('required'):
                raise AuthError('Must provide code for two-factor '
                                'authentication')
            else:
                raise AuthError('Invalid credentials')
        elif not resp.status_code == 201:
            raise AuthError('Invalid authorization response')
        else:
            json_resp = resp.json()
            logging.info("Successfully authenticated with username/password")
            self.authenticated = True
            self.token = json_resp['data']['attributes']['token_id']
            return 1
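
A usage sketch for the method above; the username and password are placeholders, and otp is only needed when two-factor authentication is enabled on the account:

session.authenticate(username='someone@example.com', password='correct-horse')
# with two-factor authentication:
session.authenticate(username='someone@example.com', password='correct-horse', otp='123456')
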
Example #24
    def getEvents(self, timeout=10):
        """Look for a string that matches SDAT;\n.........EDAT;\n
        and process it as events
        """
        foundDataStart=False
        t0=time.time()
        while not foundDataStart and (time.time()-t0)<timeout:
            if self.com.readline().startswith('SDAT'):
                foundDataStart=True
                logging.info("BBTK.getEvents() found data. Processing...")
                logging.flush() #we aren't in a time-critical period so flush messages
                break
        #check if we're processing data
        if not foundDataStart:
            logging.warning("BBTK.getEvents() found no data (SDAT was not found on serial port inputs")
            return []

        #helper function to parse time and event code
        def parseEventsLine(line, lastState=None):
            """Returns a list of dictionaries, one for each change detected in the state
            """
            state = line[:12]
            timeSecs = int(line[-14:-2])/10.0**6
            evts=[]
            evt=''
            if lastState is None:
                evts.append({'evt':'', 'state':state, 'time':timeSecs})
            else:
                for n in evtChannels.keys():
                    if state[n]!=lastState[n]:
                        if state[n]=='1':
                            evt = evtChannels[n]+"_on"
                        else:
                            evt = evtChannels[n]+"_off"
                        evts.append({'evt':evt, 'state':state, 'time':timeSecs})
            return evts

        #we've been sent data so work through it
        events=[]
        eventLines=[]
        lastState=None
        #try to read from port
        self.pause()
        self.com.setTimeout(2.0)
        nEvents = int(self.com.readline()[:-2]) #last two chars are ;\n
        self.com.readline()[:-2] # microseconds recorded (ignore)
        self.com.readline()[:-2] #samples recorded (ignore)
        while True:
            line = self.com.readline()
            if line.startswith('EDAT'): #end of data stream
                break
            events.extend(parseEventsLine(line, lastState))
            lastState = events[-1]['state']
            eventLines.append(line)
        if nEvents != len(eventLines):
            logging.warning("BBTK reported %i events but told us to expect %i events!!" %(len(events), nEvents))
        logging.flush() #we aren't in a time-critical period so flush messages
        return events
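
A toy illustration of the parsing done by parseEventsLine() above, using a fabricated data line; the slicing assumes a 12-character channel-state field at the start and a 12-digit microsecond timestamp just before the trailing ';\n':

evtChannels = {0: 'Key1', 1: 'Key2', 2: 'Mic1'}     # subset, for illustration only

line = '100000000000,xxxx,000001250000;\n'          # made-up BBTK-style line
lastState = '000000000000'

state = line[:12]                        # '100000000000'
timeSecs = int(line[-14:-2]) / 10.0**6   # 1.25 s
for n in evtChannels:
    if state[n] != lastState[n]:
        evt = evtChannels[n] + ('_on' if state[n] == '1' else '_off')
        print(evt, timeSecs)             # -> Key1_on 1.25
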
Example #25
 def run_event(self):
     if self.event == "exit":
         logging.info("Exiting. . .")
         self.window.close()
         core.quit()
     if self.event in ["nothing", "continue"]:
         pass
     else:
         logging.warn("Event not recognized. Doing nothing.")
Example #27
 def apply_mv_local(self, asset, new_path, threaded=False):
     proj = self.proj()
     full_path_new = os.path.join(proj.local.root_path, new_path)
     full_path_old = os.path.join(proj.local.root_path, asset['path'])
     shutil.move(full_path_old, full_path_new)
     asset['path'] = new_path
     logging.info("Sync.Changes done: Moved file locally: {} -> {}"
                  .format(asset['full_path'], new_path))
     return 1
Example #28
 def apply_del_local(self, asset, new_path=None, threaded=False):
     proj = self.proj()
     full_path = os.path.join(proj.local.root_path, new_path)
     if os.path.isfile(full_path):  # might have been removed already?
         os.remove(full_path)
     if os.path.isdir(full_path):  # might have been removed already?
         os.rmdir(full_path)
     logging.info("Sync.Changes done: Removed file locally: {}"
                  .format(asset['path']))
     return 1
Example #29
    def font(self, font):
        """String. Set the font to be used for text rendering. font should
        be a string specifying the name of the font (in system resources).
        """
        self.__dict__['font'] = None  # until we find one
        if self.win.winType == "pyglet":
            self._font = pyglet.font.load(font, int(self._heightPix),
                                          dpi=72, italic=self.italic,
                                          bold=self.bold)
            self.__dict__['font'] = font
        else:
            if font is None or len(font) == 0:
                self.__dict__['font'] = pygame.font.get_default_font()
            elif font in pygame.font.get_fonts():
                self.__dict__['font'] = font
            elif type(font) == str:
                # try to find a xxx.ttf file for it
                # check for possible matching filenames
                fontFilenames = glob.glob(font + '*')
                if len(fontFilenames) > 0:
                    for thisFont in fontFilenames:
                        if thisFont[-4:] in ['.TTF', '.ttf']:
                            # take the first match
                            self.__dict__['font'] = thisFont
                            break  # stop at the first one we find
                    # then check if we were successful
                    if self.font is None and font != "":
                        # we didn't find a ttf filename
                        msg = ("Found %s but it doesn't end .ttf. "
                               "Using default font.")
                        logging.warning(msg % fontFilenames[0])
                        self.__dict__['font'] = pygame.font.get_default_font()

            if self.font is not None and os.path.isfile(self.font):
                self._font = pygame.font.Font(self.font, int(
                    self._heightPix), italic=self.italic, bold=self.bold)
            else:
                try:
                    self._font = pygame.font.SysFont(
                        self.font, int(self._heightPix), italic=self.italic,
                        bold=self.bold)
                    self.__dict__['font'] = font
                    logging.info('using sysFont ' + str(font))
                except Exception:
                    self.__dict__['font'] = pygame.font.get_default_font()
                    msg = ("Couldn't find font %s on the system. Using %s "
                           "instead! Font names should be written as "
                           "concatenated names all in lower case.\ne.g. "
                           "'arial', 'monotypecorsiva', 'rockwellextra', ...")
                    logging.error(msg % (font, self.font))
                    self._font = pygame.font.SysFont(
                        self.font, int(self._heightPix), italic=self.italic,
                        bold=self.bold)
        # re-render text after a font change
        self._needSetText = True
Example #30
    def __init__(self):
        super(_GlobalEventKeys, self).__init__()
        self._events = OrderedDict()

        if prefs.general['shutdownKey']:
            msg = ('Found shutdown key definition in preferences; '
                   'enabling shutdown key.')
            logging.info(msg)
            self.add(key=prefs.general['shutdownKey'],
                     modifiers=prefs.general['shutdownKeyModifiers'],
                     func=psychopy.core.quit,
                     name='shutdown (auto-created from prefs)')
Example #31
    def __init__(self, filename,
                 lang='en-US',
                 timeout=10,
                 samplingrate=16000,
                 pro_filter=2,
                 level=0):
        """
            :Parameters:

                `filename` : <required>
                    name of the speech file (.flac, .wav, or .spx) to process. wav files will be
                    converted to flac, and for this to work you need to have flac (as an
                    executable). spx format is speex-with-headerbyte (for Google).
                `lang` :
                    the presumed language of the speaker, as a locale code; default 'en-US'
                `timeout` :
                    seconds to wait before giving up, default 10
                `samplingrate` :
                    the sampling rate of the speech clip in Hz, either 16000 or 8000. You can
                    record at a higher rate, and then down-sample to 16000 for speech
                    recognition. `file` is the down-sampled file, not the original.
                    the sampling rate is auto-detected for .wav files.
                `pro_filter` :
                    profanity filter level; default 2 (e.g., f***)
                `level` :
                    flac compression level (0 less compression but fastest)
        """
        # set up some key parameters:
        results = 5  # how many words wanted
        self.timeout = timeout
        useragent = PSYCHOPY_USERAGENT
        host = "www.google.com/speech-api/v1/recognize"

        # determine file type, convert wav to flac if needed:
        if not os.path.isfile(filename):
            raise IOError("Cannot find file: %s" % filename)
        ext = os.path.splitext(filename)[1]
        if ext not in ['.flac', '.wav']:
            raise SoundFormatNotSupported("Unsupported filetype: %s\n" % ext)
        if ext == '.wav':
            _junk, samplingrate = readWavFile(filename)
        if samplingrate not in [16000, 8000]:
            raise SoundFormatNotSupported(
                'Speech2Text sample rate must be 16000 or 8000 Hz')
        self.filename = filename
        if ext == ".flac":
            filetype = "x-flac"
        elif ext == ".wav":  # convert to .flac
            filetype = "x-flac"
            filename = wav2flac(filename, level=level)  # opt for speed
        logging.info("Loading: %s as %s, audio/%s" %
                     (self.filename, lang, filetype))
        # occasional error; core.wait(.1) is not always enough; better slow
        # than fail
        c = 0
        while not os.path.isfile(filename) and c < 10:
            core.wait(.1, 0)
            c += 1
        audio = open(filename, 'rb').read()
        if ext == '.wav' and filename.endswith('.flac'):
            try:
                os.remove(filename)
            except Exception:
                pass

        # urllib2 makes no attempt to validate the server certificate. here's an idea:
        # http://thejosephturner.com/blog/2011/03/19/https-certificate-verification-in-python-with-urllib2/
        # set up the https request:
        url = 'https://' + host + '?xjerr=1&' +\
              'client=psychopy2&' +\
              'lang=' + lang + '&'\
              'pfilter=%d' % pro_filter + '&'\
              'maxresults=%d' % results
        header = {'Content-Type': 'audio/%s; rate=%d' % (filetype, samplingrate),
                  'User-Agent': useragent}
        web.requireInternetAccess()  # needed to access google's speech API
        try:
            self.request = urllib.request.Request(url, audio, header)
        except Exception:  # pragma: no cover
            # try again before accepting defeat
            logging.info("https request failed. %s, %s. trying again..." %
                         (filename, self.filename))
            core.wait(0.2, 0)
            self.request = urllib.request.Request(url, audio, header)
Example #32
    refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
    refreshRateWrong = False
else:  # checkRefreshEtc
    runInfo = psychopy.info.RunTimeInfo(
        # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
        #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
        #version="<your experiment version info>",
        win=myWin,  # a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating',  # None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=True,  # True means report on everything
        # if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        userProcsDetailed=True)
    # print(runInfo)
    logging.info(runInfo)
    print('Finished runInfo - which assesses the refresh and processes of this computer')
    #check that the screen refresh rate is what we assume it is ##############################################
    Hzs = list()
    myWin.flip()
    myWin.flip()
    myWin.flip()
    myWin.flip()
    myWin.setRecordFrameIntervals(True)  # otherwise myWin.fps won't work
    print('About to measure frame flips')
    for i in range(50):
        myWin.flip()
        Hzs.append(myWin.fps())  # varies wildly on successive runs!
    myWin.setRecordFrameIntervals(False)
Example #33
 def rebuild_index(self):
     logging.info("Indexing LocalFiles")
     self._index = self._create_index()
     self._needs_rebuild_index = False
Example #34
 def OnDrop(self, x, y, files):
     """Not clear this method ever gets called!"""
     logging.info("Got Files")
Example #35
def switchOff():
    """(Not needed as of v1.76.00; kept for backwards compatibility only.)
    """
    logging.info("deprecated:  microphone.switchOff() is no longer needed.")
Example #36
    def font(self, font):
        """String. Set the font to be used for text rendering. font should
        be a string specifying the name of the font (in system resources).
        """
        self.__dict__['font'] = None  # until we find one
        if self.win.winType in ["pyglet", "glfw"]:
            self._font = pyglet.font.load(font,
                                          int(self._heightPix),
                                          dpi=72,
                                          italic=self.italic,
                                          bold=self.bold)
            self.__dict__['font'] = font
        else:
            if font is None or len(font) == 0:
                self.__dict__['font'] = pygame.font.get_default_font()
            elif font in pygame.font.get_fonts():
                self.__dict__['font'] = font
            elif type(font) == str:
                # try to find a xxx.ttf file for it
                # check for possible matching filenames
                fontFilenames = glob.glob(font + '*')
                if len(fontFilenames) > 0:
                    for thisFont in fontFilenames:
                        if thisFont[-4:] in ['.TTF', '.ttf']:
                            # take the first match
                            self.__dict__['font'] = thisFont
                            break  # stop at the first one we find
                    # then check if we were successful
                    if self.font is None and font != "":
                        # we didn't find a ttf filename
                        msg = ("Found %s but it doesn't end .ttf. "
                               "Using default font.")
                        logging.warning(msg % fontFilenames[0])
                        self.__dict__['font'] = pygame.font.get_default_font()

            if self.font is not None and os.path.isfile(self.font):
                self._font = pygame.font.Font(self.font,
                                              int(self._heightPix),
                                              italic=self.italic,
                                              bold=self.bold)
            else:
                try:
                    self._font = pygame.font.SysFont(self.font,
                                                     int(self._heightPix),
                                                     italic=self.italic,
                                                     bold=self.bold)
                    self.__dict__['font'] = font
                    logging.info('using sysFont ' + str(font))
                except Exception:
                    self.__dict__['font'] = pygame.font.get_default_font()
                    msg = ("Couldn't find font %s on the system. Using %s "
                           "instead! Font names should be written as "
                           "concatenated names all in lower case.\ne.g. "
                           "'arial', 'monotypecorsiva', 'rockwellextra', ...")
                    logging.error(msg % (font, self.font))
                    self._font = pygame.font.SysFont(self.font,
                                                     int(self._heightPix),
                                                     italic=self.italic,
                                                     bold=self.bold)
        # re-render text after a font change
        self._needSetText = True
Example #37
def upload(selector, filename, basicAuth=None, host=None, https=False, log=True):
    """Upload a local file over the internet to a configured http server.

    This method handshakes with a php script on a remote server to transfer a local
    file to another machine via http (using POST).

    Returns "success" plus a sha256 digest of the file on the server and a byte count.
    If the upload was not successful, an error code is returned (e.g., "too_large" if the
    file size exceeds the limit specified server-side in up.php, or "no_file" if there
    was no POST attachment).

    .. note::
        The server that receives the files needs to be configured before uploading
        will work. php files and notes for a sys-admin are included in `psychopy/contrib/http/`.
        In particular, the php script `up.php` needs to be copied to the server's
        web-space, with appropriate permissions and directories, including apache
        basic auth and https (if desired). The maximum size for an upload can be configured within up.php

        A configured test-server is available; see the Coder demo for details
        (upload size is limited to ~1500 characters for the demo).

    **Parameters:**

        `selector` : (required, string)
            a standard URL of the form `http://host/path/to/up.php`, e.g., `http://upload.psychopy.org/test/up.php`

            .. note::
                Limited https support is provided (see below).

        `filename` : (required, string)
            the path to the local file to be transferred. The file can be any format:
            text, utf-8, binary. All files are hex encoded while in transit (increasing
            the effective file size).

            .. note::
                Encryption (*beta*) is available as a separate step. That is,
                first :mod:`~psychopy.contrib.opensslwrap.encrypt()` the file,
                then :mod:`~psychopy.web.upload()` the encrypted file in the same
                way that you would any other file.

        `basicAuth` : (optional)
            apache 'user:password' string for basic authentication. If a `basicAuth`
            value is supplied, it will be sent as the auth credentials (in cleartext);
            using https will encrypt the credentials.
        `host` : (optional)
            The default process is to extract host information from the `selector`. The `host` option
            allows you to specify a host explicitly (i.e., if it differs from the `selector`).
        `https` : (optional)
            If the remote server is configured to use https, passing the parameter
            `https=True` will encrypt the transmission including all data and `basicAuth`
            credentials. It is approximately as secure as using a self-signed X.509 certificate.

            An important caveat is that the authenticity of the certificate returned from the
            server is not checked, and so the certificate could potentially be spoofed
            (see the warning under HTTPSConnection http://docs.python.org/library/httplib.html).
            Overall, using https can still be much more secure than not using it.
            The encryption is good, but that of itself does not eliminate all risk.
            Importantly, it is not as secure as one might expect, given that all major web browsers
            do check certificate authenticity. The idea behind this parameter is to require people
            to explicitly indicate that they want to proceed anyway, in effect saying
            "I know what I am doing and accept the risks (of using un-verified certificates)".

    **Example:**

        See Coder demo / misc / http_upload.py

    Author: Jeremy R. Gray, 2012
    """

    requireInternetAccess()  # needed to upload over http

    fields = [('name', 'PsychoPy_upload'), ('type', 'file')]
    if not selector:
        logging.error('upload: need a selector, http://<host>/path/to/up.php')
        raise ValueError('upload: need a selector, http://<host>/path/to/up.php')
    if not host:
        host = selector.split('/')[2]
        if log:
            logging.info('upload: host extracted from selector = %s' % host)
    if selector.startswith('https'):
        if https is not True:
            logging.error('upload: https not explicitly requested. use https=True to proceed anyway (see API for security caveats).')
            raise ValueError('upload: https not fully supported (see API for caveats and usage), exiting.')
        elif log:
            logging.exp('upload: https requested; note that security is not fully assured (see API)')
    elif https:
        msg = 'upload: to use https, the selector URL must start with "https"'
        logging.error(msg)
        raise ValueError(msg)
    if not os.path.isfile(filename):
        logging.error('upload: file not found (%s)' % filename)
        raise ValueError('upload: file not found (%s)' % filename)
    contents = open(filename).read() # base64 encoded in _encode_multipart_formdata()
    file = [('file_1', filename, contents)]

    # initiate the POST:
    if log:
        logging.exp('upload: uploading file %s to %s' % (os.path.abspath(filename), selector))
    try:
        status, reason, result = _post_multipart(host, selector, fields, file,
                                                 basicAuth=basicAuth, https=https)
    except TypeError:
        status = 'no return value from _post_multipart(). '
        reason = 'config error?'
        result = status + reason
    except urllib2.URLError as ex:
        logging.error('upload: URL Error. (no internet connection?)')
        raise ex

    # process the result:
    if status == 200:
        result_fields = result.split()
        #result = 'status_msg digest' # if using up.php
        if result_fields[0] == 'good_upload':
            outcome = 'success'+' '+result
        else:
            outcome = result # failure code
    elif status == 404:
        outcome = '404 Not_Found: server config error'
    elif status == 403:
        outcome = '403 Forbidden: server config error'
    elif status == 401:
        outcome = '401 Denied: failed apache Basic authorization, or config error'
    elif status == 400:
        outcome = '400 Bad request: failed, possible config error'
    else:
        outcome = str(status) + ' ' + reason

    if isinstance(status, str) or status == -1 or status > 299:
        logging.error('upload: ' + outcome[:102])
    else:
        if outcome.startswith('success'):
            if log:
                logging.info('upload: ' + outcome[:102])
        else:
            logging.error('upload: ' + outcome[:102])
    return outcome
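
# --- Usage sketch (not from the original source) ----------------------------
# A minimal, hedged example of calling the upload function documented above.
# It assumes the enclosing function is named `upload` with parameters
# (selector, filename, basicAuth, host, https, log), which is what its body
# references; the URL, file name and credentials below are placeholders, not
# a real server.
if __name__ == '__main__':
    dest = 'https://upload.example.org/psychopy/up.php'  # hypothetical selector
    outcome = upload(selector=dest,
                     filename='data/subj01.csv',   # any existing local file
                     basicAuth='user:password',    # omit if the server needs none
                     https=True)                   # required: selector is https
    # on success the return value starts with 'success', e.g.
    # 'success good_upload <md5 digest>' when the server runs up.php
    print(outcome)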
Example #38
0
def createLinearRamp(rampType=None, rampSize=256, driver=None):
    """Generate the Nx3 values for a linear gamma ramp on the current platform.
    This uses heuristics about known graphics cards to guess the 'rampType' if
    none is explicitly given.

    Much of this work is ported from LoadIdentityClut.m, by Mario Kleiner
    for the psychtoolbox

    rampType 0 : an 8-bit CLUT ranging 0:1
        This seems correct for most Windows machines and older macOS systems
        Known to be used by:
            OSX 10.4.9 PPC with GeForceFX-5200

    rampType 1 : an 8-bit CLUT ranging (1/256.0):1
        For some reason a number of macs then had a CLUT that (erroneously?)
        started with 1/256 rather than 0. Known to be used by:
            OSX 10.4.9 with ATI Mobility Radeon X1600
            OSX 10.5.8 with ATI Radeon HD-2600
            maybe all ATI cards?

    rampType 2 : a 10-bit CLUT ranging 0:(1023/1024)
        A slightly odd 10-bit CLUT that doesn't quite finish on 1.0!
        Known to be used by:
            OSX 10.5.8 with Geforce-9200M (MacMini)
            OSX 10.5.8 with Geforce-8800

    rampType 3 : a nasty, bug-fixing 10-bit CLUT for crummy macOS drivers
        Craziest of them all, for Snow Leopard. Like rampType 2, except that
        the upper half of the table has 1/256.0 subtracted?!!
        Known to be used by:
            OSX 10.6.0 with NVidia Geforce-9200M
    """
    def _versionTuple(v):
        # for proper sorting: _versionTuple('10.8') < _versionTuple('10.10')
        return tuple(map(int, v.split('.')))

    if rampType is None:

        # try to determine rampType from heuristics including sys info

        osxVer = platform.mac_ver()[0]  # '' on non-Mac

        # OSX
        if osxVer:
            osxVerTuple = _versionTuple(osxVer)

            # driver provided
            if driver is not None:

                # nvidia
                if 'NVIDIA' in driver:
                    # leopard nVidia cards don't finish at 1.0!
                    if _versionTuple("10.5") < osxVerTuple < _versionTuple(
                            "10.6"):
                        rampType = 2
                    # snow leopard cards are plain crazy!
                    elif _versionTuple("10.6") < osxVerTuple:
                        rampType = 3
                    else:
                        rampType = 1

                # non-nvidia
                else:  # is ATI or unknown manufacturer, default to (1:256)/256
                    # this is certainly correct for radeon2600 on 10.5.8 and
                    # radeonX1600 on 10.4.9
                    rampType = 1

            # no driver info given
            else:  # is ATI or unknown manufacturer, default to (1:256)/256
                # this is certainly correct for radeon2600 on 10.5.8 and
                # radeonX1600 on 10.4.9
                rampType = 1

        # win32 or linux
        else:  # for win32 and linux this is sensible, not clear about Vista and Windows7
            rampType = 0

    if rampType == 0:
        ramp = numpy.linspace(0.0, 1.0, num=rampSize)
    elif rampType == 1:
        ramp = numpy.linspace(1 / 256.0, 1.0, num=256)
    elif rampType == 2:
        ramp = numpy.linspace(0, 1023.0 / 1024, num=1024)
    elif rampType == 3:
        ramp = numpy.linspace(0, 1023.0 / 1024, num=1024)
        ramp[512:] = ramp[512:] - 1 / 256.0
    else:
        raise ValueError('createLinearRamp: unrecognised rampType %r'
                         % rampType)
    logging.info('Using gamma ramp type: %i' % rampType)
    return ramp
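
# --- Usage sketch (not from the original source) ----------------------------
# A quick, hedged illustration of what createLinearRamp() returns for each
# rampType; it assumes numpy, logging and the function above are available
# exactly as in the snippet.
if __name__ == '__main__':
    for kind in (0, 1, 2, 3):
        ramp = createLinearRamp(rampType=kind)
        print('rampType %i: %i entries, first=%.6f, last=%.6f'
              % (kind, len(ramp), ramp[0], ramp[-1]))
    # expected: type 0 spans 0..1 over 256 entries, type 1 starts at 1/256,
    # types 2 and 3 have 1024 entries ending near 1023/1024 (type 3 with the
    # upper half shifted down by 1/256)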
Example #39
0
    def importItems(self, items):
        """Import items from csv or excel sheet and convert to list of dicts.
        Will also accept a list of dicts.

        Note that for csv and excel files, 'options' must contain comma-separated
        values, e.g., one, two, three. No parentheses or quotation marks are required.

        Parameters
        ----------
        items :  Excel or CSV file, list of dicts
            Items used to populate the Form

        Returns
        -------
        List of dicts
            A list in which each entry is a dict containing all fields for a
            single Form item
        """
        def _checkSynonyms(items, fieldNames):
            """Checks for updated names for fields (i.e. synonyms)"""

            replacedFields = set()
            for field in _synonyms:
                synonym = _synonyms[field]
                for item in items:
                    if synonym in item:
                        # convert to new name
                        item[field] = item[synonym]
                        del item[synonym]
                        replacedFields.add(field)
            for field in replacedFields:
                fieldNames.append(field)
                fieldNames.remove(_synonyms[field])
                logging.warning("Form {} included field no longer used {}. "
                                "Replacing with new name '{}'".format(
                                    self.name, _synonyms[field], field))

        def _checkRequiredFields(fieldNames):
            """Checks for required headings (do this after checking synonyms)"""
            for hdr in _knownFields:
                # is it required and/or present?
                if _knownFields[hdr] == _REQUIRED and hdr not in fieldNames:
                    raise ValueError("Missing header ({}) in Form ({}). "
                                     "Headers found were: {}".format(
                                         hdr, self.name, fieldNames))

        def _checkTypes(types, itemText):
            """A nested function for checking that an item's response type is
            one of the permitted (known) response types.

            Raises ValueError if the type is missing or unrecognised.
            """
            itemDiff = set([types]) - set(_knownRespTypes)

            for incorrItemType in itemDiff:
                if self._itemsFile:
                    itemsFileStr = (" in items file '{}'".format(
                        self._itemsFile))
                else:
                    itemsFileStr = ""
                if incorrItemType == _REQUIRED:
                    msg = ("Item {}{} is missing a required "
                           "value for its response type. Permitted types are "
                           "{}.".format(itemText, itemsFileStr,
                                        _knownRespTypes))
                else:
                    msg = ("Item {}{} has the unrecognised response type "
                           "'{}'. Permitted types are {}.".format(
                               itemText, itemsFileStr, incorrItemType,
                               _knownRespTypes))
                if self.autoLog:
                    logging.error(msg)
                raise ValueError(msg)

        def _addDefaultItems(items):
            """
            Adds default values for any fields that are missing or empty.
            Works in-place.

            Parameters
            ----------
            items : List of dicts
                The Form items, one dict per row
            """
            def isPresent(d, field):
                # check if the field is there and not empty on this row
                return (field in d and d[field] not in [None, ''])

            missingHeaders = []
            defaultValues = dict(_knownFields)  # copy, so the shared dict is not mutated
            for index, item in enumerate(items):
                defaultValues['index'] = index
                for header in defaultValues:
                    # if the header is missing or its value is None or ''
                    if not isPresent(item, header):
                        oldHeader = header.replace('item', 'question')
                        if isPresent(item, oldHeader):
                            item[header] = item[oldHeader]
                            logging.warning(
                                "{} is a deprecated heading for Forms. "
                                "Use {} instead".format(oldHeader, header))
                            continue
                        # Default to colour scheme if specified
                        if defaultValues[header] in ['fg', 'bg', 'em']:
                            item[header] = self.colorScheme[
                                defaultValues[header]]
                        else:
                            item[header] = defaultValues[header]
                        missingHeaders.append(header)

            msg = "Using default values for the following headers: {}".format(
                missingHeaders)
            if self.autoLog:
                logging.info(msg)

        if self.autoLog:
            logging.info("Importing items...")

        if not isinstance(items, list):
            # items is a conditions file
            self._itemsFile = Path(items)
            items, fieldNames = importConditions(items, returnFieldNames=True)
        else:  # we already have a list, so let's find the fieldnames
            fieldNames = set()
            for item in items:
                fieldNames = fieldNames.union(item)
            fieldNames = list(fieldNames)  # convert to list at the end
            self._itemsFile = None

        _checkSynonyms(items, fieldNames)
        _checkRequiredFields(fieldNames)
        # Add default values if entries missing
        _addDefaultItems(items)

        # Convert options to list of strings
        for item in items:
            if 'ticks' in item and item['ticks']:
                item['ticks'] = listFromString(item['ticks'])
            if 'tickLabels' in item and item['tickLabels']:
                item['tickLabels'] = listFromString(item['tickLabels'])
            if 'options' in item and item['options']:
                item['options'] = listFromString(item['options'])

        # Check that each item has a valid response type
        for item in items:
            _checkTypes(item['type'], item['itemText'])
        # Randomise items if requested
        if self.randomize:
            shuffle(items)
        return items
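
# --- Usage sketch (not from the original source) ----------------------------
# A small, hedged example of the list-of-dicts input that importItems() above
# accepts. The field names ('itemText', 'type', 'options', 'ticks') are the
# ones referenced in the method body; the exact required headers and permitted
# response types are defined elsewhere by _knownFields and _knownRespTypes.
exampleItems = [
    {'itemText': 'How difficult was the task?',
     'type': 'rating',                   # must be one of _knownRespTypes
     'options': 'easy, medium, hard',    # comma-separated string, no quotes
     'ticks': '1, 2, 3, 4, 5'},          # converted to a list by listFromString
]
# items = someForm.importItems(exampleItems)   # someForm: a hypothetical Form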