sys.exc_info

Here are examples of the Python API sys.exc_info, taken from open source projects.

112 Examples
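
Before the project examples, a minimal sketch of what sys.exc_info() provides: called inside an except block it returns the (type, value, traceback) triple of the exception being handled, and (None, None, None) otherwise. The describe_error helper below is purely illustrative and not taken from any project on this page.

import sys
import traceback

def describe_error():
    # Inside an active except block this is (type, value, traceback);
    # outside of one it is (None, None, None).
    exc_type, exc_value, exc_tb = sys.exc_info()
    if exc_type is None:
        return "no active exception"
    return "".join(traceback.format_exception(exc_type, exc_value, exc_tb))

try:
    1 / 0
except ZeroDivisionError:
    print(describe_error())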

Example 101

Project: modrana Source File: connectionpool.py
Function: url_open
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            conn = conn and conn.close()
            release_conn = True
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            conn = conn and conn.close()
            release_conn = True
            raise

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            conn = conn and conn.close()
            release_conn = True

            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response
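
The sys.exc_info() call in this example pulls out index [2], the live traceback object, and hands it to Retry.increment() as _stacktrace so the retry machinery can re-raise with the original frames intact. A minimal sketch of the same pattern, with a hypothetical record_failure sink standing in for the retry logic:

import sys

def record_failure(error, stacktrace=None):
    # Hypothetical sink for an error and its traceback object, standing
    # in for Retry.increment()'s _stacktrace parameter.
    print(error)
    if stacktrace is not None:
        print(stacktrace.tb_lineno)  # traceback objects carry frame and line info

try:
    raise ValueError("boom")
except ValueError as e:
    # sys.exc_info()[2] is the traceback of the exception currently being handled.
    record_failure(e, stacktrace=sys.exc_info()[2])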

Example 102

Project: RasPiConnectServer Source File: ExecuteRemoteWebView.py
def Generate_Remote_WebView(root, LOCALURL):

    if (Config.i2c_demo()):
        from pyblinkm import BlinkM, Scripts

    objectServerID = root.find("./OBJECTSERVERID").text
    objectFlags = root.find("./OBJECTFLAGS").text

    validate = Validate.checkForValidate(root)

    if (Config.debug()):
        print "VALIDATE=%s" % validate

    outgoingXMLData = BuildResponse.buildHeader(root)

    if (objectServerID == "W-1"):

        # check for validate request
        if (validate == "YES"):
            outgoingXMLData += Validate.buildValidateResponse("YES")
            outgoingXMLData += BuildResponse.buildFooter()

            return outgoingXMLData

        # normal response requested

        responseData = ""

        # check to see if i2c_demo is turned on
        if (Config.i2c_demo()):

            if (Config.debug()):
                print "Config.i2c_demo passed as True"

            # Yes, it is on

            # Initialise the BMP085 and use STANDARD mode (default value)
            # bmp = BMP085(0x77, debug=True)
            # bmp = BMP085(0x77)

            # To specify a different operating mode, uncomment one of the following:
            # bmp = BMP085(0x77, 0)  # ULTRALOWPOWER Mode
            # bmp = BMP085(0x77, 1)  # STANDARD Mode
            # bmp = BMP085(0x77, 2)  # HIRES Mode
            bmp = BMP085(0x77, 3)  # ULTRAHIRES Mode

            count = 0
            exceptionCount = 0
            exceptionCountBMP = 0
            blinkm = BlinkM(1, 0xc)
            blinkm.reset()

            try:
                temp = bmp.readTemperature()
                pressure = bmp.readPressure()
                altitude = bmp.readAltitude()

                tempData = "%.2f C" % temp
                pressureData = "%.2f hPa" % (pressure / 100.0)

            except IOError as e:
                exceptionCountBMP = exceptionCountBMP + 1
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
            except:
                exceptionCountBMP = exceptionCountBMP + 1
                print "Unexpected error:", sys.exc_info()[0]
                raise

        else:  # now set some values for display since we don't have i2c
            tempData = "xx.x C (no i2c enabled)"
            pressureData = "xxxx.x hPa (no i2c enabled)"

        # read an HTML template into a string
        with open("./Templates/W-1.html", "r") as myfile:
            responseData += myfile.read().replace('\n', '')

        # replace the URL so it will point to static
        responseData = responseData.replace("XXX", LOCALURL)

        # now replace the AAA, BBB, etc. with the right data
        responseData = responseData.replace("AAA", subprocess.check_output(["date", ""], shell=True))

        # split uptime at first blank, then at first comma
        uptimeString = subprocess.check_output(["uptime", ""])

        uptimeType = uptimeString.split(",")
        uptimeCount = len(uptimeType)

        if (uptimeCount == 6):
            # over 24 hours
            uptimeSplit = uptimeString.split(",")
            uptimeSplit = uptimeSplit[0] + uptimeSplit[1]
            uptimeSplit = uptimeSplit.split(" ", 1)
            uptimeData = uptimeSplit[1]
        else:
            # under 24 hours
            uptimeSplit = uptimeString.split(" ", 2)
            uptimeSplit = uptimeSplit[2].split(",", 1)
            uptimeData = uptimeSplit[0]

        responseData = responseData.replace("BBB", uptimeData)

        usersString = subprocess.check_output(["who", "-q"], shell=False, stderr=subprocess.STDOUT)
        responseData = responseData.replace("CCC", usersString)

        freeString = subprocess.check_output(["free", "-mh"])
        freeSplit = freeString.split("cache: ", 1)
        freeSplit = freeSplit[1].split("       ", 2)
        freeSplit = freeSplit[2].split("\nSwap:", 1)
        freeData = freeSplit[0]

        responseData = responseData.replace("DDD", freeData)

        responseData = responseData.replace("EEE", tempData)
        responseData = responseData.replace("FFF", pressureData)

        output = subprocess.check_output(["cat", "/sys/class/thermal/thermal_zone0/temp"])
        cpuTemp = "%3.2f C" % (float(output) / 1000.0)

        responseData = responseData.replace("GGG", cpuTemp)

        try:
            freeString = subprocess.check_output(["ifconfig", "eth0"])
            freeSplit = freeString.split("inet addr:", 1)
            if (len(freeSplit) > 1):
                freeSplit = freeSplit[1].split(" ", 1)
                freeData = freeSplit[0]
            else:
                freeData = ""

        except:
            freeData = ""

        responseData = responseData.replace("HHH", freeData)

        responseData = responseData.replace("III", Config.localURL())
        # responseData = responseData.replace("III", "'your external address here'")

        responseData = responseData.replace("JJJ", Config.version_number())

        # read latest data from ST-1 SendText control on RasPiConnect
        try:
            with open("./local/ST-1.txt", "r") as myfile:
                sendTextData = myfile.read().replace('\n', '')
        except IOError:
            sendTextData = ""

        responseData = responseData.replace("KKK", sendTextData)

        # check to see if i2c_demo is turned on
        if (Config.i2c_demo()):

            time.sleep(0.2)

            try:
                blinkm.go_to(255, 0, 0)
                time.sleep(0.2)
                blinkm.go_to(0, 255, 0)

            except IOError as e:
                # blinkm.reset()
                exceptionCount = exceptionCount + 1
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
            except:
                blinkm.reset()
                exceptionCount = exceptionCount + 1
                print "Unexpected error:", sys.exc_info()[0]
                raise

        # responseData += subprocess.check_output(["cat", "/proc/cpuinfo"])
        # responseData += subprocess.check_output(["cat", "/proc/meminfo"])

        outgoingXMLData += BuildResponse.buildResponse(responseData)

        if (Config.debug()):
            print outgoingXMLData
    elif (objectServerID == "W-2"):

        # check for validate request
        if (validate == "YES"):
            outgoingXMLData += Validate.buildValidateResponse("YES")
            outgoingXMLData += BuildResponse.buildFooter()

            return outgoingXMLData

        # normal response requested

        imageName = "RovioImage.jpg"

        responseData = "<html><head>"
        responseData += "<title></title><style>body,html,iframe{margin:0;padding:0;}</style>"
        responseData += "</head>"

        responseData += "<body><img src=\""
        responseData += LOCALURL
        responseData += "static/"
        responseData += imageName
        responseData += "\" type=\"jpg\" width=\"300\" height=\"300\">"
        responseData += "<BR>Picture<BR>"

        responseData += "</body>"

        responseData += "</html>"

        outgoingXMLData += BuildResponse.buildResponse(responseData)

        if (Config.debug()):
            print outgoingXMLData

    else:
        # invalid RasPiConnect code
        outgoingXMLData += Validate.buildValidateResponse("NO")

    outgoingXMLData += BuildResponse.buildFooter()

    return outgoingXMLData
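
Both try blocks in this example use sys.exc_info()[0] inside a bare except clause: a bare except binds no exception object, so indexing the tuple is the only way to name the class that was caught before re-raising. A stripped-down sketch of that pattern, assuming nothing beyond the standard library:

import sys

try:
    open("/nonexistent/path")
except IOError as e:
    print("I/O error({0}): {1}".format(e.errno, e.strerror))
except:
    # A bare except binds no exception object, so sys.exc_info()[0]
    # is how we name the class that was caught before re-raising.
    print("Unexpected error: %r" % (sys.exc_info()[0],))
    raise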

Example 103

Project: MAVProxy Source File: mavproxy_link.py
    def master_callback(self, m, master):
        '''process mavlink message m on master, sending any messages to recipients'''

        # see if it is handled by a specialised sysid connection
        sysid = m.get_srcSystem()
        if sysid in self.mpstate.sysid_outputs:
            self.mpstate.sysid_outputs[sysid].write(m.get_msgbuf())
            if m.get_type() == "GLOBAL_POSITION_INT" and self.module('map') is not None:
                self.module('map').set_secondary_vehicle_position(m)
            return

        if getattr(m, '_timestamp', None) is None:
            master.post_message(m)
        self.status.counters['MasterIn'][master.linknum] += 1

        mtype = m.get_type()

        # and log them
        if mtype not in dataPackets and self.mpstate.logqueue:
            # put link number in bottom 2 bits, so we can analyse packet
            # delay in saved logs
            usec = self.get_usec()
            usec = (usec & ~3) | master.linknum
            self.mpstate.logqueue.put(str(struct.pack('>Q', usec) + m.get_msgbuf()))

        # keep the last message of each type around
        self.status.msgs[m.get_type()] = m
        if not m.get_type() in self.status.msg_count:
            self.status.msg_count[m.get_type()] = 0
        self.status.msg_count[m.get_type()] += 1

        if m.get_srcComponent() == mavutil.mavlink.MAV_COMP_ID_GIMBAL and m.get_type() == 'HEARTBEAT':
            # silence gimbal heartbeat packets for now
            return

        if getattr(m, 'time_boot_ms', None) is not None:
            # update link_delayed attribute
            self.handle_msec_timestamp(m, master)

        if mtype in activityPackets:
            if master.linkerror:
                master.linkerror = False
                self.say("link %u OK" % (master.linknum+1))
            self.status.last_message = time.time()
            master.last_message = self.status.last_message

        if master.link_delayed:
            # don't process delayed packets that cause double reporting
            if mtype in delayedPackets:
                return

        if mtype == 'HEARTBEAT' and m.type != mavutil.mavlink.MAV_TYPE_GCS:
            if self.settings.target_system == 0 and self.settings.target_system != m.get_srcSystem():
                self.settings.target_system = m.get_srcSystem()
                self.say("online system %u" % self.settings.target_system,'message')

            if self.status.heartbeat_error:
                self.status.heartbeat_error = False
                self.say("heartbeat OK")
            if master.linkerror:
                master.linkerror = False
                self.say("link %u OK" % (master.linknum+1))

            self.status.last_heartbeat = time.time()
            master.last_heartbeat = self.status.last_heartbeat

            armed = self.master.motors_armed()
            if armed != self.status.armed:
                self.status.armed = armed
                if armed:
                    self.say("ARMED")
                else:
                    self.say("DISARMED")

            if master.flightmode != self.status.flightmode:
                self.status.flightmode = master.flightmode
                if self.mpstate.functions.input_handler is None:
                    self.set_prompt(self.status.flightmode + "> ")

            if master.flightmode != self.status.last_mode_announced and time.time() > self.status.last_mode_announce + 2:
                    self.status.last_mode_announce = time.time()
                    self.status.last_mode_announced = master.flightmode
                    self.say("Mode " + self.status.flightmode)

            if m.type == mavutil.mavlink.MAV_TYPE_FIXED_WING:
                self.mpstate.vehicle_type = 'plane'
                self.mpstate.vehicle_name = 'ArduPlane'
            elif m.type in [mavutil.mavlink.MAV_TYPE_GROUND_ROVER,
                            mavutil.mavlink.MAV_TYPE_SURFACE_BOAT,
                            mavutil.mavlink.MAV_TYPE_SUBMARINE]:
                self.mpstate.vehicle_type = 'rover'
                self.mpstate.vehicle_name = 'APMrover2'
            elif m.type in [mavutil.mavlink.MAV_TYPE_QUADROTOR,
                            mavutil.mavlink.MAV_TYPE_COAXIAL,
                            mavutil.mavlink.MAV_TYPE_HEXAROTOR,
                            mavutil.mavlink.MAV_TYPE_OCTOROTOR,
                            mavutil.mavlink.MAV_TYPE_TRICOPTER,
                            mavutil.mavlink.MAV_TYPE_HELICOPTER]:
                self.mpstate.vehicle_type = 'copter'
                self.mpstate.vehicle_name = 'ArduCopter'
            elif m.type in [mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER]:
                self.mpstate.vehicle_type = 'antenna'
                self.mpstate.vehicle_name = 'AntennaTracker'

        elif mtype == 'STATUSTEXT':
            if m.text != self.status.last_apm_msg or time.time() > self.status.last_apm_msg_time+2:
                (fg, bg) = self.colors_for_severity(m.severity)
                self.mpstate.console.writeln("APM: %s" % m.text, bg=bg, fg=fg)
                self.status.last_apm_msg = m.text
                self.status.last_apm_msg_time = time.time()

        elif mtype == "VFR_HUD":
            have_gps_lock = False
            if 'GPS_RAW' in self.status.msgs and self.status.msgs['GPS_RAW'].fix_type == 2:
                have_gps_lock = True
            elif 'GPS_RAW_INT' in self.status.msgs and self.status.msgs['GPS_RAW_INT'].fix_type == 3:
                have_gps_lock = True
            if have_gps_lock and not self.status.have_gps_lock and m.alt != 0:
                self.say("GPS lock at %u meters" % m.alt, priority='notification')
                self.status.have_gps_lock = True

        elif mtype == "GPS_RAW":
            if self.status.have_gps_lock:
                if m.fix_type != 2 and not self.status.lost_gps_lock and (time.time() - self.status.last_gps_lock) > 3:
                    self.say("GPS fix lost")
                    self.status.lost_gps_lock = True
                if m.fix_type == 2 and self.status.lost_gps_lock:
                    self.say("GPS OK")
                    self.status.lost_gps_lock = False
                if m.fix_type == 2:
                    self.status.last_gps_lock = time.time()

        elif mtype == "GPS_RAW_INT":
            if self.status.have_gps_lock:
                if m.fix_type < 3 and not self.status.lost_gps_lock and (time.time() - self.status.last_gps_lock) > 3:
                    self.say("GPS fix lost")
                    self.status.lost_gps_lock = True
                if m.fix_type >= 3 and self.status.lost_gps_lock:
                    self.say("GPS OK")
                    self.status.lost_gps_lock = False
                if m.fix_type >= 3:
                    self.status.last_gps_lock = time.time()

        elif mtype == "NAV_CONTROLLER_OUTPUT" and self.status.flightmode == "AUTO" and self.mpstate.settings.distreadout:
            rounded_dist = int(m.wp_dist/self.mpstate.settings.distreadout)*self.mpstate.settings.distreadout
            if math.fabs(rounded_dist - self.status.last_distance_announce) >= self.mpstate.settings.distreadout:
                if rounded_dist != 0:
                    self.say("%u" % rounded_dist, priority="progress")
            self.status.last_distance_announce = rounded_dist

        elif mtype == "GLOBAL_POSITION_INT":
            self.report_altitude(m.relative_alt*0.001)

        elif mtype == "COMPASSMOT_STATUS":
            print(m)

        elif mtype == "BAD_DATA":
            if self.mpstate.settings.shownoise and mavutil.all_printable(m.data):
                self.mpstate.console.write(str(m.data), bg='red')
        elif mtype in [ "COMMAND_ACK", "MISSION_ACK" ]:
            self.mpstate.console.writeln("Got MAVLink msg: %s" % m)

            if mtype == "COMMAND_ACK" and m.command == mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION:
                if m.result == mavutil.mavlink.MAV_RESULT_ACCEPTED:
                    self.say("Calibrated")
        else:
            #self.mpstate.console.writeln("Got MAVLink msg: %s" % m)
            pass

        if self.status.watch is not None:
            if fnmatch.fnmatch(m.get_type().upper(), self.status.watch.upper()):
                self.mpstate.console.writeln('< '+str(m))

        # don't pass along bad data
        if mtype != 'BAD_DATA':
            # pass messages along to listeners, except for REQUEST_DATA_STREAM, which
            # would lead to a conflict in stream rate setting between mavproxy and the
            # other GCS
            if self.mpstate.settings.mavfwd_rate or mtype != 'REQUEST_DATA_STREAM':
                if not mtype in self.no_fwd_types:
                    for r in self.mpstate.mav_outputs:
                        r.write(m.get_msgbuf())

            # pass to modules
            for (mod,pm) in self.mpstate.modules:
                if not hasattr(mod, 'mavlink_packet'):
                    continue
                try:
                    mod.mavlink_packet(m)
                except Exception as msg:
                    if self.mpstate.settings.moddebug == 1:
                        print(msg)
                    elif self.mpstate.settings.moddebug > 1:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                                  limit=2, file=sys.stdout)

Example 104

Project: mongo-connector Source File: oplog_manager.py
    def dump_collection(self):
        """Dumps collection into the target system.

        This method is called when we're initializing the cursor and have no
        configs i.e. when we're starting for the first time.
        """

        timestamp = retry_until_ok(self.get_last_oplog_timestamp)
        if timestamp is None:
            return None
        long_ts = util.bson_ts_to_long(timestamp)
        # Flag if this oplog thread was cancelled during the collection dump.
        # Use a list to workaround python scoping.
        dump_cancelled = [False]

        def get_all_ns():
            all_ns_set = []
            db_list = retry_until_ok(self.primary_client.database_names)
            for database in db_list:
                if database == "config" or database == "local":
                    continue
                coll_list = retry_until_ok(
                    self.primary_client[database].collection_names)
                for coll in coll_list:
                    # ignore system collections
                    if coll.startswith("system."):
                        continue
                    # ignore gridfs collections
                    if coll.endswith(".files") or coll.endswith(".chunks"):
                        continue
                    namespace = "%s.%s" % (database, coll)
                    all_ns_set.append(namespace)
            return all_ns_set

        def get_ns_from_set(namespace_set, include=True):
            all_ns_set = get_all_ns()
            ns_set = []
            for src_ns in all_ns_set:
                for map_ns in namespace_set:
                    # get all matched collections in that ns
                    reg_pattern = r'\A' + map_ns.replace('*', '(.*)') + r'\Z'
                    if re.match(reg_pattern, src_ns):
                        ns_set.append(src_ns)
                        continue

            if include:
                return ns_set
            else:
                return [x for x in all_ns_set if x not in ns_set]

        # No namespaces specified
        if (not self.namespace_set) and (not self.ex_namespace_set):
            dump_set = get_all_ns()
        elif self.namespace_set and not self.ex_namespace_set:
            dump_set = get_ns_from_set(self.namespace_set)
        else:
            # ex_namespace_set is set but no namespace_set
            dump_set = get_ns_from_set(self.ex_namespace_set, include=False)

        LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)

        def docs_to_dump(from_coll):
            last_id = None
            attempts = 0

            # Loop to handle possible AutoReconnect
            while attempts < 60:
                if last_id is None:
                    cursor = retry_until_ok(
                        from_coll.find,
                        projection=self._projection,
                        sort=[("_id", pymongo.ASCENDING)]
                    )
                else:
                    cursor = retry_until_ok(
                        from_coll.find,
                        {"_id": {"$gt": last_id}},
                        projection=self._projection,
                        sort=[("_id", pymongo.ASCENDING)]
                    )
                try:
                    for doc in cursor:
                        if not self.running:
                            # Thread was joined while performing the
                            # collection dump.
                            dump_cancelled[0] = True
                            raise StopIteration
                        last_id = doc["_id"]
                        yield doc
                    break
                except (pymongo.errors.AutoReconnect,
                        pymongo.errors.OperationFailure):
                    attempts += 1
                    time.sleep(1)

        def upsert_each(dm):
            num_failed = 0
            for namespace in dump_set:
                from_coll = self.get_collection(namespace)
                total_docs = retry_until_ok(from_coll.count)
                num = None
                for num, doc in enumerate(docs_to_dump(from_coll)):
                    try:
                        mapped_ns = self.dest_mapping_stru.get(namespace,
                                                               namespace)
                        dm.upsert(doc, mapped_ns, long_ts)
                    except Exception:
                        if self.continue_on_error:
                            LOG.exception(
                                "Could not upsert docuement: %r" % doc)
                            num_failed += 1
                        else:
                            raise
                    if num % 10000 == 0:
                        LOG.info("Upserted %d out of approximately %d docs "
                                 "from collection '%s'",
                                 num + 1, total_docs, namespace)
                if num is not None:
                    LOG.info("Upserted %d out of approximately %d docs from "
                             "collection '%s'",
                             num + 1, total_docs, namespace)
            if num_failed > 0:
                LOG.error("Failed to upsert %d docs" % num_failed)

        def upsert_all(dm):
            try:
                for namespace in dump_set:
                    from_coll = self.get_collection(namespace)
                    total_docs = retry_until_ok(from_coll.count)
                    mapped_ns = self.dest_mapping_stru.get(namespace,
                                                           namespace)
                    LOG.info("Bulk upserting approximately %d docs from "
                             "collection '%s'",
                             total_docs, namespace)
                    dm.bulk_upsert(docs_to_dump(from_coll), mapped_ns, long_ts)
            except Exception:
                if self.continue_on_error:
                    LOG.exception("OplogThread: caught exception"
                                  " during bulk upsert, re-upserting"
                                  " docuements serially")
                    upsert_each(dm)
                else:
                    raise

        def do_dump(dm, error_queue):
            try:
                LOG.debug("OplogThread: Using bulk upsert function for "
                          "collection dump")
                upsert_all(dm)

                # Dump GridFS files
                for gridfs_ns in self.gridfs_set:
                    mongo_coll = self.get_collection(gridfs_ns)
                    from_coll = self.get_collection(gridfs_ns + '.files')
                    dest_ns = self.dest_mapping_stru.get(gridfs_ns, gridfs_ns)
                    for doc in docs_to_dump(from_coll):
                        gridfile = GridFSFile(mongo_coll, doc)
                        dm.insert_file(gridfile, dest_ns, long_ts)
            except:
                # Likely exceptions:
                # pymongo.errors.OperationFailure,
                # mongo_connector.errors.ConnectionFailed
                # mongo_connector.errors.OperationFailed
                error_queue.put(sys.exc_info())

        # Extra threads (if any) that assist with collection dumps
        dumping_threads = []
        # Did the dump succeed for all target systems?
        dump_success = True
        # Holds any exceptions we can't recover from
        errors = queue.Queue()

        if len(self.doc_managers) == 1:
            do_dump(self.doc_managers[0], errors)
        else:
            # Slight performance gain breaking dump into separate
            # threads if > 1 replication target
            for dm in self.doc_managers:
                t = threading.Thread(target=do_dump, args=(dm, errors))
                dumping_threads.append(t)
                t.start()
            # cleanup
            for t in dumping_threads:
                t.join()

        # Print caught exceptions
        try:
            while True:
                LOG.critical('Exception during collection dump',
                             exc_info=errors.get_nowait())
                dump_success = False
        except queue.Empty:
            pass

        if not dump_success:
            err_msg = "OplogThread: Failed during dump collection"
            effect = "cannot recover!"
            LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
            self.running = False
            return None

        if dump_cancelled[0]:
            LOG.warning('Initial collection dump was interrupted. '
                        'Will re-run the collection dump on next startup.')
            return None

        return timestamp
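
A detail worth copying from do_dump() above: it captures sys.exc_info() inside the worker thread and puts the whole triple on a queue, so the main thread can log it later with exc_info=. A reduced sketch of that cross-thread hand-off (the RuntimeError is a stand-in for real dump work):

import logging
import sys
import threading

try:
    import queue           # Python 3
except ImportError:
    import Queue as queue  # Python 2

logging.basicConfig()
LOG = logging.getLogger(__name__)

def do_dump(error_queue):
    try:
        raise RuntimeError("dump failed")  # stand-in for the real dump work
    except:
        # The whole (type, value, traceback) triple travels across the
        # thread boundary; it keeps the failed frames alive until logged.
        error_queue.put(sys.exc_info())

errors = queue.Queue()
t = threading.Thread(target=do_dump, args=(errors,))
t.start()
t.join()

try:
    while True:
        LOG.critical('Exception during collection dump',
                     exc_info=errors.get_nowait())
except queue.Empty:
    pass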

Example 105

Project: amo-validator Source File: actions.py
def _expr_assignment(traverser, node):
    """Evaluate an AssignmentExpression node."""

    traverser._debug('ASSIGNMENT_EXPRESSION')
    traverser.debug_level += 1

    traverser._debug('ASSIGNMENT>>PARSING RIGHT')
    right = traverser._traverse_node(node['right'])
    right = JSWrapper(right, traverser=traverser)

    # Treat direct assignment different than augmented assignment.
    if node['operator'] == '=':
        from predefinedentities import GLOBAL_ENTITIES, is_shared_scope

        global_overwrite = False
        readonly_value = is_shared_scope(traverser)

        node_left = node['left']
        traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])

        if node_left['type'] == 'Identifier':
            # Identifiers just need the ID name and a value to push.
            # Raise a global overwrite issue if the identifier is global.
            global_overwrite = traverser._is_global(node_left['name'])

            # Get the readonly attribute and store its value if is_global
            if global_overwrite:
                global_dict = GLOBAL_ENTITIES[node_left['name']]
                if 'readonly' in global_dict:
                    readonly_value = global_dict['readonly']

            traverser._declare_variable(node_left['name'], right, type_='glob')
        elif node_left['type'] == 'MemberExpression':
            member_object = trace_member(traverser, node_left['object'],
                                         instantiate=True)
            global_overwrite = (member_object.is_global and
                                not ('overwritable' in member_object.value and
                                     member_object.value['overwritable']))
            member_property = _get_member_exp_property(traverser, node_left)
            traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'
                             % member_property)
            traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)

            # Don't do the assignment if we're facing a global.
            if not member_object.is_global:
                if member_object.value is None:
                    member_object.value = JSObject()

                if not member_object.is_global:
                    member_object.value.set(member_property, right, traverser)
                else:
                    # It's probably better to do nothing.
                    pass

            elif 'value' in member_object.value:
                member_object_value = _expand_globals(traverser,
                                                      member_object).value
                if member_property in member_object_value['value']:

                    # If it's a global and the actual member exists, test
                    # whether it can be safely overwritten.
                    member = member_object_value['value'][member_property]
                    if 'readonly' in member:
                        global_overwrite = True
                        readonly_value = member['readonly']

        traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %
                         global_overwrite)
        traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %
                         readonly_value)

        if callable(readonly_value):
            readonly_value = readonly_value(traverser, right, node['right'])

        if readonly_value and global_overwrite:

            kwargs = dict(
                err_id=('testcases_javascript_actions',
                        '_expr_assignment',
                        'global_overwrite'),
                warning='Global variable overwrite',
                description='An attempt was made to overwrite a global '
                            'variable in some JavaScript code.')

            if isinstance(readonly_value, DESCRIPTION_TYPES):
                kwargs['description'] = readonly_value
            elif isinstance(readonly_value, dict):
                kwargs.update(readonly_value)

            traverser.warning(**kwargs)

        return right

    lit_right = right.get_literal_value()

    traverser._debug('ASSIGNMENT>>PARSING LEFT')
    left = traverser._traverse_node(node['left'])
    traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')
    traverser.debug_level -= 1

    if isinstance(left, JSWrapper):
        if left.dirty:
            return left

        lit_left = left.get_literal_value()
        token = node['operator']

        # Don't perform an operation on None. Python freaks out
        if lit_left is None:
            lit_left = 0
        if lit_right is None:
            lit_right = 0

        # Give them default values so we have them in scope.
        gleft, gright = 0, 0

        # All of the assignment operators
        operators = {'=': lambda: right,
                     '+=': lambda: lit_left + lit_right,
                     '-=': lambda: gleft - gright,
                     '*=': lambda: gleft * gright,
                     '/=': lambda: 0 if gright == 0 else (gleft / gright),
                     '%=': lambda: 0 if gright == 0 else (gleft % gright),
                     '<<=': lambda: int(gleft) << int(gright),
                     '>>=': lambda: int(gleft) >> int(gright),
                     '>>>=': lambda: float(abs(int(gleft)) >> gright),
                     '|=': lambda: int(gleft) | int(gright),
                     '^=': lambda: int(gleft) ^ int(gright),
                     '&=': lambda: int(gleft) & int(gright)}

        # If we're modifying a non-numeric type with a numeric operator, return
        # NaN.
        if (not isinstance(lit_left, NUMERIC_TYPES) and
                token in NUMERIC_OPERATORS):
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left

        # If either side of the assignment operator is a string, both sides
        # need to be casted to strings first.
        if (isinstance(lit_left, types.StringTypes) or
                isinstance(lit_right, types.StringTypes)):
            lit_left = _get_as_str(lit_left)
            lit_right = _get_as_str(lit_right)

        gleft, gright = _get_as_num(left), _get_as_num(right)

        traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)
        if token not in operators:
            # We don't support that operator. (yet?)
            traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)
            return left
        elif token in ('<<=', '>>=', '>>>=') and gright < 0:
            # The user is doing weird bitshifting that will return 0 in JS but
            # not in Python.
            left.set_value(0, traverser=traverser)
            return left
        elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and
              (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            # Don't bother handling infinity for integer-converted operations.
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left

        traverser._debug('ASSIGNMENT::L-value global? (%s)' %
                         ('Y' if left.is_global else 'N'), 1)
        try:
            new_value = operators[token]()
        except Exception:
            traverser.system_error(exc_info=sys.exc_info())
            new_value = None

        # Cap the length of analyzed strings.
        if (isinstance(new_value, types.StringTypes) and
                len(new_value) > MAX_STR_SIZE):
            new_value = new_value[:MAX_STR_SIZE]

        traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)
        left.set_value(new_value, traverser=traverser)
        return left

    # Though it would otherwise be a syntax error, we say that 4=5 should
    # evaluate out to 5.
    return right
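
The traverser.system_error(exc_info=sys.exc_info()) call above forwards the full triple to the validator's own error reporter. The standard logging module accepts the same tuple directly, which the following sketch uses in place of the amo-validator internals:

import logging
import sys

logging.basicConfig()
log = logging.getLogger(__name__)

def apply_operator(op):
    try:
        return op()
    except Exception:
        # logging accepts the exc_info tuple directly and renders
        # the traceback alongside the message.
        log.error("operator failed", exc_info=sys.exc_info())
        return None

apply_operator(lambda: 1 / 0)  # logs the ZeroDivisionError, returns None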

Example 106

Project: fjord Source File: connectionpool.py
Function: url_open
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, 'No pool connections are available.')

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            if conn:
                conn.close()
                conn = None
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            if conn:
                conn.close()
                conn = None
            raise

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
                conn.close()
                conn = None

            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning('Retrying (%r) after connection '
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    raise
                return response

            log.info('Redirecting %s -> %s' % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info('Forced retry: %s' % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response

Example 107

Project: greentea Source File: mbed_greentea_cli.py
def main():
    """ Closure for main_cli() function """
    parser = optparse.OptionParser()

    parser.add_option('-t', '--target',
                    dest='list_of_targets',
                    help='You can specify a list of yotta targets you want to build. Use commas to separate them. ' +
                         'Note: if the --test-spec switch is defined, this list becomes an optional, comma-separated ' +
                         'filter of builds from the test specification.')

    parser.add_option('-n', '--test-by-names',
                    dest='test_by_names',
                    help='Runs only the tests enumerated in this switch. Use commas to separate test case names.')

    parser.add_option('-i', '--skip-test',
                    dest='skip_test',
                    help='Skips the tests enumerated in this switch. Use commas to separate test case names.')

    parser.add_option("-O", "--only-build",
                    action="store_true",
                    dest="only_build_tests",
                    default=False,
                    help="Only build repository and tests, skips actual test procedures (flashing etc.)")

    parser.add_option("-S", "--skip-build",
                    action="store_true",
                    dest="skip_yotta_build",
                    default=True,
                    help="Skip calling 'yotta build' on this module")

    copy_methods_str = "Plugin support: " + ', '.join(mbed_host_tests.host_tests_plugins.get_plugin_caps('CopyMethod'))
    parser.add_option("-c", "--copy",
                    dest="copy_method",
                    help="Copy (flash the target) method selector. " + copy_methods_str,
                    metavar="COPY_METHOD")

    parser.add_option('', '--parallel',
                    dest='parallel_test_exec',
                    default=1,
                    help='Experimental: executes test runners in parallel for the MUTs connected to your host (speeds up test result collection)')

    parser.add_option("-e", "--enum-host-tests",
                    dest="enum_host_tests",
                    help="Define directory with yotta module local host tests. Default: ./test/host_tests")

    parser.add_option('', '--config',
                    dest='verbose_test_configuration_only',
                    default=False,
                    action="store_true",
                    help='Displays connected boards and detected targets and exits.')

    parser.add_option('', '--release',
                    dest='build_to_release',
                    default=False,
                    action="store_true",
                    help='If possible force build in release mode (yotta -r).')

    parser.add_option('', '--debug',
                    dest='build_to_debug',
                    default=False,
                    action="store_true",
                    help='If possible force build in debug mode (yotta -d).')

    parser.add_option('-l', '--list',
                    dest='list_binaries',
                    default=False,
                    action="store_true",
                    help='List available binaries')

    parser.add_option('-g', '--grm',
                    dest='global_resource_mgr',
                    help='Global resource manager service query: platform name, remote mgr module name, IP address and port, example K64F:module_name:10.2.123.43:3334')

    parser.add_option('-m', '--map-target',
                    dest='map_platform_to_yt_target',
                    help='List of custom mapping between platform name and yotta target. Comma separated list of YOTTA_TARGET:PLATFORM tuples')

    parser.add_option('', '--use-tids',
                    dest='use_target_ids',
                    help='Specify explicitly which devices Greentea can use for testing, as a comma-separated list of allowed Target IDs')

    parser.add_option('-u', '--shuffle',
                    dest='shuffle_test_order',
                    default=False,
                    action="store_true",
                    help='Shuffles test execution order')

    parser.add_option('', '--shuffle-seed',
                    dest='shuffle_test_seed',
                    default=None,
                    help='Shuffle seed (to reproduce a shuffle order, use the seed provided in the test summary)')

    parser.add_option('', '--lock',
                    dest='lock_by_target',
                    default=False,
                    action="store_true",
                    help='Use simple resource locking mechanism to run multiple application instances')

    parser.add_option('', '--digest',
                    dest='digest_source',
                    help='Redirect the source of console input for the test suite. Use stdin or a file name to supply test case console output')

    parser.add_option('-H', '--hooks',
                    dest='hooks_json',
                    help='Load hooks used to drive extra functionality')

    parser.add_option('', '--test-spec',
                    dest='test_spec',
                    help='Test specification generated by build system.')

    parser.add_option('', '--test-cfg',
                    dest='json_test_configuration',
                    help='Pass host test configuration data to the host test')

    parser.add_option('', '--run',
                    dest='run_app',
                    help='Flash, reset and dump serial from selected binary application')

    parser.add_option('', '--report-junit',
                    dest='report_junit_file_name',
                    help='You can log test suite results in the form of a JUnit-compliant XML report')

    parser.add_option('', '--report-text',
                    dest='report_text_file_name',
                    help='You can log test suite results to a text file')

    parser.add_option('', '--report-json',
                    dest='report_json_file_name',
                    help='You can log test suite results to a JSON-formatted file')

    parser.add_option('', '--report-html',
                    dest='report_html_file_name',
                    help='You can log test suite results in the form of an HTML page')

    parser.add_option('', '--report-fails',
                    dest='report_fails',
                    default=False,
                    action="store_true",
                    help='Prints console outputs for failed tests')

    parser.add_option('', '--yotta-registry',
                    dest='yotta_search_for_mbed_target',
                    default=False,
                    action="store_true",
                    help='Use the online yotta registry to search for yotta targets compatible with connected mbed devices. Default: search is done in the yotta_targets directory')

    parser.add_option('-V', '--verbose-test-result',
                    dest='verbose_test_result_only',
                    default=False,
                    action="store_true",
                    help='Prints test serial output')

    parser.add_option('-v', '--verbose',
                    dest='verbose',
                    default=False,
                    action="store_true",
                    help='Verbose mode (prints some extra information)')

    parser.add_option('', '--plain',
                    dest='plain',
                    default=False,
                    action="store_true",
                    help='Do not use colours while logging')

    parser.add_option('', '--version',
                    dest='version',
                    default=False,
                    action="store_true",
                    help='Prints package version and exits')

    parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool"""
    parser.epilog = """Example: mbedgt --target frdm-k64f-gcc"""

    (opts, args) = parser.parse_args()

    cli_ret = 0

    if not opts.version:
        # This string should not appear when fetching plain version string
        gt_logger.gt_log(get_hello_string())

    start = time()
    if opts.lock_by_target:
        # We are using Greentea's proprietary locking mechanism to lock between platforms and targets
        gt_logger.gt_log("using (experimental) simple locking mechanism")
        gt_logger.gt_log_tab("kettle: %s"% GREENTEA_KETTLE_PATH)
        gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem()
        with gt_file_sem:
            greentea_update_kettle(gt_instance_uuid)
            try:
                cli_ret = main_cli(opts, args, gt_instance_uuid)
            except KeyboardInterrupt:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
                return -2    # Keyboard interrupt
            except:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("unexpected error:")
                gt_logger.gt_log_tab(str(sys.exc_info()[0]))
                raise
            greentea_clean_kettle(gt_instance_uuid)
    else:
        # Standard mode of operation
        # Other instances must provide mutually exclusive access control to platforms and targets
        try:
            cli_ret = main_cli(opts, args)
        except KeyboardInterrupt:
            gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
            return -2    # Keyboard interrupt
        except Exception as e:
            gt_logger.gt_log_err("unexpected error:")
            gt_logger.gt_log_tab(str(e))
            raise

    if not any([opts.list_binaries, opts.version]):
        delta = time() - start  # Test execution time delta
        gt_logger.gt_log("completed in %.2f sec"% delta)

    if cli_ret:
        gt_logger.gt_log_err("exited with code %d"% cli_ret)

    return cli_ret

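The bare except: in the example above uses sys.exc_info() to describe the failure before re-raising it. Below is a minimal, self-contained sketch of that pattern; task and cleanup are hypothetical stand-ins for main_cli and greentea_clean_kettle, not greentea APIs.

import sys

def run_with_cleanup(task, cleanup):
    # Run `task`, always clean up, and log the active exception type
    # from sys.exc_info() before re-raising it unchanged.
    try:
        return task()
    except KeyboardInterrupt:
        cleanup()
        print("ctrl+c keyboard interrupt!")
        return -2
    except:
        cleanup()
        # sys.exc_info() -> (type, value, traceback) of the active exception
        exc_type, exc_value, exc_tb = sys.exc_info()
        print("unexpected error: %s: %s" % (exc_type.__name__, exc_value))
        raise
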
Example 108

Project: spinalcordtoolbox Source File: sct_straighten_spinalcord.py
    def straighten(self):
        # Initialization
        fname_anat = self.input_filename
        fname_centerline = self.centerline_filename
        fname_output = self.output_filename
        gapxy = self.gapxy
        gapz = self.gapz
        leftright_width = self.leftright_width
        remove_temp_files = self.remove_temp_files
        verbose = self.verbose
        interpolation_warp = self.interpolation_warp
        algo_fitting = self.algo_fitting
        window_length = self.window_length
        type_window = self.type_window
        qc = self.qc

        # start timer
        start_time = time.time()

        # get path of the toolbox
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
        sct.printv(path_sct, verbose)

        # Display arguments
        sct.printv("\nCheck input arguments:", verbose)
        sct.printv("  Input volume ...................... " + fname_anat, verbose)
        sct.printv("  Centerline ........................ " + fname_centerline, verbose)
        sct.printv("  Final interpolation ............... " + interpolation_warp, verbose)
        sct.printv("  Verbose ........................... " + str(verbose), verbose)
        sct.printv("", verbose)

        # Extract path/file/extension
        path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
        path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)

        # create temporary folder
        path_tmp = sct.tmp_create(verbose=verbose)

        # Copying input data to tmp folder
        sct.printv('\nCopy files to tmp folder...', verbose)
        sct.run('sct_convert -i '+fname_anat+' -o '+path_tmp+'data.nii')
        sct.run('sct_convert -i '+fname_centerline+' -o '+path_tmp+'centerline.nii.gz')

        # go to tmp folder
        os.chdir(path_tmp)

        try:
            # Change orientation of the input centerline into RPI
            sct.printv("\nOrient centerline to RPI orientation...", verbose)
            sct.run('sct_image -i centerline.nii.gz -setorient RPI -o centerline_rpi.nii.gz')

            # Get dimension
            sct.printv('\nGet dimensions...', verbose)
            from msct_image import Image
            image_centerline = Image('centerline_rpi.nii.gz')
            nx, ny, nz, nt, px, py, pz, pt = image_centerline.dim
            sct.printv('.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)
            sct.printv('.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose)

            if np.min(image_centerline.data) < 0 or np.max(image_centerline.data) > 1:
                image_centerline.data[image_centerline.data < 0] = 0
                image_centerline.data[image_centerline.data > 1] = 1
                image_centerline.save()

            """
            Steps: (everything is done in physical space)
            1. open input image and centreline image
            2. extract bspline fitting of the centreline, and its derivatives
            3. compute length of centerline
            4. compute and generate straight space
            5. compute transformations
                for each voxel of one space: (done using matrices --> improves speed by a factor x300)
                    a. determine which plane of the spinal cord centreline the voxel belongs to
                    b. compute the position of the voxel in the plane (X and Y distance from centreline, along the plane)
                    c. find the corresponding centreline point in the other space
                    d. find the correspondence of the voxel in the corresponding plane
            6. generate warping fields for each transformation
            7. write warping fields and apply them

            step 5.a: how to find the corresponding plane?
                The centerline plane corresponding to a voxel is the one passing through the nearest centerline point.
                However, we need to compute the distance between the voxel position and the plane to be sure it is part of the plane and not too distant.
                If it is farther than a threshold, the warping value should be 0.

            step 5.d: how to establish the correspondence between centerline points in both images?
                Both centerlines have the same length, so we can map centerline points via their position along the curve.
                If we use the same number of points uniformly along the spinal cord (1000 for example), the correspondence is straightforward.
            """

            # number of points along the spinal cord
            if algo_fitting == 'hanning':
                number_of_points = nz
            else:
                number_of_points = int(self.precision * (float(nz) / pz))
                if number_of_points < 100:
                    number_of_points *= 50

            # 2. extract bspline fitting of the centreline, and its derivatives
            x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline('centerline_rpi.nii.gz', algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, verbose=verbose, nurbs_pts_number=number_of_points, all_slices=False, phys_coordinates=True, remove_outliers=True)
            from msct_types import Centerline
            centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv)

            # ==========================================================================================
            sct.printv("\nCreate the straight space and the safe zone...", verbose)
            # 3. compute length of centerline
            # compute the length of the spinal cord based on fitted centerline and size of centerline in z direction
            from math import sqrt, atan2, sin

            # Computation of the safe zone.
            # The safe zone is defined as the length of the spinal cord for which an axial segmentation will be complete
            # The safe length (to remove) is computed using the safe radius (given as parameter) and the angle of the
            # last centerline point with the inferior-superior direction. Formula: Ls = Rs * sin(angle)
            # Calculate Ls for both edges and remove appropriate number of centerline points
            radius_safe = 0.0  # mm

            # inferior edge
            u = np.array([x_centerline_deriv[0], y_centerline_deriv[0], z_centerline_deriv[0]])
            v = np.array([0, 0, -1])
            angle_inferior = atan2(np.linalg.norm(np.cross(u, v)), np.dot(u, v))
            length_safe_inferior = radius_safe * sin(angle_inferior)

            # superior edge
            u = np.array([x_centerline_deriv[-1], y_centerline_deriv[-1], z_centerline_deriv[-1]])
            v = np.array([0, 0, 1])
            angle_superior = atan2(np.linalg.norm(np.cross(u, v)), np.dot(u, v))
            length_safe_superior = radius_safe * sin(angle_superior)

            # remove points
            from bisect import bisect
            inferior_bound = bisect(centerline.progressive_length, length_safe_inferior) - 1
            superior_bound = centerline.number_of_points - bisect(centerline.progressive_length_inverse, length_safe_superior)

            length_centerline = centerline.length
            size_z_centerline = z_centerline[-1] - z_centerline[0]

            # compute the size factor between initial centerline and straight bended centerline
            factor_curved_straight = length_centerline / size_z_centerline
            middle_slice = (z_centerline[0] + z_centerline[-1]) / 2.0

            bound_curved = [z_centerline[inferior_bound], z_centerline[superior_bound]]
            bound_straight = [(z_centerline[inferior_bound] - middle_slice) * factor_curved_straight + middle_slice,
                              (z_centerline[superior_bound] - middle_slice) * factor_curved_straight + middle_slice]

            if verbose == 2:
                print "Length of spinal cord = ", str(length_centerline)
                print "Size of spinal cord in z direction = ", str(size_z_centerline)
                print "Ratio length/size = ", str(factor_curved_straight)
                print "Safe zone boundaries: "
                print "Curved space = ", bound_curved
                print "Straight space = ", bound_straight

            # 4. compute and generate straight space
            # points along curved centerline are already regularly spaced.
            # calculate position of points along straight centerline

            # Create straight NIFTI volumes
            # ==========================================================================================
            sct.printv('\nPad input volume to account for spinal cord length...', verbose)
            from numpy import ceil
            start_point = (z_centerline[0] - middle_slice) * factor_curved_straight + middle_slice
            end_point = (z_centerline[-1] - middle_slice) * factor_curved_straight + middle_slice

            padding_z = int(ceil(1.5 * ((length_centerline - size_z_centerline) / 2.0) / pz))
            sct.run('sct_image -i centerline_rpi.nii.gz -o tmp.centerline_pad.nii.gz -pad 0,0,'+str(padding_z))
            image_centerline_pad = Image('centerline_rpi.nii.gz')
            nx, ny, nz, nt, px, py, pz, pt = image_centerline_pad.dim
            hdr_warp = image_centerline_pad.hdr.copy()
            start_point_coord = image_centerline_pad.transfo_phys2pix([[0, 0, start_point]])[0]
            end_point_coord = image_centerline_pad.transfo_phys2pix([[0, 0, end_point]])[0]

            straight_size_x = int(35 / px)
            straight_size_y = int(35 / py)
            warp_space_x = [int(np.round(nx / 2)) - straight_size_x, int(np.round(nx / 2)) + straight_size_x]
            warp_space_y = [int(np.round(ny / 2)) - straight_size_y, int(np.round(ny / 2)) + straight_size_y]
            if warp_space_x[0] < 0:
                warp_space_x[1] += warp_space_x[0] - 2
                warp_space_x[0] = 0
            if warp_space_y[0] < 0:
                warp_space_y[1] += warp_space_y[0] - 2
                warp_space_y[0] = 0

            sct.run('sct_crop_image -i tmp.centerline_pad.nii.gz -o tmp.centerline_pad_crop.nii.gz -dim 0,1,2 -start ' + str(warp_space_x[0]) + ',' + str(warp_space_y[0]) + ',0 -end ' + str(warp_space_x[1]) + ',' + str(warp_space_y[1]) + ',' + str(end_point_coord[2] - start_point_coord[2]))
            image_centerline_straight = Image('tmp.centerline_pad_crop.nii.gz')
            nx_s, ny_s, nz_s, nt_s, px_s, py_s, pz_s, pt_s = image_centerline_straight.dim
            hdr_warp_s = image_centerline_straight.hdr.copy()
            hdr_warp_s.set_data_dtype('float32')
            #origin = [(nx_s * px_s)/2.0, -(ny_s * py_s)/2.0, -(nz_s * pz_s)/2.0]
            #hdr_warp_s.structarr['qoffset_x'] = origin[0]
            #hdr_warp_s.structarr['qoffset_y'] = origin[1]
            #hdr_warp_s.structarr['qoffset_z'] = origin[2]
            #hdr_warp_s.structarr['srow_x'][-1] = origin[0]
            #hdr_warp_s.structarr['srow_y'][-1] = origin[1]
            #hdr_warp_s.structarr['srow_z'][-1] = origin[2]
            hdr_warp_s.structarr['quatern_b'] = 0.0
            hdr_warp_s.structarr['quatern_c'] = 1.0
            hdr_warp_s.structarr['quatern_d'] = 0.0
            hdr_warp_s.structarr['srow_x'][0] = -px_s
            hdr_warp_s.structarr['srow_x'][1] = 0.0
            hdr_warp_s.structarr['srow_x'][2] = 0.0
            hdr_warp_s.structarr['srow_y'][0] = 0.0
            hdr_warp_s.structarr['srow_y'][1] = py_s
            hdr_warp_s.structarr['srow_y'][2] = 0.0
            hdr_warp_s.structarr['srow_z'][0] = 0.0
            hdr_warp_s.structarr['srow_z'][1] = 0.0
            hdr_warp_s.structarr['srow_z'][2] = pz_s
            image_centerline_straight.hdr = hdr_warp_s
            image_centerline_straight.compute_transform_matrix()
            image_centerline_straight.save()

            start_point_coord = image_centerline_pad.transfo_phys2pix([[0, 0, start_point]])[0]
            end_point_coord = image_centerline_pad.transfo_phys2pix([[0, 0, end_point]])[0]

            number_of_voxel = nx * ny * nz
            sct.printv("Number of voxel = " + str(number_of_voxel))

            time_centerlines = time.time()
            
            from numpy import linspace
            ix_straight = [int(np.round(nx_s / 2))] * number_of_points
            iy_straight = [int(np.round(ny_s / 2))] * number_of_points
            iz_straight = linspace(0, end_point_coord[2] - start_point_coord[2], number_of_points)
            dx_straight = [0.0] * number_of_points
            dy_straight = [0.0] * number_of_points
            dz_straight = [1.0] * number_of_points
            coord_straight = np.array(zip(ix_straight, iy_straight, iz_straight))
            coord_phys_straight = np.asarray(image_centerline_straight.transfo_pix2phys(coord_straight))

            centerline_straight = Centerline(coord_phys_straight[:, 0], coord_phys_straight[:, 1], coord_phys_straight[:, 2],
                                             dx_straight, dy_straight, dz_straight)


            time_centerlines = time.time() - time_centerlines
            sct.printv('Time to generate centerline: ' + str(np.round(time_centerlines * 1000.0)) + ' ms', verbose)

            """
            import matplotlib.pyplot as plt
            curved_points = centerline.progressive_length
            straight_points = centerline_straight.progressive_length
            range_points = linspace(0, 1, number_of_points)
            dist_curved = np.zeros(number_of_points)
            dist_straight = np.zeros(number_of_points)
            for i in range(1, number_of_points):
                dist_curved[i] = dist_curved[i - 1] + curved_points[i - 1] / centerline.length
                dist_straight[i] = dist_straight[i - 1] + straight_points[i - 1] / centerline_straight.length
            plt.plot(range_points, dist_curved)
            plt.plot(range_points, dist_straight)
            plt.grid(True)
            plt.show()
            """

            # Create volumes containing curved and straight warping fields
            time_generation_volumes = time.time()
            data_warp_curved2straight = np.zeros((nx_s, ny_s, nz_s, 1, 3))
            data_warp_straight2curved = np.zeros((nx, ny, nz, 1, 3))

            # 5. compute transformations
            # Curved and straight images have the same dimensions, so we compute both warping fields at the same time.
            # a. determine which plane of the spinal cord centreline each voxel belongs to
            x, y, z = np.mgrid[0:nx, 0:ny, 0:nz]
            indexes = np.array(zip(x.ravel(), y.ravel(), z.ravel()))
            x_s, y_s, z_s = np.mgrid[0:nx_s, 0:ny_s, 0:nz_s]
            indexes_straight = np.array(zip(x_s.ravel(), y_s.ravel(), z_s.ravel()))
            time_generation_volumes = time.time() - time_generation_volumes
            sct.printv('Time to generate volumes and indices: ' + str(np.round(time_generation_volumes * 1000.0)) + ' ms', verbose)

            time_find_nearest_indexes = time.time()
            physical_coordinates = image_centerline_pad.transfo_pix2phys(indexes)
            physical_coordinates_straight = image_centerline_straight.transfo_pix2phys(indexes_straight)
            nearest_indexes_curved = centerline.find_nearest_indexes(physical_coordinates)
            nearest_indexes_straight = centerline_straight.find_nearest_indexes(physical_coordinates_straight)
            time_find_nearest_indexes = time.time() - time_find_nearest_indexes
            sct.printv('Time to find nearest centerline points: ' + str(np.round(time_find_nearest_indexes * 1000.0)) + ' ms', verbose)

            # compute the distance from voxels to their corresponding planes.
            # This distance is used to black out voxels that are not in the modified image.
            time_get_distances_from_planes = time.time()
            distances_curved = centerline.get_distances_from_planes(physical_coordinates, nearest_indexes_curved)
            distances_straight = centerline_straight.get_distances_from_planes(physical_coordinates_straight, nearest_indexes_straight)
            indexes_out_distance_curved = np.logical_or(distances_curved > self.threshold_distance, distances_curved < -self.threshold_distance)
            indexes_out_distance_straight = np.logical_or(distances_straight > self.threshold_distance, distances_straight < -self.threshold_distance)
            time_get_distances_from_planes = time.time() - time_get_distances_from_planes
            sct.printv('Time to compute distance between voxels and nearest planes: ' + str(np.round(time_get_distances_from_planes * 1000.0)) + ' ms', verbose)

            # b. compute the position of the voxel in the plane coordinate system
            # (X and Y distance from centreline, along the plane)
            time_get_projected_coordinates_on_planes = time.time()
            projected_points_curved = centerline.get_projected_coordinates_on_planes(physical_coordinates, nearest_indexes_curved)
            projected_points_straight = centerline_straight.get_projected_coordinates_on_planes(physical_coordinates_straight, nearest_indexes_straight)
            time_get_projected_coordinates_on_planes = time.time() - time_get_projected_coordinates_on_planes
            sct.printv('Time to get projected voxels on planes: ' + str(np.round(time_get_projected_coordinates_on_planes * 1000.0)) + ' ms', verbose)

            # d. find the correspondence of the voxel in the corresponding plane
            time_get_in_plans_coordinates = time.time()
            coord_in_planes_curved = centerline.get_in_plans_coordinates(projected_points_curved, nearest_indexes_curved)
            coord_in_planes_straight = centerline_straight.get_in_plans_coordinates(projected_points_straight, nearest_indexes_straight)
            time_get_in_plans_coordinates = time.time() - time_get_in_plans_coordinates
            sct.printv('Time to get in-plane coordinates: ' + str(np.round(time_get_in_plans_coordinates * 1000.0)) + ' ms', verbose)

            # 6. generate warping fields for each transformations
            # compute coordinate in straight space based on position on plane
            time_displacements = time.time()
            coord_curved2straight = centerline_straight.points[nearest_indexes_curved]
            coord_curved2straight[:, 0:2] += coord_in_planes_curved[:, 0:2]
            coord_curved2straight[:, 2] += distances_curved

            displacements_curved = coord_curved2straight - physical_coordinates
            # for some reason, displacement in Z is inverted. Probably due to left/right-handed definition of referential.
            #displacements_curved[:, 0] = -displacements_curved[:, 0]
            displacements_curved[:, 2] = -displacements_curved[:, 2]
            displacements_curved[indexes_out_distance_curved] = [100000.0, 100000.0, 100000.0]

            coord_straight2curved = centerline.get_inverse_plans_coordinates(coord_in_planes_straight, nearest_indexes_straight)
            displacements_straight = coord_straight2curved - physical_coordinates_straight
            # for some reason, displacement in Z is inverted. Probably due to left/right-handed definition of referential.
            #displacements_straight[:, 0] = -displacements_straight[:, 0]
            displacements_straight[:, 2] = -displacements_straight[:, 2]
            displacements_straight[indexes_out_distance_straight] = [100000.0, 100000.0, 100000.0]

            # For error-free interpolation purposes, warping fields are inverted, following the definition used by ITK.
            data_warp_curved2straight[indexes_straight[:, 0], indexes_straight[:, 1], indexes_straight[:, 2], 0, :] = -displacements_straight
            data_warp_straight2curved[indexes[:, 0], indexes[:, 1], indexes[:, 2], 0, :] = -displacements_curved

            time_displacements = time.time() - time_displacements
            sct.printv('Time to compute physical displacements: ' + str(np.round(time_displacements * 1000.0)) + ' ms', verbose)

            # Creation of the safe zone based on pre-calculated safe boundaries
            coord_bound_curved_inf, coord_bound_curved_sup = image_centerline_pad.transfo_phys2pix([[0, 0, bound_curved[0]]]), image_centerline_pad.transfo_phys2pix([[0, 0, bound_curved[1]]])
            coord_bound_straight_inf, coord_bound_straight_sup = image_centerline_straight.transfo_phys2pix([[0, 0, bound_straight[0]]]), image_centerline_straight.transfo_phys2pix([[0, 0, bound_straight[1]]])

            if radius_safe > 0:
                data_warp_curved2straight[:, :, 0:coord_bound_straight_inf[0][2], 0, :] = 100000.0
                data_warp_curved2straight[:, :, coord_bound_straight_sup[0][2]:, 0, :] = 100000.0
                data_warp_straight2curved[:, :, 0:coord_bound_curved_inf[0][2], 0, :] = 100000.0
                data_warp_straight2curved[:, :, coord_bound_curved_sup[0][2]:, 0, :] = 100000.0

            # Generate warp files as warping fields
            hdr_warp_s.set_intent('vector', (), '')
            hdr_warp_s.set_data_dtype('float32')
            hdr_warp.set_intent('vector', (), '')
            hdr_warp.set_data_dtype('float32')
            img = Nifti1Image(data_warp_curved2straight, None, hdr_warp_s)
            save(img, 'tmp.curve2straight.nii.gz')
            sct.printv('\nDONE ! Warping field generated: tmp.curve2straight.nii.gz', verbose)

            img = Nifti1Image(data_warp_straight2curved, None, hdr_warp)
            save(img, 'tmp.straight2curve.nii.gz')
            sct.printv('\nDONE ! Warping field generated: tmp.straight2curve.nii.gz', verbose)

            # Apply transformation to input image
            sct.printv('\nApply transformation to input image...', verbose)
            sct.run('sct_apply_transfo -i data.nii -d tmp.centerline_pad_crop.nii.gz -o tmp.anat_rigid_warp.nii.gz -w tmp.curve2straight.nii.gz -x '+interpolation_warp, verbose)

            # compute the error between the straightened centerline/segmentation and the central vertical line.
            # Ideally, the error should be zero.
            # Apply transformation to centerline image
            sct.printv('\nApply transformation to centerline image...', verbose)
            Transform(input_filename='centerline.nii.gz', fname_dest="tmp.centerline_pad_crop.nii.gz",
                      output_filename="tmp.centerline_straight.nii.gz", interp="nn",
                      warp="tmp.curve2straight.nii.gz", verbose=verbose).apply()
            from msct_image import Image
            file_centerline_straight = Image('tmp.centerline_straight.nii.gz', verbose=verbose)
            coordinates_centerline = file_centerline_straight.getNonZeroCoordinates(sorting='z')
            mean_coord = []
            for z in range(coordinates_centerline[0].z, coordinates_centerline[-1].z):
                temp_mean = [coord.value for coord in coordinates_centerline if coord.z == z]
                if temp_mean:
                    mean_value = np.mean(temp_mean)
                    mean_coord.append(np.mean([[coord.x * coord.value / mean_value, coord.y * coord.value / mean_value]
                                                for coord in coordinates_centerline if coord.z == z], axis=0))

            # compute error between the straightened centerline and the straight line.
            from math import sqrt
            x0 = file_centerline_straight.data.shape[0]/2.0
            y0 = file_centerline_straight.data.shape[1]/2.0
            count_mean = 0
            for coord_z in mean_coord[2:-2]:  # we don't include the four extrema because they are usually messy.
                if not np.isnan(np.sum(coord_z)):
                    dist = ((x0-coord_z[0])*px)**2 + ((y0-coord_z[1])*py)**2
                    self.mse_straightening += dist
                    dist = sqrt(dist)
                    if dist > self.max_distance_straightening:
                        self.max_distance_straightening = dist
                    count_mean += 1
            self.mse_straightening = sqrt(self.mse_straightening/float(count_mean))

        except Exception as e:
            sct.printv('WARNING: Exception during Straightening:', 1, 'warning')
            sct.printv('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), 1, 'warning')
            sct.printv(str(e), 1, 'warning')

        os.chdir('..')

        # Generate output file (in current folder)
        # TODO: do not uncompress the warping field, it is too time consuming!
        sct.printv("\nGenerate output file (in current folder)...", verbose)
        sct.generate_output_file(path_tmp + "/tmp.curve2straight.nii.gz", self.path_output + "warp_curve2straight.nii.gz", verbose)
        sct.generate_output_file(path_tmp + "/tmp.straight2curve.nii.gz", self.path_output + "warp_straight2curve.nii.gz", verbose)
        # create ref_straight.nii.gz file that can be used by other SCT functions that need a straight reference space
        shutil.copy(path_tmp+'/tmp.anat_rigid_warp.nii.gz', 'straight_ref.nii.gz')
        # move straightened input file
        if fname_output == '':
            fname_straight = sct.generate_output_file(path_tmp + "/tmp.anat_rigid_warp.nii.gz",
                                                      self.path_output + file_anat + "_straight" + ext_anat, verbose)
        else:
            fname_straight = sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz',
                                                      self.path_output + fname_output, verbose)  # straightened anatomic

        # Remove temporary files
        if remove_temp_files:
            sct.printv("\nRemove temporary files...", verbose)
            sct.run("rm -rf " + path_tmp, verbose)

        sct.printv('\nDone!\n', verbose)

        sct.printv("Maximum x-y error = " + str(np.round(self.max_distance_straightening, 2)) + " mm", verbose, "bold")
        sct.printv("Accuracy of straightening (MSE) = " + str(np.round(self.mse_straightening, 2)) +
                   " mm", verbose, "bold")

        # display elapsed time
        elapsed_time = time.time() - start_time
        sct.printv("\nFinished! Elapsed time: " + str(int(np.round(elapsed_time))) + "s", verbose)
        sct.printv("\nTo view results, type:", verbose)
        sct.printv("fslview " + fname_straight + " &\n", verbose, 'info')

        # output QC image
        if qc:
            from msct_image import Image
            Image(fname_straight).save_quality_control(plane='sagittal', n_slices=1, path_output=self.path_output)

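In the example above, the except clause at the end of straighten() reads sys.exc_info()[-1].tb_lineno to report where the failure happened: the last element of the sys.exc_info() tuple is the traceback object. A self-contained sketch of the same trick, with a deliberately failing statement:

import sys

def report_exception_line():
    # Recover the line number of the active exception from the
    # traceback object returned by sys.exc_info().
    try:
        1 / 0
    except Exception as e:
        exc_type, exc_value, exc_tb = sys.exc_info()
        # tb_lineno is the line in this frame where the exception passed
        # through; follow tb_next to descend into nested frames if needed.
        print('Error on line {0}: {1}'.format(exc_tb.tb_lineno, e))

report_exception_line()
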
Example 109

Project: w2p_tvseries Source File: connectionpool.py
Function: url_open
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            conn = self._get_conn(timeout=pool_timeout)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            if conn:
                conn.close()
                conn = None
            raise SSLError(e)

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
                conn.close()
                conn = None

            stacktrace = sys.exc_info()[2]
            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e,
                                        _pool=self, _stacktrace=stacktrace)
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response

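The urlopen() retry handling above captures sys.exc_info()[2] (the traceback of the failed attempt) and threads it into Retry.increment(), so the original error can be re-raised with its original stack once no more retries are allowed. A minimal sketch of that idea, assuming Python 3's with_traceback(); MaxRetriesExceeded and fetch_once are hypothetical stand-ins, not urllib3 APIs:

import sys

class MaxRetriesExceeded(Exception):
    pass

def fetch_once():
    raise ConnectionError("connection aborted")

def fetch_with_retries(attempts=3):
    for remaining in range(attempts, 0, -1):
        try:
            return fetch_once()
        except ConnectionError:
            # Keep the traceback of the failed attempt so the final error
            # still points at the real failure site.
            stacktrace = sys.exc_info()[2]
            if remaining == 1:
                raise MaxRetriesExceeded("giving up").with_traceback(stacktrace)
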
Example 110

Project: nipy Source File: nifti_ref.py
def nipy2nifti(img, data_dtype=None, strict=None, fix0=True):
    """ Return NIFTI image from nipy image `img`

    Parameters
    ----------
    img : object
         An object, usually a NIPY ``Image``,  having attributes `coordmap` and
         `shape`
    data_dtype : None or dtype specifier
        None means try and use header dtype, otherwise try and use data dtype,
        otherwise use np.float32.  A dtype specifier means set the header output
        data dtype using ``np.dtype(data_dtype)``.
    strict : bool, optional
        Whether to use strict checking of input image for creating NIFTI
    fix0 : bool, optional
        Whether to fix a potential 0 column / row in the affine. This option is
        only used when trying to find time etc. axes in the coordmap output
        names.  In order to find matching input names, we need to use the
        corresponding rows and columns in the affine.  Sometimes time, in
        particular, has 0 scaling, and thus all 0 in the corresponding row /
        column.  In that case it's hard to work out which input corresponds. If
        `fix0` is True, and there is only one all-zero (matrix part of the)
        affine row, and only one all-zero (matrix part of the) affine column,
        fix scaling for that combination to zero, assuming this is a zero
        scaling for time.

    Returns
    -------
    ni_img : ``nibabel.Nifti1Image``
        NIFTI image

    Raises
    ------
    NiftiError: if space axes not orthogonal to non-space axes
    NiftiError: if non-space axes not orthogonal to each other
    NiftiError: if `img` output space does not match named spaces in NIFTI
    NiftiError: if input image has more than 7 dimensions
    NiftiError: if input image has 7 dimensions, but no time dimension, because
        we need to add an extra 1 length axis at position 3
    NiftiError: if we find a time-like input axis but the matching output axis
        is a different time-like.
    NiftiError: if we find a time-like output axis but the matching input axis
        is a different time-like.
    NiftiError: if we find a time output axis and there are non-zero non-spatial
        offsets in the affine, but we can't find a corresponding input axis.

    Notes
    -----
    First, we need to create a valid XYZ Affine.  We check if this can be done
    by checking if there are recognizable X, Y, Z output axes and corresponding
    input (voxel) axes.  This requires the input image to be at least 3D. If we
    find these requirements, we reorder the image axes to have XYZ output axes
    and 3 spatial input axes first, and get the corresponding XYZ affine.

    If the spatial dimensions are not orthogonal to the non-spatial dimensions,
    raise a NiftiError.

    If the non-spatial dimensions are not orthogonal to each other, raise a
    NiftiError.

    We check if the XYZ output fits with the NIFTI named spaces of scanner,
    aligned, Talairach, MNI.  If so, set the NIFTI code and qform, sform
    accordingly.  If the space corresponds to 'unknown' then we must set the
    NIFTI transform codes to 0, and the affine must match the affine we will get
    from loading the NIFTI with no qform, sform.  If not, we're going to lose
    information in the affine, and raise an error.

    If any of the first three input axes are named ('slice', 'freq', 'phase')
    set the ``dim_info`` field accordingly.

    Set the ``xyzt_units`` field to indicate millimeters and seconds, if there
    is a 't' axis, otherwise millimeters and 0 (unknown).

    We look to see if we have a time-like axis in the inputs or the outputs. A
    time-like axis has labels 't', 'hz', 'ppm', 'rads'.  If we have an axis 't'
    in the inputs *and* the outputs, check they either correspond, or both
    inputs and outputs correspond with no other axis, otherwise raise NiftiError.
    Do the same check for 'hz', then 'ppm', then 'rads'.

    If we do have a time-like axis, roll that axis to be the 4th axis.  If this
    axis is actually time, take the ``affine[3, -1]`` and put into the
    ``toffset`` field.  If there's no time-like axis, but there are other
    non-spatial axes, make a length 1 4th array axis to indicate this.

    If the resulting NIFTI image has more than 7 dimensions, raise a NiftiError.

    Set ``pixdim`` for axes >= 3 using vector length of corresponding affine
    columns.

    We don't set the intent-related fields for now.
    """
    strict_none = strict is None
    if strict_none:
        warnings.warn('Default `strict` currently False; this will change to '
                      'True in a future version of nipy',
                      FutureWarning,
                      stacklevel=2)
        strict = False
    known_names = ncrs.known_names
    if not strict: # add simple 'xyz' to acceptable spatial names
        known_names = copy(known_names) # copy module global dict
        for c in 'xyz':
            known_names[c] = c
    try:
        img = as_xyz_image(img, known_names)
    except (ncrs.AxesError, ncrs.AffineError):
        # Python 2.5 / 3 compatibility
        e = sys.exc_info()[1]
        raise NiftiError('Image cannot be reordered to XYZ because: "%s"'
                         % e)
    coordmap = img.coordmap
    # Get useful information from old header
    in_hdr = img.metadata.get('header', None)
    hdr = nib.Nifti1Header.from_header(in_hdr)
    # Default behavior is to take datatype from old header, unless there was no
    # header, in which case we try to use the data dtype.
    data = None
    if data_dtype is None:
        if in_hdr is None:
            data = img.get_data()
            data_dtype = data.dtype
        else:
            data_dtype = in_hdr.get_data_dtype()
    else:
        data_dtype = np.dtype(data_dtype)
    hdr.set_data_dtype(data_dtype)
    # Remaining axes orthogonal?
    rzs, trans = to_matvec(coordmap.affine)
    if (not np.allclose(rzs[3:, :3], 0) or
        not np.allclose(rzs[:3, 3:], 0)):
        raise NiftiError('Non space axes not orthogonal to space')
    # And to each other?
    nsp_affine = rzs[3:,3:]
    nsp_nzs = np.abs(nsp_affine) > TINY
    n_in_col = np.sum(nsp_nzs, axis=0)
    n_in_row = np.sum(nsp_nzs, axis=1)
    if np.any(n_in_col > 1) or np.any(n_in_row > 1):
        raise NiftiError('Non space axes not orthogonal to each other')
    # Affine seems OK, check for space
    xyz_affine = ncrs.xyz_affine(coordmap, known_names)
    spatial_output_names = coordmap.function_range.coord_names[:3]
    out_space = CS(spatial_output_names)
    for name, space in XFORM2SPACE.items():
        if out_space in space:
            hdr.set_sform(xyz_affine, name)
            hdr.set_qform(xyz_affine, name)
            break
    else:
        if not strict and spatial_output_names == ('x', 'y', 'z'):
            warnings.warn('Default `strict` currently False; '
                          'this will change to True in a future version of '
                          'nipy; output names of "x", "y", "z" will raise '
                          'an error.  Please use canonical output names from '
                          'nipy.core.reference.spaces',
                          FutureWarning,
                          stacklevel=2)
            hdr.set_sform(xyz_affine, 'scanner')
            hdr.set_qform(xyz_affine, 'scanner')
        elif out_space not in ncrs.unknown_space: # no space we recognize
            raise NiftiError('Image world not a NIFTI world')
        else: # unknown space requires affine that matches
            # Set guessed shape to set zooms correctly
            hdr.set_data_shape(img.shape)
            # Use qform set to set the zooms, but with 'unknown' code
            hdr.set_qform(xyz_affine, 'unknown')
            hdr.set_sform(None)
            if not np.allclose(xyz_affine, hdr.get_base_affine()):
                raise NiftiError("Image world is 'unknown' but affine not "
                                 "compatible; please reset image world or "
                                 "affine")
    # Use list() to get .index method for python < 2.6
    input_names = list(coordmap.function_domain.coord_names)
    spatial_names = input_names[:3]
    dim_infos = []
    for fps in 'freq', 'phase', 'slice':
        dim_infos.append(
            spatial_names.index(fps) if fps in spatial_names else None)
    hdr.set_dim_info(*dim_infos)
    # Set units without knowing time
    hdr.set_xyzt_units(xyz='mm')
    # Done if we only have 3 input dimensions
    n_ns = coordmap.ndims[0] - 3
    if n_ns == 0: # No non-spatial dimensions
        return nib.Nifti1Image(img.get_data(), xyz_affine, hdr)
    elif n_ns > 4:
        raise NiftiError("Too many dimensions to convert")
    # Go now to data, pixdims
    if data is None:
        data = img.get_data()
    rzs, trans = to_matvec(img.coordmap.affine)
    ns_pixdims = list(np.sqrt(np.sum(rzs[3:, 3:] ** 2, axis=0)))
    in_ax, out_ax, tl_name = _find_time_like(coordmap, fix0)
    if in_ax is None: # No time-like axes
        # add new 1-length axis
        if n_ns == 4:
            raise NiftiError("Too many dimensions to convert")
        n_ns += 1
        data = data[:, :, :, None, ...]
        # xyzt_units
        hdr.set_xyzt_units(xyz='mm')
        # shift pixdims
        ns_pixdims.insert(0, 0)
    else: # Time-like
        hdr.set_xyzt_units(xyz='mm', t=TIME_LIKE_AXES[tl_name]['units'])
        # If this is really time, set toffset
        if tl_name == 't' and np.any(trans[3:]):
            # Which output axis corresponds to time?
            if out_ax is None:
                raise NiftiError('Time input and output do not match')
            hdr['toffset'] = trans[out_ax]
        # Make sure this time-like axis is first non-space axis
        if in_ax != 3:
            data = np.rollaxis(data, in_ax, 3)
            order = list(range(n_ns))
            order.pop(in_ax - 3)
            order.insert(0, in_ax - 3)
            ns_pixdims = [ns_pixdims[i] for i in order]
    hdr['pixdim'][4:(4 + n_ns)] = ns_pixdims
    return nib.Nifti1Image(data, xyz_affine, hdr)

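The except block in nipy2nifti() above catches the error without binding it ('except (ncrs.AxesError, ncrs.AffineError):') and then fetches the instance with sys.exc_info()[1]. That is a source-compatibility idiom: 'except E, e' is a syntax error on Python 3 and 'except E as e' is one on Python 2.5, while sys.exc_info() works on both. A minimal sketch:

import sys

def compat_catch(text):
    # Pull the active exception instance out of sys.exc_info() instead
    # of binding it in the except clause, so the same source parses on
    # Python 2.5 and Python 3.
    try:
        return int(text)
    except ValueError:
        e = sys.exc_info()[1]  # the active exception instance
        raise RuntimeError('Conversion failed because: "%s"' % e)

compat_catch("42")  # returns 42
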
Example 111

Project: offlineimap Source File: accounts.py
Function: syncfolder
def syncfolder(account, remotefolder, quick):
    """Synchronizes given remote folder for the specified account.

    Filtered folders on the remote side will not invoke this function. However,
    it might be called concurrently."""

    def check_uid_validity(localfolder, remotefolder, statusfolder):
        # If either the local or the status folder has messages and
        # there is a UID validity problem, warn and abort.  If there are
        # no messages, UW IMAPd loses UIDVALIDITY.  But we don't really
        # need it if both local folders are empty.  So, in that case,
        # just save it off.
        if localfolder.getmessagecount() > 0 or statusfolder.getmessagecount() > 0:
            if not localfolder.check_uidvalidity():
                ui.validityproblem(localfolder)
                localfolder.repository.restore_atime()
                return
            if not remotefolder.check_uidvalidity():
                ui.validityproblem(remotefolder)
                localrepos.restore_atime()
                return
        else:
            # Both folders empty, just save new UIDVALIDITY.
            localfolder.save_uidvalidity()
            remotefolder.save_uidvalidity()

    def save_min_uid(folder, min_uid):
        uidfile = folder.get_min_uid_file()
        fd = open(uidfile, 'wt')
        fd.write(str(min_uid) + "\n")
        fd.close()

    def cachemessagelists_upto_date(localfolder, remotefolder, date):
        """Returns messages with uid > min(uids of messages newer than date)."""

        localfolder.cachemessagelist(min_date=date)
        check_uid_validity(localfolder, remotefolder, statusfolder)
        # Local messagelist had date restriction applied already. Restrict
        # sync to messages with UIDs >= min_uid from this list.
        #
        # Local messagelist might contain new messages (with uid's < 0).
        positive_uids = [uid for uid in localfolder.getmessageuidlist() if uid > 0]
        if len(positive_uids) > 0:
            remotefolder.cachemessagelist(min_uid=min(positive_uids))
        else:
            # No messages with UID > 0 in range in localfolder.
            # date restriction was applied with respect to local dates but
            # remote folder timezone might be different from local, so be
            # safe and make sure the range isn't bigger than in local.
            remotefolder.cachemessagelist(
                min_date=time.gmtime(time.mktime(date) + 24*60*60))

    def cachemessagelists_startdate(new, partial, date):
        """Retrieve messagelists when startdate has been set for
        the folder 'partial'.

        Idea: suppose you want to clone the messages after date in one
        account (partial) to a new one (new). If new is empty, then copy
        messages in partial newer than date to new, and keep track of the
        min uid. On subsequent syncs, sync all the messages in new against
        those after that min uid in partial. This is a partial replacement
        for maxage in the IMAP-IMAP sync case, where maxage doesn't work:
        the UIDs of the messages in localfolder might not be in the same
        order as those of corresponding messages in remotefolder, so if L in
        local corresponds to R in remote, the ranges [L, ...] and [R, ...]
        might not correspond. But, if we're cloning a folder into a new one,
        [min_uid, ...] does correspond to [1, ...].

        This is just for IMAP-IMAP. For Maildir-IMAP, use maxage instead."""

        new.cachemessagelist()
        min_uid = partial.retrieve_min_uid()
        if min_uid is None: # min_uid file didn't exist
            if len(new.getmessageuidlist()) > 0:
                raise OfflineImapError("To use startdate on Repository %s, "
                    "Repository %s must be empty"%
                    (partial.repository.name, new.repository.name),
                    OfflineImapError.ERROR.MESSAGE)
            else:
                partial.cachemessagelist(min_date=date)
                # messagelist.keys() instead of getuidmessagelist() because in
                # the UID mapped case we want the actual local UIDs, not their
                # remote counterparts.
                positive_uids = [uid for uid in list(partial.messagelist.keys()) if uid > 0]
                if len(positive_uids) > 0:
                    min_uid = min(positive_uids)
                else:
                    min_uid = 1
                save_min_uid(partial, min_uid)
        else:
            partial.cachemessagelist(min_uid=min_uid)


    remoterepos = account.remoterepos
    localrepos = account.localrepos
    statusrepos = account.statusrepos

    ui = getglobalui()
    ui.registerthread(account)
    try:
        # Load local folder.
        localfolder = account.get_local_folder(remotefolder)

        # Add the folder to the mbnames mailboxes.
        mbnames.add(account.name, localrepos.getlocalroot(),
            localfolder.getname())

        # Load status folder.
        statusfolder = statusrepos.getfolder(remotefolder.getvisiblename().
            replace(remoterepos.getsep(), statusrepos.getsep()))
        statusfolder.openfiles()
        statusfolder.cachemessagelist()

        # Announce which folders are being synchronized.
        ui.syncingfolder(remoterepos, remotefolder, localrepos, localfolder)

        # Retrieve messagelists, taking into account age-restriction
        # options.
        maxage = localfolder.getmaxage()
        localstart = localfolder.getstartdate()
        remotestart = remotefolder.getstartdate()
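        # Each (x is not None) counts as 0 or 1, so the sum below is the
        # number of restriction options that are set; at most one is allowed.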
        if ((maxage is not None) + (localstart is not None) +
                (remotestart is not None) > 1):
            six.reraise(OfflineImapError,
                        OfflineImapError("You can set at most one of the "
                            "following: maxage, startdate (for the local "
                            "folder), startdate (for the remote folder)",
                            OfflineImapError.ERROR.REPO),
                        exc_info()[2])
        if (maxage is not None or localstart or remotestart) and quick:
            # IMAP quickchanged isn't compatible with options that
            # involve restricting the messagelist, since the "quick"
            # check can only retrieve a full list of UIDs in the folder.
            ui.warn("Quick syncs (-q) not supported in conjunction "
                "with maxage or startdate; ignoring -q.")
        if maxage is not None:
            cachemessagelists_upto_date(localfolder, remotefolder, maxage)
        elif localstart is not None:
            cachemessagelists_startdate(remotefolder, localfolder,
                localstart)
            check_uid_validity(localfolder, remotefolder, statusfolder)
        elif remotestart is not None:
            cachemessagelists_startdate(localfolder, remotefolder,
                remotestart)
            check_uid_validity(localfolder, remotefolder, statusfolder)
        else:
            localfolder.cachemessagelist()
            if quick:
                if (not localfolder.quickchanged(statusfolder) and
                    not remotefolder.quickchanged(statusfolder)):
                    ui.skippingfolder(remotefolder)
                    localrepos.restore_atime()
                    return
            check_uid_validity(localfolder, remotefolder, statusfolder)
            remotefolder.cachemessagelist()

        # Synchronize remote changes.
        if not localrepos.getconfboolean('readonly', False):
            ui.syncingmessages(remoterepos, remotefolder, localrepos, localfolder)
            remotefolder.syncmessagesto(localfolder, statusfolder)
        else:
            ui.debug('', "Not syncing to read-only repository '%s'"%
                    localrepos.getname())

        # Synchronize local changes.
        if not remoterepos.getconfboolean('readonly', False):
            ui.syncingmessages(localrepos, localfolder, remoterepos, remotefolder)
            localfolder.syncmessagesto(remotefolder, statusfolder)
        else:
            ui.debug('', "Not syncing to read-only repository '%s'"%
                    remoterepos.getname())

        statusfolder.save()
        localrepos.restore_atime()
    except (KeyboardInterrupt, SystemExit):
        raise
    except OfflineImapError as e:
        # Bubble up severe Errors, skip folder otherwise.
        if e.severity > OfflineImapError.ERROR.FOLDER:
            raise
        else:
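            # exc_info()[2] hands the active traceback to the UI so the
            # error report can show where the failure occurred.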
            ui.error(e, exc_info()[2], msg="Aborting sync, folder '%s' "
                     "[acc: '%s']"% (localfolder, account))
    except Exception as e:
        ui.error(e, msg="ERROR in syncfolder for %s folder %s: %s"%
            (account, remotefolder.getvisiblename(), traceback.format_exc()))
    finally:
        for folder in ["statusfolder", "localfolder", "remotefolder"]:
            if folder in locals():
                locals()[folder].dropmessagelistcache()
        # statusfolder may never have been bound if opening it failed above.
        if "statusfolder" in locals():
            statusfolder.closefiles()
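
The `six.reraise(..., exc_info()[2])` calls above re-raise a new exception while
preserving the traceback of the exception currently being handled. A minimal
sketch of the same pattern (the names here are illustrative, not part of
offlineimap):

import sys
import six

class ConfigError(Exception):
    pass

def load_setting(raw):
    try:
        return int(raw)
    except ValueError:
        # Substitute a domain-specific exception but keep the original
        # traceback, so the real failure site stays visible.
        six.reraise(ConfigError,
                    ConfigError("invalid setting: %r" % raw),
                    sys.exc_info()[2])

On Python 3 alone, `raise ConfigError(...) from exc` or
`raise exc.with_traceback(tb)` achieves the same effect; `six.reraise` keeps
the pattern working on Python 2 as well.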

Example 112

Project: openpathsampling Source File: dynamics_engine.py
Function: iter_generate
    def iter_generate(self, initial, running=None, direction=+1,
                      intervals=10, max_length=0):
        r"""
        Return a generator that will generate a trajectory, returning the
        current trajectory in given intervals

        Parameters
        ----------
        initial : :class:`openpathsampling.Snapshot` or :class:`openpathsampling.Trajectory`
            initial coordinates and velocities in form of a Snapshot object
            or a trajectory
        running : (list of) function(:class:`openpathsampling.trajectory.Trajectory`)
            callable functions of a 'Trajectory' that return True or False.
            If one of these returns False, the simulation is stopped.
        direction : -1 or +1 (DynamicsEngine.FORWARD or DynamicsEngine.BACKWARD)
            If +1, this will integrate forward; if -1, it will reverse the
            momenta of the given snapshot and prepend generated snapshots
            with reversed momenta, producing a _reversed_ trajectory that
            effectively ends in the initial snapshot
        intervals : int
            number of steps after which the current status is returned. If
            `0`, it will run until the end or until a keyboard interrupt is
            detected
        max_length : int
            limits the simulation to the given number of steps. The default
            of `0` runs without limit

        Yields
        ------
        trajectory : :class:`openpathsampling.trajectory.Trajectory`
            generated trajectory of initial conditions, including the
            initial coordinate set

        Notes
        -----
        Even if the returned trajectory has length n_frames_max, it may
        have stopped because of the stopping criterion; you need to check
        for that case.
        """

        if direction == 0:
            raise RuntimeError(
                'direction must be positive (FORWARD) or negative (BACKWARD).')

        try:
            iter(running)
        except TypeError:
            running = [running]

        if hasattr(initial, '__iter__'):
            initial = Trajectory(initial)
        else:
            initial = Trajectory([initial])

        valid = False
        attempt_nan = 0
        attempt_error = 0
        attempt_max_length = 0
        trajectory = initial

        final_error = None
        errors = []

        while not valid and final_error is None:
            if attempt_nan + attempt_error > 1:
                # let's get a new initial trajectory the way the user wants to
                if self.on_retry == 'full':
                    trajectory = initial
                elif self.on_retry == 'remove_interval':
                    trajectory = \
                        trajectory[:max(
                            len(initial),
                            len(trajectory) - intervals)]
                elif self.on_retry == 'keep_half':
                    trajectory = \
                        trajectory[:min(
                            int(len(trajectory) * 0.9),
                            max(
                                len(initial),
                                len(trajectory) / 2))]
                elif hasattr(self.on_retry, '__call__'):
                    trajectory = self.on_retry(trajectory)

            if direction > 0:
                self.current_snapshot = trajectory[-1]
            elif direction < 0:
                # backward simulation needs reversed snapshots
                self.current_snapshot = trajectory[0].reversed

            logger.info("Starting trajectory")
            self.start()

            frame = 0
            # maybe we should stop before we even begin?
            stop = self.stop_conditions(trajectory=trajectory,
                                        continue_conditions=running,
                                        trusted=False)

            log_rate = 10
            has_nan = False
            has_error = False

            while not stop:
                if intervals > 0 and frame % intervals == 0:
                    # return the current status
                    logger.info("Through frame: %d", frame)
                    yield trajectory

                elif frame % log_rate == 0:
                    logger.info("Through frame: %d", frame)

                # Advance the integrator by one frame.

                snapshot = None

                try:
                    with DelayedInterrupt():
                        snapshot = self.generate_next_frame()

                        if not self.is_valid_snapshot(snapshot):
                            has_nan = True
                            break

                except KeyboardInterrupt as e:
                    # make sure we still report the last state before shutting down
                    logger.info('Keyboard interrupt. Shutting down simulation')
                    final_error = e
                    break

                except:
                    # any other error we start a retry
                    e = sys.exc_info()
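                    # e is the (type, value, traceback) triple returned by
                    # sys.exc_info(); str(e) below therefore includes the
                    # exception's message text, and errors[-1][1] later
                    # extracts the exception instance for re-raising.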
                    errors.append(e)
                    se = str(e).lower()
                    if 'nan' in se and \
                            ('particle' in se or 'coordinates' in se):
                        # this cannot be ignored because we cannot continue!
                        has_nan = True
                        break
                    else:
                        has_error = True
                        break

                frame += 1

                # Store the snapshot and add it to the trajectory.
                # The final frame is stored here as well, on the last pass.
                if direction > 0:
                    trajectory.append(snapshot)
                elif direction < 0:
                    trajectory.insert(0, snapshot.reversed)

                if 0 < max_length < len(trajectory):
                    # hit the max length criterion
                    on = self.on_max_length
                    del trajectory[-1]

                    if on == 'fail':
                        final_error = EngineMaxLengthError(
                            'Hit maximal length of %d frames.' %
                            self.options['n_frames_max'],
                            trajectory
                        )
                        break
                    elif on == 'stop':
                        logger.info('Trajectory hit max length. Stopping.')
                        # fail gracefully
                        stop = True
                    elif on == 'retry':
                        attempt_max_length += 1
                        if attempt_max_length > self.retries_when_max_length:
                            if self.on_nan == 'fail':
                                final_error = EngineMaxLengthError(
                                    'Failed to generate trajectory without '
                                    'hitting max length after %d attempts' %
                                    attempt_max_length,
                                    trajectory)
                                break

                if stop is False:
                    # Check if we should stop. If not, continue the simulation.
                    stop = self.stop_conditions(trajectory=trajectory,
                                                continue_conditions=running)

            if has_nan:
                on = self.on_nan
                if on == 'fail':
                    final_error = EngineNaNError(
                        '`nan` in snapshot', trajectory)
                elif on == 'retry':
                    attempt_nan += 1
                    if attempt_nan > self.retries_when_nan:
                        final_error = EngineNaNError(
                            'Failed to generate trajectory without `nan` '
                            'after %d attempts' % attempt_nan,
                            trajectory)

            elif has_error:
                on = self.on_error  # assuming an 'on_error' option parallel to 'on_nan'
                if on == 'fail':
                    final_error = errors[-1][1]
                    del errors[-1]
                elif on == 'retry':
                    attempt_error += 1
                    if attempt_error > self.retries_when_error:
                        final_error = EngineError(
                            'Failed to generate trajectory without error '
                            'after %d attempts' % attempt_error,
                            trajectory)

            elif stop:
                valid = True

            self.stop(trajectory)

        if errors:
            logger.info('Errors occurred during generation:')
            for no, e in enumerate(errors):
                logger.info('[#%d] %s' % (no, repr(e[1])))

        if final_error is not None:
            yield trajectory
            logger.info("Through frame: %d", len(trajectory))
            raise final_error

        logger.info("Finished trajectory, length: %d", len(trajectory))
        yield trajectory
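
The bare `except:` in the loop above captures the full `(type, value,
traceback)` triple via `sys.exc_info()` so the failure can be inspected,
retried, and re-raised later with its original traceback. A minimal sketch of
that defer-and-reraise pattern (the helper and its names are illustrative, not
part of openpathsampling):

import sys
import six

def run_with_retries(step, max_attempts=3):
    """Call `step()` up to max_attempts times; re-raise the last failure."""
    errors = []
    for _ in range(max_attempts):
        try:
            return step()
        except Exception:
            # Save the whole triple; the traceback object keeps the
            # original failure site available for later reporting.
            errors.append(sys.exc_info())
    for no, err in enumerate(errors):
        print('[#%d] %r' % (no, err[1]))
    # Out of retries: re-raise the last captured exception with its
    # original traceback.
    tp, value, tb = errors[-1]
    six.reraise(tp, value, tb)

Note that storing these triples keeps the traceback frames alive; in
long-running code, clear the list once the errors have been reported.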