Here are examples of the Python API `scrapy.exceptions.NotSupported`, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
3 Examples
3
Example 1
Project: scrapy Source File: __init__.py
def download_request(self, request, spider):
    """Dispatch *request* to the download handler registered for its URL scheme.

    Looks up the handler via ``self._get_handler`` using the request's
    (cached) parsed scheme, and delegates the download to it.

    Raises:
        NotSupported: if no handler is configured for the scheme; the
            message includes the reason recorded in ``self._notconfigured``.
    """
    scheme = urlparse_cached(request).scheme
    handler = self._get_handler(scheme)
    if handler:
        return handler.download_request(request, spider)
    # No handler for this scheme — surface the recorded configuration error.
    raise NotSupported("Unsupported URL scheme '%s': %s" %
                       (scheme, self._notconfigured[scheme]))
3
Example 2
Project: scrapy Source File: feed.py
def parse(self, response):
    """Adapt *response*, select feed nodes per ``self.iterator``, and delegate.

    Requires the subclass to define ``parse_node``; otherwise the spider
    is considered misconfigured.

    Raises:
        NotConfigured: if no ``parse_node`` method is defined.
        NotSupported: if ``self.iterator`` is not one of
            'iternodes', 'xml', or 'html'.
    """
    if not hasattr(self, 'parse_node'):
        raise NotConfigured('You must define parse_node method in order to scrape this XML feed')

    response = self.adapt_response(response)
    iterator = self.iterator
    if iterator == 'iternodes':
        nodes = self._iternodes(response)
    elif iterator in ('xml', 'html'):
        # Both selector-backed iterators share the same flow; only the
        # parser type passed to Selector differs.
        selector = Selector(response, type=iterator)
        self._register_namespaces(selector)
        nodes = selector.xpath('//%s' % self.itertag)
    else:
        raise NotSupported('Unsupported node iterator')

    return self.parse_nodes(response, nodes)
0
Example 3
def __init__(self, *args, **kw):
    """Refuse construction: this handler does not implement HTTP/1.1.

    Raises:
        NotSupported: always, regardless of the arguments passed.
    """
    raise NotSupported('HTTP1.1 not supported')