Here are examples of the Python API `requests.head(...).url`, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
4 Examples
0
Example 1
Project: coala-bears Source File: InvalidLinkBear.py
@deprecate_settings(link_ignore_regex='ignore_regex')
def run(self, filename, file,
        timeout: int=DEFAULT_TIMEOUT,
        link_ignore_regex: str=r"([.\/]example\.com|\{|\$)",
        follow_redirects: bool=False):
    """
    Find links in any text file and check if they are valid.

    A link is considered valid if the server responds with a 2xx code.

    This bear can automatically fix redirects, but ignores redirect
    URLs that have a huge difference with the original URL.

    Warning: This bear will make HEAD requests to all URLs mentioned in
    your codebase, which can potentially be destructive. As an example,
    this bear would naively just visit the URL from a line that goes like
    `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
    all your data.

    :param timeout:           Request timeout period.
    :param link_ignore_regex: A regex for urls to ignore.
    :param follow_redirects:  Set to true to autocorrect redirects.
    """
    for line_number, link, code in InvalidLinkBear.find_links_in_file(
            file, timeout, link_ignore_regex):
        if code is None:
            # No HTTP status code at all: the connection itself failed.
            yield Result.from_values(
                origin=self,
                message=('Broken link - unable to connect to '
                         '{url}').format(url=link),
                file=filename,
                line=line_number,
                severity=RESULT_SEVERITY.MAJOR)
        elif not 200 <= code < 300:
            # HTTP status 404, 410 or 50x
            if code in (404, 410) or 500 <= code < 600:
                yield Result.from_values(
                    origin=self,
                    message=('Broken link - unable to connect to {url} '
                             '(HTTP Error: {code})'
                             ).format(url=link, code=code),
                    file=filename,
                    line=line_number,
                    severity=RESULT_SEVERITY.NORMAL)
            if follow_redirects and 300 <= code < 400:  # HTTP status 30x
                # Resolve the whole redirect chain to its final URL.
                redirect_url = requests.head(link,
                                             allow_redirects=True).url
                matcher = SequenceMatcher(None, redirect_url, link)
                # FIX: the original closed the parenthesis after
                # ``matcher.ratio()`` -- ``(a > 0.7 and b) > 0.7`` -- which
                # only produced the intended result by coincidence of
                # bool/float comparison. Compare each ratio explicitly;
                # the cheap real_quick_ratio() upper bound short-circuits
                # before the expensive ratio() is computed.
                if (matcher.real_quick_ratio() > 0.7 and
                        matcher.ratio() > 0.7):
                    # Only autocorrect redirects whose target is similar
                    # enough to the original URL.
                    diff = Diff(file)
                    current_line = file[line_number - 1]
                    start = current_line.find(link)
                    end = start + len(link)
                    replacement = (current_line[:start] +
                                   redirect_url + current_line[end:])
                    diff.change_line(line_number,
                                     current_line,
                                     replacement)
                    yield Result.from_values(
                        self,
                        'This link redirects to ' + redirect_url,
                        diffs={filename: diff},
                        file=filename,
                        line=line_number,
                        severity=RESULT_SEVERITY.NORMAL)
0
Example 2
Project: orochi Source File: player.py
def load(self, path):
    """Load a file and play it.

    Args:
        path:
            The path (url or filepath) to the file which should be played.
    """
    # Resolve any redirects: a HEAD request follows them without
    # downloading the body, and exposes the final location via ``.url``.
    url = requests.head(path, allow_redirects=True).url
    # Fix https URLs, which are not supported by mplayer
    if url.startswith('https:'):
        url = 'http:' + url[6:]
    # Stop previously started background threads
    self._stop_background_thread()
    # Load file, wait for command to finish
    self._send_command('loadfile {}', url)
    start = time.time()
    # Poll mplayer's stdout until it reports that playback actually
    # started, or bail out once self.timeout seconds have elapsed.
    while 1:
        if 'CPLAYER: Starting playback...' in self.p.read():
            break
        if time.time() - start > self.timeout:  # TODO use sigalarm or sigusr2 instead
            self.terminate()
            raise RuntimeError("Playback didn't start within {}s. ".format(self.timeout) +
                               "Something must have gone wrong. Are you experiencing network problems?")
        time.sleep(0.1)

    # Start a background thread that checks the playback status
    def playback_status(process, stop_event, write_lock, pausing_keep):
        """Poll mplayer process for time_pos song and ending.

        When song has ended, send a SIGUSR1 signal. When time_pos is larger
        than 30s, send a SIGUSR2 signal to report the song.

        When ``stop_event`` is set, exit thread.
        """
        reported = False
        # Matches e.g. "GLOBAL: ANS_TIME_POSITION=12.3" in mplayer output.
        time_pos_rex = re.compile(r'GLOBAL: ANS_TIME_POSITION=([0-9]+\.[0-9]+)')
        while not stop_event.is_set():
            if not reported:
                # Guard writes to the mplayer pipe; other threads
                # presumably also write commands (hence write_lock).
                with write_lock:
                    process.write('{} get_time_pos\n'.format(pausing_keep))
            stdout = process.read()
            if stdout:
                if 'GLOBAL: EOF code: 1' in stdout:
                    # Song ended: notify the main process via SIGUSR1.
                    os.kill(os.getpid(), signal.SIGUSR1)
                if not reported:
                    match = time_pos_rex.search(stdout)
                    if match and float(match.group(1)) >= 30:
                        # Played >= 30s: report the song once via SIGUSR2.
                        os.kill(os.getpid(), signal.SIGUSR2)
                        reported = True
            stop_event.wait(0.5)
    self.t_stop = threading.Event()
    thread_args = (self.p, self.t_stop, self.write_lock, self.pausing_keep)
    self.t = threading.Thread(target=playback_status, args=thread_args)
    # Daemon thread so it never blocks interpreter shutdown.
    self.t.daemon = True
    self.t.start()
0
Example 3
Project: rtv Source File: __main__.py
def main():
    """Main entry point.

    Loads configuration, sets up logging and locale, then runs the curses
    UI loop; browsing history is saved on the way out regardless of how
    the session ends.
    """
    # Squelch SSL warnings
    logging.captureWarnings(True)
    if six.PY3:
        # These ones get triggered even when capturing warnings is turned on
        warnings.simplefilter('ignore', ResourceWarning)  #pylint:disable=E0602
    # Set the terminal title
    if os.getenv('DISPLAY'):
        title = 'rtv {0}'.format(__version__)
        # xterm escape sequence: ESC ] 2 ; <title> BEL
        sys.stdout.write('\x1b]2;{0}\x07'.format(title))
        sys.stdout.flush()
    args = Config.get_args()
    fargs, bindings = Config.get_file(args.get('config'))
    # Apply the file config first, then overwrite with any command line args
    config = Config()
    config.update(**fargs)
    config.update(**args)
    # If key bindings are supplied in the config file, overwrite the defaults
    if bindings:
        config.keymap.set_bindings(bindings)
    # Copy the default config file and quit
    if config['copy_config']:
        copy_default_config()
        return
    if config['copy_mailcap']:
        copy_default_mailcap()
        return
    # Load the browsing history from previous sessions
    config.load_history()
    # Load any previously saved auth session token
    config.load_refresh_token()
    if config['clear_auth']:
        config.delete_refresh_token()
    if config['log']:
        # Log request headers to the file (print hack only works on python 3.x)
        # from http import client
        # _http_logger = logging.getLogger('http.client')
        # client.HTTPConnection.debuglevel = 2
        # def print_to_file(*args, **_):
        #     if args[0] != "header:":
        #         _http_logger.info(' '.join(args))
        # client.print = print_to_file
        logging.basicConfig(
            level=logging.DEBUG,
            filename=config['log'],
            format='%(asctime)s:%(levelname)s:%(filename)s:%(lineno)d:%(message)s')
        _logger.info('Starting new session, RTV v%s', __version__)
        # Snapshot environment variables that influence behavior, for
        # easier debugging from the log file.
        env = [
            ('$DISPLAY', os.getenv('DISPLAY')),
            ('$XDG_CONFIG_HOME', os.getenv('XDG_CONFIG_HOME')),
            ('$BROWSER', os.getenv('BROWSER')),
            ('$PAGER', os.getenv('PAGER')),
            ('$RTV_EDITOR', os.getenv('RTV_EDITOR')),
            ('$RTV_URLVIEWER', os.getenv('RTV_URLVIEWER'))]
        _logger.info('Environment: %s', env)
    else:
        # Add an empty handler so the logger doesn't complain
        logging.root.addHandler(logging.NullHandler())
    # Make sure the locale is UTF-8 for unicode support
    locale.setlocale(locale.LC_ALL, '')
    encoding = locale.getlocale()[1] or locale.getdefaultlocale()[1]
    if not encoding or encoding.lower() != 'utf-8':
        text = ('System encoding was detected as (%s) instead of UTF-8'
                ', falling back to ascii only mode' % encoding)
        warnings.warn(text)
        config['ascii'] = True
    # Construct the reddit user agent
    user_agent = docs.AGENT.format(version=__version__)
    try:
        with curses_session() as stdscr:
            # Initialize global color-pairs with curses
            if not config['monochrome']:
                Color.init()
            term = Terminal(stdscr, config)
            # catch_exception=False: a failure here should propagate and
            # abort startup rather than be shown in the loader.
            with term.loader('Initializing', catch_exception=False):
                reddit = praw.Reddit(user_agent=user_agent,
                                     decode_html_entities=False,
                                     disable_update_check=True)
            # Authorize on launch if the refresh token is present
            oauth = OAuthHelper(reddit, term, config)
            if config.refresh_token:
                oauth.authorize()
            name = config['subreddit']
            with term.loader('Loading subreddit'):
                page = SubredditPage(reddit, term, config, oauth, name)
            if term.loader.exception:
                return
            # Open the supplied submission link before opening the subreddit
            if config['link']:
                # Expand shortened urls like https://redd.it/
                # Praw won't accept the shortened versions, add the reddit
                # headers to avoid a 429 response from reddit.com
                url = requests.head(config['link'], headers=reddit.http.headers,
                                    allow_redirects=True).url
                page.open_submission(url=url)
            # Launch the subreddit page
            page.loop()
    except ConfigError as e:
        # Configuration problems are user errors: log and print, no traceback.
        _logger.exception(e)
        print(e)
    except Exception as e:
        # Unexpected errors: log, then re-raise so the traceback is visible.
        _logger.exception(e)
        raise
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to quit; exit quietly.
        pass
    finally:
        # Try to save the browsing history
        config.save_history()
        # Ensure sockets are closed to prevent a ResourceWarning
        if 'reddit' in locals():
            reddit.handler.http.close()
0
Example 4
Project: unisubs Source File: brightcove.py
def _resolve_url_redirects(self, url):
    """Follow any HTTP redirects for *url* and return the final URL.

    A HEAD request is used so that no response body is downloaded; with
    ``allow_redirects=True`` the response's ``url`` attribute holds the
    last location in the redirect chain.
    """
    response = requests.head(url, allow_redirects=True)
    return response.url