Here are examples of the Python API `requests_cache.disabled` taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
5 Examples
3
Example 1
def main():
# Once cached, delayed page will be taken from cache
# redirects also handled
for i in range(5):
requests.get('http://httpbin.org/delay/2')
r = requests.get('http://httpbin.org/redirect/5')
print(r.text)
# And if we need to get fresh page or don't want to cache it?
with requests_cache.disabled():
print(requests.get('http://httpbin.org/ip').text)
# Debugging info about cache
print(requests_cache.get_cache())
3
Example 2
Project: paasta Source File: paasta_serviceinit_steps.py
@then(u'marathon_serviceinit status_marathon_job should return "{status}" for "{job_id}"')
def status_marathon_job(context, status, job_id):
    """BDD step: assert that status_marathon_job output mentions *status*."""
    (service, instance, _, __) = decompose_job_id(job_id)
    complete_config = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)
    app_id = complete_config['id']
    normal_instance_count = 1
    # Bypass the requests cache so the status call hits marathon directly.
    with requests_cache.disabled():
        actual_output = marathon_serviceinit.status_marathon_job(
            service,
            instance,
            app_id,
            normal_instance_count,
            context.marathon_client
        )
    assert status in actual_output
0
Example 3
Project: paasta Source File: itest_utils.py
@timeout()
def wait_for_app_to_launch_tasks(client, app_id, expected_tasks, exact_matches_only=False):
    """ Wait for an app to have num_tasks tasks launched. If the app isn't found, then this will swallow the exception
    and retry. Times out after 30 seconds.
    :param client: The marathon client
    :param app_id: The app id to which the tasks belong
    :param expected_tasks: The number of tasks to wait for
    :param exact_matches_only: a boolean indicating whether we require exactly expected_tasks to be running
    """
    found = False
    # Disable the requests cache so each poll sees fresh marathon state.
    with requests_cache.disabled():
        while not found:
            try:
                found = app_has_tasks(client, app_id, expected_tasks, exact_matches_only)
            except NotFoundError:
                # App may not be registered with marathon yet; keep polling
                # until the @timeout() decorator aborts us.
                pass
            if found:
                time.sleep(3)  # Give it a bit more time to actually launch
                return
            else:
                # FIX: was a Python-2-only `print` statement; the print()
                # call form produces identical output and also runs on
                # Python 3, matching the print() usage elsewhere in the file.
                print("waiting for app %s to have %d tasks. retrying" % (app_id, expected_tasks))
                time.sleep(0.5)
0
Example 4
Project: paasta Source File: cleanup_marathon_job_steps.py
@then(u'we should not see it in the list of apps')
def not_see_it_in_list(context):
    """BDD step: assert the app id is absent from marathon's app list."""
    # Query marathon directly (requests cache disabled) for the live app ids.
    with requests_cache.disabled():
        running_app_ids = marathon_tools.list_all_marathon_app_ids(context.marathon_client)
        assert context.app_id not in running_app_ids
0
Example 5
Project: paasta Source File: setup_marathon_job.py
def do_bounce(
    bounce_func,
    drain_method,
    config,
    new_app_running,
    happy_new_tasks,
    old_app_live_happy_tasks,
    old_app_live_unhappy_tasks,
    old_app_draining_tasks,
    old_app_at_risk_tasks,
    service,
    bounce_method,
    serviceinstance,
    cluster,
    instance,
    marathon_jobid,
    client,
    soa_dir,
    bounce_margin_factor=1.0,
):
    """Run one iteration of a bounce: ask ``bounce_func`` what to do, then
    create the new app, drain/kill old tasks, and remove fully-drained old
    apps, logging each action.

    :param bounce_func: callable returning a dict with at least the keys
        'create_app' and 'tasks_to_drain' (seen used below)
    :param old_app_live_happy_tasks: mapping of app id -> set of tasks;
        the other old_app_* params appear to share this shape (they are
        indexed by the same keys and combined with set operators) —
        NOTE(review): confirm against callers
    :param bounce_margin_factor: passed through to bounce_func as
        margin_factor (default 1.0)
    """
    # Closure so every log line carries the same service/cluster/instance
    # context without repeating it at each call site.
    def log_bounce_action(line, level='debug'):
        return _log(
            service=service,
            line=line,
            component='deploy',
            level=level,
            cluster=cluster,
            instance=instance
        )
    # log if we're not in a steady state.
    # Steady state == new app exists AND no old apps still have live happy
    # tasks (a non-empty .keys() is truthy).
    if any([
        (not new_app_running),
        old_app_live_happy_tasks.keys()
    ]):
        log_bounce_action(
            line=' '.join([
                '%s bounce in progress on %s.' % (bounce_method, serviceinstance),
                'New marathon app %s %s.' % (marathon_jobid, ('exists' if new_app_running else 'not created yet')),
                '%d new tasks to bring up.' % (config['instances'] - len(happy_new_tasks)),
                '%d old tasks receiving traffic and happy.' % len(bounce_lib.flatten_tasks(old_app_live_happy_tasks)),
                '%d old tasks unhappy.' % len(bounce_lib.flatten_tasks(old_app_live_unhappy_tasks)),
                '%d old tasks draining.' % len(bounce_lib.flatten_tasks(old_app_draining_tasks)),
                '%d old tasks at risk.' % len(bounce_lib.flatten_tasks(old_app_at_risk_tasks)),
                '%d old apps.' % len(old_app_live_happy_tasks.keys()),
            ]),
            level='event',
        )
    else:
        log.debug("Nothing to do, bounce is in a steady state")
    # Ask the bounce strategy what to do this round.
    actions = bounce_func(
        new_config=config,
        new_app_running=new_app_running,
        happy_new_tasks=happy_new_tasks,
        old_app_live_happy_tasks=old_app_live_happy_tasks,
        old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
        margin_factor=bounce_margin_factor,
    )
    if actions['create_app'] and not new_app_running:
        log_bounce_action(
            line='%s bounce creating new app with app_id %s' % (bounce_method, marathon_jobid),
        )
        # Cache disabled: the create must hit marathon directly.
        with requests_cache.disabled():
            bounce_lib.create_marathon_app(marathon_jobid, config, client)
    # Start draining the tasks the strategy chose; get back the ones whose
    # drains are complete (or otherwise ready) and can be killed now.
    tasks_to_kill = drain_tasks_and_find_tasks_to_kill(
        tasks_to_drain=actions['tasks_to_drain'],
        already_draining_tasks=bounce_lib.flatten_tasks(old_app_draining_tasks),
        drain_method=drain_method,
        log_bounce_action=log_bounce_action,
        bounce_method=bounce_method,
        at_risk_tasks=bounce_lib.flatten_tasks(old_app_at_risk_tasks),
    )
    kill_given_tasks(client=client, task_ids=[task.id for task in tasks_to_kill], scale=True)
    # For at-risk tasks we just killed, re-reserve resources on their hosts.
    for task in bounce_lib.flatten_tasks(old_app_at_risk_tasks):
        if task in tasks_to_kill:
            hostname = task.host
            reserve_all_resources([hostname])
    # An old app can be removed once every one of its tasks (happy, unhappy,
    # draining, at-risk — combined with set union below) is in tasks_to_kill.
    apps_to_kill = []
    for app in old_app_live_happy_tasks.keys():
        # Never remove the app we're bouncing TO (marathon app ids are
        # '/'-prefixed here, per this comparison).
        if app != '/%s' % marathon_jobid:
            live_happy_tasks = old_app_live_happy_tasks[app]
            live_unhappy_tasks = old_app_live_unhappy_tasks[app]
            draining_tasks = old_app_draining_tasks[app]
            at_risk_tasks = old_app_at_risk_tasks[app]
            # set-difference against tasks_to_kill — implies tasks_to_kill
            # and the per-app values are sets. NOTE(review): confirm.
            if 0 == len((live_happy_tasks | live_unhappy_tasks | draining_tasks | at_risk_tasks) - tasks_to_kill):
                apps_to_kill.append(app)
    if apps_to_kill:
        log_bounce_action(
            line='%s bounce removing old unused apps with app_ids: %s' %
            (
                bounce_method,
                ', '.join(apps_to_kill)
            ),
        )
        # Cache disabled: app removal must hit marathon directly.
        with requests_cache.disabled():
            bounce_lib.kill_old_ids(apps_to_kill, client)
    # Union of every old task across all four categories.
    all_old_tasks = set.union(set(), *old_app_live_happy_tasks.values())
    all_old_tasks = set.union(all_old_tasks, *old_app_live_unhappy_tasks.values())
    all_old_tasks = set.union(all_old_tasks, *old_app_draining_tasks.values())
    all_old_tasks = set.union(all_old_tasks, *old_app_at_risk_tasks.values())
    # log if we appear to be finished
    # NOTE(review): `apps_to_kill == old_app_live_happy_tasks.keys()` compares
    # a list against .keys(); on Python 2 that is list == list and therefore
    # order-sensitive (and on Python 3 list == dict_keys is always False) —
    # verify this condition behaves as intended on the target interpreter.
    if all([
        (apps_to_kill or tasks_to_kill),
        apps_to_kill == old_app_live_happy_tasks.keys(),
        tasks_to_kill == all_old_tasks,
    ]):
        log_bounce_action(
            line='%s bounce on %s finishing. Now running %s' %
            (
                bounce_method,
                serviceinstance,
                marathon_jobid
            ),
            level='event',
        )