Here are examples of the Python API `urllib.urlencode`, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
149 Examples
0
Example 51
Project: wammu Source File: TalkbackDialog.py
def Okay(self, evt):
connection = self.connection_combo_box.GetValue()
if connection == self.ns_string:
connection = 'NULL'
if len(self.features) == 0 and connection != 'NULL':
wx.MessageDialog(
self,
_('Entry in Gammu Phone Database was not created, following fields are invalid:\n%s') % _('Supported features'),
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
elif len(self.features) != 0 and connection == 'NULL':
wx.MessageDialog(
self,
_('Entry in Gammu Phone Database was not created, following fields are invalid:\n%s') % _('Supported features'),
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
man_str = self.manufacturer_choice.GetStringSelection()
try:
man_id = Wammu.Data.ManufacturerMap[man_str]
except:
wx.MessageDialog(
self,
_('Entry in Gammu Phone Database was not created, following fields are invalid:\n%s') % _('Manufacturer'),
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
garble_id = self.mangle_choice.GetSelection()
try:
garble_text = Wammu.Data.GarbleMap[garble_id]
except:
wx.MessageDialog(
self,
_('Entry in Gammu Phone Database was not created, following fields are invalid:\n%s') % _('Email displaying'),
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
# Remember user information for next run
self.wammu_cfg.Write('/User/Name', self.name_text_ctrl.GetValue())
self.wammu_cfg.Write('/User/Email', self.email_text_ctrl.GetValue())
# Prepare data to post
params_dict = {
'irobot': 'wammu',
'version': '2',
'manufacturer': man_id,
'name': self.model_text_ctrl.GetValue(),
'model': self.model_combo_box.GetValue(),
'connection': connection,
'note': self.note_text_ctrl.GetValue(),
'author_name': self.name_text_ctrl.GetValue(),
'author_email': self.email_text_ctrl.GetValue(),
'email_garble': garble_text,
'gammu_version': gammu.Version()[0],
}
for x in self.features:
params_dict['fts[%s]' % x] = 1
# Convert unicode to raw utf-8 strigns so that they can be properly
# handled by urllib and later by website
for x in params_dict.keys():
if type(params_dict[x]) == unicode:
params_dict[x] = params_dict[x].encode('utf-8')
# Encode request and prepare headers
params = urllib.urlencode(params_dict)
headers = {
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'
}
# Perform request
conn = httplib.HTTPConnection('wammu.eu')
try:
conn.request('POST', '/api/phones/new/', params, headers)
# Check request response
response = conn.getresponse()
if response.status != 200:
wx.MessageDialog(
self,
_('HTTP request failed with status %(code)d (%(text)s), please retry later or create entry manually.') % {
'code': response.status,
'text': response.reason,
},
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
except Exception, e:
if hasattr(e, 'message') and e.message != '':
msg = e.message
elif hasattr(e, 'args') and len(e.args) > 0:
msg = e.args[-1]
else:
msg = str(e)
wx.MessageDialog(
self,
_('HTTP request failed with exception:\n%(exception)s\nPlease retry later or create entry manually.') % {
'exception': StrConv(msg),
},
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
# Verify acquired data
data = response.read()
conn.close()
ok_test = OK_MATCHER.match(data)
if ok_test is not None:
url = 'http://%swammu.eu%s' % (Wammu.Utils.GetWebsiteLang(), ok_test.groups()[1])
result = wx.MessageDialog(
self,
_('Entry in Gammu Phone Database has been created, you can see it on <%s> URL.\nDo you want to open it in browser now?') % url,
_('Entry created!'),
wx.YES_NO | wx.ICON_INFORMATION
).ShowModal()
if result == wx.ID_YES:
Wammu.Webbrowser.Open(url)
self.wammu_cfg.Write('/Wammu/TalkbackDone', 'yes')
self.EndModal(wx.ID_OK)
fail_test = FAIL_MATCHER.match(data)
if fail_test is not None:
wrong_fields = fail_test.groups()[0].split(',')
fields_msg = ''
for field in wrong_fields:
if field == 'manufacturer':
fields_msg += _('Manufacturer') + '\n'
elif field == 'name':
fields_msg += _('Phone model') + '\n'
elif field == 'model':
fields_msg += _('Model in gammu configuration') + '\n'
elif field == 'connection':
fields_msg += _('Connection type') + '\n'
elif field == 'note':
fields_msg += _('Note') + '\n'
elif field == 'author_name':
fields_msg += _('Your name') + '\n'
elif field == 'author_email':
fields_msg += _('Your email') + '\n'
elif field == 'email_garble':
fields_msg += _('Email displaying') + '\n'
elif field == 'gammu_version':
fields_msg += _('Gammu version') + '\n'
else:
fields_msg += _('Field: %s') % field + '\n'
wx.MessageDialog(
self,
_('Entry in Gammu Phone Database was not created, following fields are invalid:\n%s') % fields_msg,
_('Entry not created!'),
wx.OK | wx.ICON_ERROR
).ShowModal()
return
0
Example 52
Project: OdooQuant Source File: xqtrader.py
def __trade(self, stock_code, price=0, amount=0, volume=0, entrust_bs='buy'):
    """
    调仓 (rebalance the Xueqiu virtual portfolio by buying or selling
    one stock).

    :param stock_code: stock symbol to trade
    :param price: limit price, used to compute volume when volume is 0
    :param amount: number of shares, used to compute volume when volume is 0
    :param volume: total trade value; computed as price * amount when 0
    :param entrust_bs: 'buy' or 'sell'
    :return: list with one entrust dict on success, list with one error
             dict when the rebalance API reports an error, or None when
             the HTTP request itself fails
    """
    stock = self.__search_stock_info(stock_code)
    balance = self.get_balance()[0]
    # "is None" instead of "== None" (identity comparison for None).
    if stock is None:
        raise TraderError(u"没有查询要操作的股票信息")
    if not volume:
        volume = price * amount  # 可能要取整数
    if balance['current_balance'] < volume and entrust_bs == 'buy':
        raise TraderError(u"没有足够的现金进行操作")
    if stock['flag'] != 1:
        raise TraderError(u"未上市、停牌、涨跌停、退市的股票无法操作。")
    if volume == 0:
        raise TraderError(u"操作金额不能为零")
    # 计算调仓调仓份额 (weight of this trade as percent of total assets)
    weight = volume / balance['asset_balance'] * 100
    weight = round(weight, 2)
    # 获取原有仓位信息
    position_list = self.__get_position()
    # 调整后的持仓
    is_have = False
    for position in position_list:
        if position['stock_id'] == stock['stock_id']:
            is_have = True
            position['proactive'] = True
            old_weight = position['weight']
            if entrust_bs == 'buy':
                position['weight'] = weight + old_weight
            else:
                if weight > old_weight:
                    raise TraderError(u"操作数量大于实际可卖出数量")
                else:
                    position['weight'] = old_weight - weight
    if not is_have:
        if entrust_bs == 'buy':
            # New holding: seed the position entry from the stock info.
            position_list.append({
                "code": stock['code'],
                "name": stock['name'],
                "enName": stock['enName'],
                "hasexist": stock['hasexist'],
                "flag": stock['flag'],
                "type": stock['type'],
                "current": stock['current'],
                "chg": stock['chg'],
                "percent": str(stock['percent']),
                "stock_id": stock['stock_id'],
                "ind_id": stock['ind_id'],
                "ind_name": stock['ind_name'],
                "ind_color": stock['ind_color'],
                "textname": stock['name'],
                "segment_name": stock['ind_name'],
                "weight": weight,
                "url": "/S/" + stock['code'],
                "proactive": True,
                "price": str(stock['current'])
            })
        else:
            raise TraderError(u"没有持有要卖出的股票")
    # Remaining cash percentage after the trade.
    if entrust_bs == 'buy':
        cash = (balance['current_balance'] - volume) / balance['asset_balance'] * 100
    else:
        cash = (balance['current_balance'] + volume) / balance['asset_balance'] * 100
    cash = round(cash, 2)
    log.debug("weight:%f, cash:%f" % (weight, cash))
    data = {
        "cash": cash,
        "holdings": str(json.dumps(position_list)),
        "cube_symbol": str(self.account_config['portfolio_code']),
        'segment': 1,
        'comment': ""
    }
    if six.PY2:
        data = (urllib.urlencode(data))
    else:
        data = (urllib.parse.urlencode(data))
    self.headers['Referer'] = self.config['referer'] % self.account_config['portfolio_code']
    try:
        rebalance_res = self.requests.session().post(self.config['rebalance_url'], headers=self.headers,
                                                     cookies=self.cookies,
                                                     params=data)
    except Exception as e:
        log.warn('调仓失败: %s ' % e)
        return
    else:
        log.debug('调仓 %s%s: %d' % (entrust_bs, stock['name'], rebalance_res.status_code))
        rebalance_status = json.loads(rebalance_res.text)
        if 'error_description' in rebalance_status.keys() and rebalance_res.status_code != 200:
            log.error('调仓错误: %s' % (rebalance_status['error_description']))
            return [{'error_no': rebalance_status['error_code'],
                     'error_info': rebalance_status['error_description']}]
        else:
            return [{'entrust_no': rebalance_status['id'],
                     'init_date': self.__time_strftime(rebalance_status['created_at']),
                     'batch_no': '委托批号',
                     'report_no': '申报号',
                     'seat_no': '席位编号',
                     'entrust_time': self.__time_strftime(rebalance_status['updated_at']),
                     'entrust_price': price,
                     'entrust_amount': amount,
                     'stock_code': stock_code,
                     # Bug fix: was hard-coded to '买入' (buy) even for
                     # sell orders; report the actual direction.
                     'entrust_bs': u'买入' if entrust_bs == 'buy' else u'卖出',
                     'entrust_type': '雪球虚拟委托',
                     'entrust_status': '-'}]
0
Example 53
def _request(self, request, auth_required=True):
"""
Make an HTTP(S) request to an API endpoint based on what's specified in the
request object passed
## Input
Required request keys:
api
Either REST or SOAP
call
Name of the SOAP method or relative path of the REST URL
Optional keys:
query
Contents of the query string passed as a dict
data
Data to post. For SOAP API calls this will be the SOAP envelope. For
REST API calls this will be a dict converted to JSON automatically
by this method
use_cookie_auth
Whether or not to use an HTTP Cookie in lieu of a querystring for authorization
## Output
Returns a dict:
status
Number HTTP status code returned by the response, if any
raw
The raw contents of the response, if any
data
A python dict representing the data contained in the response, if any
"""
for required_key in [
'api',
'call'
]:
if not request.has_key(required_key) and request[required_key]:
self.log("All requests are required to have a key [{}] with a value".format(required_key), level='critical')
return None
url = None
if request['api'] == self.API_TYPE_REST:
url = "{}/{}".format(self._rest_api_endpoint, request['call'].lstrip('/'))
else:
url = self._soap_api_endpoint
self.log("Making a request to {}".format(url), level='debug')
# add the authentication parameters
if auth_required:
if request['api'] == self.API_TYPE_REST:
if not request['use_cookie_auth']: # sID is a query string
if not request['query']: request['query'] = {}
request['query']['sID'] = self._sessions[self.API_TYPE_REST]
elif request['api'] == self.API_TYPE_SOAP:
# sID is part of the data
if not request['data']: request['data'] = {}
request['data']['sID'] = self._sessions[self.API_TYPE_SOAP]
# remove any blank request keys
for k, v in request.items():
if not v: request[k] = None
# prep the query string
if request.has_key('query') and request['query']:
# get with query string
qs = {}
for k, v in request['query'].items(): # strip out null entries
if v: qs[k] = v
url += '?%s' % urllib.urlencode(qs)
self.log("Added query string. Full URL is now {}".format(url), level='debug')
self.log("URL to request is: {}".format(url))
# Prep the SSL context
ssl_context = ssl.create_default_context()
if self.ignore_ssl_validation:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
self.log("SSL certificate validation has been disabled for this call", level='warning')
# Prep the URL opener
url_opener = urllib2.build_opener(urllib2.HTTPSHandler(context=ssl_context))
# Prep the request
request_type = 'GET'
headers = {
'Accept': 'application/json,text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*',
'Content-Type': 'application/json',
}
# authentication calls don't accept the Accept header
if request['call'].startswith('authentication'): del(headers['Accept'])
# some rest calls use a cookie to pass the sID
if request['api'] == self.API_TYPE_REST and request['use_cookie_auth']:
headers['Cookie'] = 'sID="{}"'.format(self._sessions[self.API_TYPE_REST])
if request['api'] == self.API_TYPE_REST and request['call'] in [
'apiVersion',
'status/manager/ping'
]:
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*',
'Content-Type': 'text/plain',
}
if request['api'] == self.API_TYPE_SOAP:
# always a POST
headers = {
'SOAPAction': '',
'content-type': 'application/soap+xml'
}
data = self._prep_data_for_soap(request['call'], request['data'])
url_request = urllib2.Request(url, data=data, headers=headers)
request_type = 'POST'
self.log("Making a SOAP request with headers {}".format(headers), level='debug')
self.log(" and data {}".format(data), level='debug')
elif request['call'] == 'authentication/logout':
url_request = urllib2.Request(url, headers=headers)
setattr(url_request, 'get_method', lambda: 'DELETE') # make this request use the DELETE HTTP verb
request_type = 'DELETE'
self.log("Making a REST DELETE request with headers {}".format(headers), level='debug')
elif request.has_key('data') and request['data']:
# POST
url_request = urllib2.Request(url, data=json.dumps(request['data']), headers=headers)
request_type = 'POST'
self.log("Making a REST POST request with headers {}".format(headers), level='debug')
self.log(" and data {}".format(request['data']), level='debug')
else:
# GET
url_request = urllib2.Request(url, headers=headers)
self.log("Making a REST GET request with headers {}".format(headers), level='debug')
# Make the request
response = None
try:
response = url_opener.open(url_request)
except Exception, url_err:
self.log("Failed to make {} {} call [{}]".format(request['api'].upper(), request_type, request['call'].lstrip('/')), err=url_err)
# Convert the request from JSON
result = {
'status': response.getcode() if response else None,
'raw': response.read() if response else None,
'headers': dict(response.headers) if response else dict(),
'data': None
}
bytes_of_data = len(result['raw']) if result['raw'] else 0
self.log("Call returned HTTP status {} and {} bytes of data".format(result['status'], bytes_of_data), level='debug')
if response:
if request['api'] == self.API_TYPE_SOAP:
# XML response
try:
if result['raw']:
full_data = xmltodict.parse(result['raw'])
if full_data.has_key('soapenv:Envelope') and full_data['soapenv:Envelope'].has_key('soapenv:Body'):
result['data'] = full_data['soapenv:Envelope']['soapenv:Body']
if result['data'].has_key('{}Response'.format(request['call'])):
if result['data']['{}Response'.format(request['call'])].has_key('{}Return'.format(request['call'])):
result['data'] = result['data']['{}Response'.format(request['call'])]['{}Return'.format(request['call'])]
else:
result['data'] = result['data']['{}Response'.format(request['call'])]
else:
result['data'] = full_data
except Exception, xmltodict_err:
self.log("Could not convert response from call {}".format(request['call']), err=xmltodict_err)
else:
# JSON response
try:
if result['raw'] and result['status'] != 204:
result['type'] = result['headers']['content-type']
result['data'] = json.loads(result['raw']) if 'json' in result['type'] else None
except Exception, json_err:
# report the exception as 'info' because it's not fatal and the data is
# still captured in result['raw']
self.log("Could not convert response from call {} to JSON. Threw exception:\n\t{}".format(request['call'], json_err), level='info')
return result
0
Example 54
Project: aemanager Source File: views.py
@csrf_exempt
@commit_on_success
def paypal_ipn(request):
    """PayPal IPN endpoint.

    Echoes the notification back to PayPal for verification, records the
    subscription payment, and on a verified completed payment generates
    the customer/project/proposal/invoice records plus a PDF invoice
    emailed to the user.  Runs inside a single DB transaction
    (commit_on_success) and is CSRF-exempt because PayPal posts to it.
    """
    # send back the response to paypal
    data = dict(request.POST.items())
    args = {'cmd': '_notify-validate'}
    args.update(data)
    # PayPal expects the original POST data re-encoded; values are
    # utf-8 encoded before urlencoding.
    params = urllib.urlencode(dict([k, v.encode('utf-8')] for k, v in args.items()))
    paypal_response = urllib2.urlopen(settings.PAYPAL_URL + '/cgi-bin/webscr', params).read()
    # process the payment
    receiver_id = data['receiver_id']
    transaction_id = data['txn_id']
    payment_status = data['payment_status']
    payment_amount = data['mc_gross']
    payment_currency = data['mc_currency']
    fee = data['mc_fee']
    item_name = data['item_name']
    user_id = data['custom']  # 'custom' carries our user primary key
    user = get_object_or_404(User, pk=user_id)
    profile = user.get_profile()
    last_subscription = profile.get_last_subscription()
    # Idempotent on txn_id: a replayed IPN reuses the same Subscription row.
    subscription, created = Subscription.objects.get_or_create(transaction_id=transaction_id,
                                                               defaults={'owner': user,
                                                                         'state': SUBSCRIPTION_STATE_NOT_PAID,
                                                                         'expiration_date': profile.get_next_expiration_date(),
                                                                         'transaction_id': transaction_id,
                                                                         'error_message': ugettext('Not verified')})
    if paypal_response == 'VERIFIED':
        # NOTE(review): '<>' is Python 2-only inequality syntax.
        if receiver_id <> settings.PAYPAL_RECEIVER_ID:
            subscription.error_message = ugettext('Receiver is not as defined in settings. Spoofing ?')
        elif payment_status <> 'Completed':
            subscription.error_message = ugettext('Payment not completed')
        # NOTE(review): payment_amount is the raw POST string here —
        # presumably the setting is a matching string; confirm types.
        elif payment_amount <> settings.PAYPAL_APP_SUBSCRIPTION_AMOUNT:
            subscription.error_message = ugettext('Amount altered. Bad guy ?')
        elif payment_currency <> settings.PAYPAL_APP_SUBSCRIPTION_CURRENCY:
            subscription.error_message = ugettext('Amount altered. Bad guy ?')
        else:
            subscription.error_message = ugettext('Paid')
            subscription.state = SUBSCRIPTION_STATE_PAID
            # create an invoice for this payment
            # first, get the provider user
            provider = User.objects.get(email=settings.SERVICE_PROVIDER_EMAIL)
            if provider.get_profile().vat_number:
                # NOTE(review): 1.196 looks like the French 19.6% VAT
                # divisor — converts gross to net; confirm rate source.
                payment_amount = Decimal(payment_amount) / Decimal('1.196')
            # look for a customer corresponding to user
            address, created = Address.objects.get_or_create(contact__email=user.email,
                                                             owner=provider,
                                                             defaults={'street': profile.address.street,
                                                                       'zipcode': profile.address.zipcode,
                                                                       'city': profile.address.city,
                                                                       'country': profile.address.country,
                                                                       'owner': provider})
            customer, created = Contact.objects.get_or_create(email=user.email,
                                                              defaults={'contact_type': CONTACT_TYPE_COMPANY,
                                                                        'name': '%s %s' % (user.first_name, user.last_name),
                                                                        'company_id': profile.company_id,
                                                                        'legal_form': 'Auto-entrepreneur',
                                                                        'email': user.email,
                                                                        'address': address,
                                                                        'owner': provider})
            # create a related project if needed
            # set it to finished to clear daily business
            project, created = Project.objects.get_or_create(state=PROJECT_STATE_FINISHED,
                                                             customer=customer,
                                                             name='Subscription %s - %s %s' % (Site.objects.get_current().name, user.first_name, user.last_name),
                                                             defaults={'state': PROJECT_STATE_FINISHED,
                                                                       'customer': customer,
                                                                       'name': 'Subscription %s - %s %s' % (Site.objects.get_current().name, user.first_name, user.last_name),
                                                                       'owner': provider})
            # create proposal for this subscription
            # New period begins when the previous subscription expires,
            # never in the past.
            begin_date = datetime.date.today()
            if begin_date < last_subscription.expiration_date:
                begin_date = last_subscription.expiration_date
            proposal = Proposal.objects.create(project=project,
                                               reference='subscription%i%i%i' % (subscription.expiration_date.year,
                                                                                 subscription.expiration_date.month,
                                                                                 subscription.expiration_date.day),
                                               state=PROPOSAL_STATE_BALANCED,
                                               begin_date=begin_date,
                                               end_date=subscription.expiration_date,
                                               contract_content='',
                                               update_date=datetime.date.today(),
                                               expiration_date=None,
                                               owner=provider)
            unit_price = Decimal(settings.PAYPAL_APP_SUBSCRIPTION_AMOUNT)
            if provider.get_profile().vat_number:
                unit_price = Decimal(unit_price) / Decimal('1.196')
            proposal_row = ProposalRow.objects.create(proposal=proposal,
                                                      label=item_name,
                                                      category=ROW_CATEGORY_SERVICE,
                                                      quantity=1,
                                                      unit_price='%s' % unit_price,
                                                      owner=provider)
            # finally create invoice
            invoice = Invoice.objects.create(customer=customer,
                                             invoice_id=Invoice.objects.get_next_invoice_id(provider),
                                             state=INVOICE_STATE_PAID,
                                             amount=payment_amount,
                                             edition_date=datetime.date.today(),
                                             payment_date=datetime.date.today(),
                                             paid_date=datetime.date.today(),
                                             payment_type=PAYMENT_TYPE_BANK_CARD,
                                             execution_begin_date=begin_date,
                                             execution_end_date=subscription.expiration_date,
                                             penalty_date=None,
                                             penalty_rate=None,
                                             discount_conditions=None,
                                             owner=provider)
            invoice_row = InvoiceRow.objects.create(proposal=proposal,
                                                    invoice=invoice,
                                                    label=item_name,
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=1,
                                                    unit_price=payment_amount,
                                                    balance_payments=True,
                                                    vat_rate=VAT_RATES_19_6,
                                                    owner=provider)
            # create expense for paypal fee
            expense = Expense.objects.create(date=datetime.date.today(),
                                             reference=transaction_id,
                                             supplier='Paypal',
                                             amount=fee,
                                             payment_type=PAYMENT_TYPE_BANK_CARD,
                                             description='Commission paypal',
                                             owner=provider)
            # generate invoice in pdf
            response = HttpResponse(mimetype='application/pdf')
            invoice.to_pdf(provider, response)
            subject_template = loader.get_template('core/subscription_paid_email_subject.html')
            subject_context = {'site_name': Site.objects.get_current().name}
            subject = subject_template.render(Context(subject_context))
            body_template = loader.get_template('core/subscription_paid_email.html')
            body_context = {'site_name': Site.objects.get_current().name,
                            'expiration_date': subscription.expiration_date}
            body = body_template.render(Context(body_context))
            email = EmailMessage(subject=subject,
                                 body=body,
                                 to=[user.email])
            email.attach('facture_%i.pdf' % (invoice.invoice_id), response.content, 'application/pdf')
            email.send(fail_silently=(not settings.DEBUG))
    subscription.save()
    return render_to_response('core/paypal_ipn.html',
                              {'active': 'account',
                               'title': _('Subscribe')},
                              context_instance=RequestContext(request))
0
Example 55
Project: edx-platform Source File: views.py
@method_decorator(login_required)
@method_decorator(transaction.atomic)
def get(self, request, course_id, error=None):
    """Displays the course mode choice page.
    Args:
        request (`Request`): The Django Request object.
        course_id (unicode): The slash-separated course key.
    Keyword Args:
        error (unicode): If provided, display this error message
            on the page.
    Returns:
        Response
    """
    course_key = CourseKey.from_string(course_id)
    # Check whether the user has access to this course
    # based on country access rules.
    embargo_redirect = embargo_api.redirect_if_blocked(
        course_key,
        user=request.user,
        ip_address=get_ip(request),
        url=request.path
    )
    if embargo_redirect:
        return redirect(embargo_redirect)
    enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(request.user, course_key)
    modes = CourseMode.modes_for_course_dict(course_key)
    ecommerce_service = EcommerceService()
    # We assume that, if 'professional' is one of the modes, it should be the *only* mode.
    # If there are both modes, default to non-id-professional.
    has_enrolled_professional = (CourseMode.is_professional_slug(enrollment_mode) and is_active)
    if CourseMode.has_professional_mode(modes) and not has_enrolled_professional:
        # Professional courses skip this page entirely and go straight
        # to verification or the ecommerce checkout.
        purchase_workflow = request.GET.get("purchase_workflow", "single")
        verify_url = reverse('verify_student_start_flow', kwargs={'course_id': unicode(course_key)})
        redirect_url = "{url}?purchase_workflow={workflow}".format(url=verify_url, workflow=purchase_workflow)
        if ecommerce_service.is_enabled(request.user):
            professional_mode = modes.get(CourseMode.NO_ID_PROFESSIONAL_MODE) or modes.get(CourseMode.PROFESSIONAL)
            if purchase_workflow == "single" and professional_mode.sku:
                redirect_url = ecommerce_service.checkout_page_url(professional_mode.sku)
            if purchase_workflow == "bulk" and professional_mode.bulk_sku:
                redirect_url = ecommerce_service.checkout_page_url(professional_mode.bulk_sku)
        return redirect(redirect_url)
    # If there isn't a verified mode available, then there's nothing
    # to do on this page.  The user has almost certainly been auto-registered
    # in the "honor" track by this point, so we send the user
    # to the dashboard.
    if not CourseMode.has_verified_mode(modes):
        return redirect(reverse('dashboard'))
    # If a user has already paid, redirect them to the dashboard.
    if is_active and (enrollment_mode in CourseMode.VERIFIED_MODES + [CourseMode.NO_ID_PROFESSIONAL_MODE]):
        return redirect(reverse('dashboard'))
    # Price previously chosen by the user for this course, if any
    # (keyed by the unicode course key in the session).
    donation_for_course = request.session.get("donation_for_course", {})
    chosen_price = donation_for_course.get(unicode(course_key), None)
    course = modulestore().get_course(course_key)
    if CourseEnrollment.is_enrollment_closed(request.user, course):
        # Enrollment closed: bounce to the dashboard with the close
        # date formatted for the user's locale in the query string.
        locale = to_locale(get_language())
        enrollment_end_date = format_datetime(course.enrollment_end, 'short', locale=locale)
        params = urllib.urlencode({'course_closed': enrollment_end_date})
        return redirect('{0}?{1}'.format(reverse('dashboard'), params))
    # When a credit mode is available, students will be given the option
    # to upgrade from a verified mode to a credit mode at the end of the course.
    # This allows students who have completed photo verification to be eligible
    # for univerity credit.
    # Since credit isn't one of the selectable options on the track selection page,
    # we need to check *all* available course modes in order to determine whether
    # a credit mode is available.  If so, then we show slightly different messaging
    # for the verified track.
    has_credit_upsell = any(
        CourseMode.is_credit_mode(mode) for mode
        in CourseMode.modes_for_course(course_key, only_selectable=False)
    )
    # Template context for the track-selection page.
    context = {
        "course_modes_choose_url": reverse(
            "course_modes_choose",
            kwargs={'course_id': course_key.to_deprecated_string()}
        ),
        "modes": modes,
        "has_credit_upsell": has_credit_upsell,
        "course_name": course.display_name_with_default_escaped,
        "course_org": course.display_org_with_default,
        "course_num": course.display_number_with_default,
        "chosen_price": chosen_price,
        "error": error,
        "responsive": True,
        "nav_hidden": True,
    }
    if "verified" in modes:
        # Extra pricing/description details shown only for the
        # verified track.
        verified_mode = modes["verified"]
        context["suggested_prices"] = [
            decimal.Decimal(x.strip())
            for x in verified_mode.suggested_prices.split(",")
            if x.strip()
        ]
        context["currency"] = verified_mode.currency.upper()
        context["min_price"] = verified_mode.min_price
        context["verified_name"] = verified_mode.name
        context["verified_description"] = verified_mode.description
        if verified_mode.sku:
            # Route payment through the ecommerce service when the
            # verified mode has a SKU configured.
            context["use_ecommerce_payment_flow"] = ecommerce_service.is_enabled(request.user)
            context["ecommerce_payment_page"] = ecommerce_service.payment_page_url()
            context["sku"] = verified_mode.sku
            context["bulk_sku"] = verified_mode.bulk_sku
    return render_to_response("course_modes/choose.html", context)
0
Example 56
Project: ownCloud-for-KODI Source File: owncloud.py
def getMediaList(self, folderName='', cacheType=CACHE_TYPE_MEMORY):
    """Fetch and parse the listing of an ownCloud folder.

    Returns a list of package.package objects (files and sub-folders)
    for *folderName*, or None when the request fails even after a
    re-login.  'ES' and 'SL' are magic folder names selecting the
    external-storage and shared-with-me OCS endpoints respectively.
    """
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
    opener.addheaders = self.getHeadersList()
    # Endpoint differs between ownCloud 6 (HTML page) and later
    # versions (ajax/OCS JSON endpoints).
    if (self.version == self.OWNCLOUD_V6):
        url = self.protocol + self.domain +'/index.php/apps/files?' + urllib.urlencode({'dir' : folderName})
    else:
        if folderName == 'ES':
            url = self.protocol + self.domain + '/ocs/v1.php/apps/files_external/api/v1/mounts?format=json'
        elif folderName == 'SL':
            url = self.protocol + self.domain + '/ocs/v1.php/apps/files_sharing/api/v1/shares?format=json&shared_with_me=false'
        else:
            url = self.protocol + self.domain +'/index.php/apps/files/ajax/list.php?'+ urllib.urlencode({'dir' : folderName})+'&sort=name&sortdirection=asc'
    # if action fails, validate login
    try:
        response = opener.open(url)
    except urllib2.URLError, e:
        # First failure: re-login once and retry with fresh headers.
        self.login()
        opener.addheaders = self.getHeadersList()
        try:
            response = opener.open(url)
        except urllib2.URLError, e:
            xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
            return
    response_data = response.read()
    response.close()
    # A body containing 'authentication_error' means the session
    # expired even though the HTTP request succeeded: re-login and
    # refetch (finditer is used as an "if found" check).
    for r in re.finditer('authentication_error' ,response_data, re.DOTALL):
        self.login();
        # if action fails, validate login
        try:
            response = opener.open(url)
        except urllib2.URLError, e:
            self.login()
            opener.addheaders = self.getHeadersList()
            try:
                response = opener.open(url)
            except urllib2.URLError, e:
                xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
                return
        response_data = response.read()
        response.close()
    mediaFiles = []
    # parsing page for files
    if (self.version == self.OWNCLOUD_V6):
        # ownCloud 6: scrape the HTML table rows (data-* attributes).
        for r in re.finditer('\<tr data\-id\=.*?</tr>' ,response_data, re.DOTALL):
            entry = r.group()
            for q in re.finditer('data\-id\=\"([^\"]+)\".*?data\-file\=\"([^\"]+)\".*?data\-type\=\"([^\"]+)\".*?data\-mime\=\"([^\/]+)\/' ,entry, re.DOTALL):
                fileID,fileName,contentType,fileType = q.groups()
                try:
                    # fileName = unicode(fileName, "unicode-escape")
                    # Undo JSON/HTML \uXXXX escaping; best-effort only.
                    fileName = fileName.decode('unicode-escape')
                    fileName = fileName.encode('utf-8')
                except:
                    pass
                #fileName = unicode(fileName, "unicode-escape")
                # Undo any urlencoding before displaying the files (should also make the folders accessible)
                #fileName = urllib.unquote(fileName)
                # Map the MIME major type onto the addon's media types.
                if fileType == 'video':
                    fileType = self.MEDIA_TYPE_VIDEO
                elif fileType == 'audio':
                    fileType = self.MEDIA_TYPE_MUSIC
                elif fileType == 'image':
                    fileType = self.MEDIA_TYPE_PICTURE
                if contentType == 'dir':
                    mediaFiles.append(package.package(0,folder.folder(folderName+'/'+fileName,fileName)) )
                else:
                    thumbnail = self.protocol + self.domain +'/index.php/core/preview.png?file='+folderName+ '/'+fileName + '&x=50&y=50'+'|' + self.getHeadersEncoded()
                    mediaFiles.append(package.package(file.file(fileName, fileName, fileName, fileType, '', thumbnail),folder.folder(folderName,folderName)) )
        return mediaFiles
    else:
        # Newer versions: pull fields out of the JSON response with
        # regexes rather than json.loads (one dict per list entry).
        for r in re.finditer('\[\{.*?\}\]' ,response_data, re.DOTALL):
            entry = r.group()
            for s in re.finditer('\{.*?\}' ,entry, re.DOTALL):
                item = s.group()
                fileID = ''
                fileName = ''
                fileType = ''
                contentType = ''
                etag = ''
                thumbnail = ''
                if folderName == 'ES':
                    # External-storage mounts expose only type + name.
                    for q in re.finditer('\"type\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        contentType = q.group(1)
                        break
                    for q in re.finditer('\"name\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        fileName = q.group(1)
                        break
                elif folderName == 'SL':
                    # Shares use file_source/file_target field names.
                    for q in re.finditer('\"file_source\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        fileID = q.group(1)
                        break
                    for q in re.finditer('\"file_target\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        fileName = q.group(1)
                        break
                    for q in re.finditer('\"mimetype\"\:\"([^\/]+)\/' ,
                                         item, re.DOTALL):
                        fileType = q.group(1)
                        break
                    for q in re.finditer('\"item_type\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        contentType = q.group(1)
                        break
                    # The JSON-escaped mimetype leaves a literal
                    # trailing backslash, hence 'video\\' etc.
                    if fileType == 'video\\':
                        fileType = self.MEDIA_TYPE_VIDEO
                    elif fileType == 'audio\\':
                        fileType = self.MEDIA_TYPE_MUSIC
                    elif fileType == 'image\\':
                        fileType = self.MEDIA_TYPE_PICTURE
                else:
                    # Regular folder listing.
                    for q in re.finditer('\"id\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        fileID = q.group(1)
                        break
                    for q in re.finditer('\"name\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        fileName = q.group(1)
                        break
                    for q in re.finditer('\"mimetype\"\:\"([^\/]+)\/' ,
                                         item, re.DOTALL):
                        fileType = q.group(1)
                        break
                    for q in re.finditer('\"type\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        contentType = q.group(1)
                        break
                    for q in re.finditer('\"etag\"\:\"([^\"]+)\"' ,
                                         item, re.DOTALL):
                        etag = q.group(1)
                        break
                    thumbnail = self.protocol + self.domain +'/index.php/core/preview.png?file='+str(folderName)+ '/'+str(fileName) + '&c='+str(etag)+'&x=50&y=50&forceIcon=0'+'|' + self.getHeadersEncoded()
                    if fileType == 'video\\':
                        fileType = self.MEDIA_TYPE_VIDEO
                    elif fileType == 'audio\\':
                        fileType = self.MEDIA_TYPE_MUSIC
                    elif fileType == 'image\\':
                        fileType = self.MEDIA_TYPE_PICTURE
                # fileName = unicode(fileName, "unicode-escape")
                try:
                    # fileName = unicode(fileName, "unicode-escape")
                    fileName = fileName.decode('unicode-escape')
                    fileName = fileName.encode('utf-8')
                except:
                    pass
                # # Undo any urlencoding before displaying the files (should also make the folders accessible)
                # fileName = urllib.unquote(fileName)
                if contentType == 'dir':
                    mediaFiles.append(package.package(0,folder.folder(folderName+'/'+fileName,fileName)) )
                else:
                    mediaFiles.append(package.package(file.file(fileName, fileName, fileName, fileType, '', thumbnail),folder.folder(folderName,folderName)) )
        return mediaFiles
0
Example 57
Project: grr Source File: administrative.py
@flow.EventHandler(allow_client_access=True)
def ProcessMessage(self, message=None, event=None):
    """Processes this event.

    Handles a client crash report: logs it, bumps the crash counter,
    persists crash details to AFF4, e-mails any configured alert
    addresses, and finally terminates the crashed flow.

    Args:
      message: GrrMessage whose source is the client URN and whose
        payload is a GrrStatus describing the crash.
      event: Unused.
    """
    _ = event
    client_id = message.source
    nanny_msg = ""
    flow_obj = aff4.FACTORY.Open(message.session_id, token=self.token)
    # Log.
    logging.info("Client crash reported, client %s.", client_id)
    # Only kill the flow it is does not handle its own crashes. Some flows
    # restart the client and therefore expect to get a crash notification.
    if flow_obj.handles_crashes:
        return
    # Export.
    stats.STATS.IncrementCounter("grr_client_crashes")
    # Write crash data to AFF4.
    client = aff4.FACTORY.Open(client_id, token=self.token)
    client_info = client.Get(client.Schema.CLIENT_INFO)
    status = rdf_flows.GrrStatus(message.payload)
    crash_details = rdf_client.ClientCrash(
        client_id=client_id,
        session_id=message.session_id,
        client_info=client_info,
        crash_message=status.error_message,
        timestamp=rdfvalue.RDFDatetime.Now(),
        crash_type=self.well_known_session_id)
    self.WriteAllCrashDetails(
        client_id, crash_details, flow_session_id=message.session_id)
    # Also send email.
    # Collect recipients: the hunt's crash_alert_email (if this crash came
    # from a hunt) plus the global Monitoring.alert_email.
    to_send = []
    try:
        hunt_session_id = self._ExtractHuntId(message.session_id)
        if hunt_session_id and hunt_session_id != message.session_id:
            hunt_obj = aff4.FACTORY.Open(
                hunt_session_id, aff4_type=implementation.GRRHunt, token=self.token)
            email = hunt_obj.runner_args.crash_alert_email
            if email:
                to_send.append(email)
    except aff4.InstantiationError:
        logging.error("Failed to open hunt %s.", hunt_session_id)
    email = config_lib.CONFIG["Monitoring.alert_email"]
    if email:
        to_send.append(email)
    for email_address in to_send:
        if status.nanny_status:
            nanny_msg = "Nanny status: %s" % status.nanny_status
        client = aff4.FACTORY.Open(client_id, token=self.token)
        hostname = client.Get(client.Schema.HOSTNAME)
        # Query-string fragment pointing the Admin UI at this client's
        # HostInformation view; interpolated into mail_template as 'urn'.
        url = urllib.urlencode((("c", client_id), ("main", "HostInformation")))
        context_html = rendering.FindRendererForObject(flow_obj.context).RawHTML()
        state_html = rendering.FindRendererForObject(flow_obj.state).RawHTML()
        args_html = rendering.FindRendererForObject(flow_obj.args).RawHTML()
        runner_args_html = rendering.FindRendererForObject(
            flow_obj.runner_args).RawHTML()
        email_alerts.EMAIL_ALERTER.SendEmail(
            email_address,
            "GRR server",
            "Client %s reported a crash." % client_id,
            self.mail_template % dict(
                client_id=client_id,
                admin_ui=config_lib.CONFIG["AdminUI.url"],
                hostname=hostname,
                context=context_html,
                state=state_html,
                args=args_html,
                runner_args=runner_args_html,
                urn=url,
                nanny_msg=nanny_msg,
                signature=config_lib.CONFIG["Email.signature"]),
            is_html=True)
    if nanny_msg:
        msg = "Client crashed, " + nanny_msg
    else:
        msg = "Client crashed."
    # Now terminate the flow.
    flow.GRRFlow.TerminateFlow(
        message.session_id, reason=msg, token=self.token, force=True)
0
Example 58
Project: django-flows Source File: handler.py
def flow_entry_link(self, request, flow_class_or_name, on_complete_url=None,
                    with_state=False, initial_state=None,
                    flow_namespace=None,
                    url_args=None, url_kwargs=None, url_queryargs=None):
    """
    This method is how you create a URL which allows a user to enter a flow.

    There are two main times you will need this, both with different consequences
    for the values you should pass as arguments.

    1) If you display a page to a user including a URL which starts a flow, but which
    the user may not necessarily begin, then you should use a stateless task. That
    is to say, don't include `initial_state`. If the flow needs to behave differently
    based on some context, then include it using `url_args` and `url_kwargs`, and have
    the flow Action or Scaffold handle that in their `url` configuration. As an example,
    you may be on a product page, and want a link for the user should they choose to
    purchase it. In this case, few users may actually click the link, so creating
    task state which will never be used is inefficient. In that case, the product ID
    could be passed into the flow via URL parameters - this is more like a normal Django
    URL

    2) If a user performs an action that enters them into a flow explicitly. If a
    user clicks something and you want the user to immediately enter a flow, you
    can pass in `initial_state`.

    Parameters
    ----------
    request : django.http.HttpRequest
        A request object supplied by Django
    flow_class_or_name : str | Action | Scaffold
        The flow entry point to create a URL for. This can be a string to do a lookup
        by name, or the Action or Scaffold class itself, which is preferred. Note that
        the flow action must have been registered as an entry point with this handler
        via `register_entry_point`
    on_complete_url : str
        The URL to redirect to once the flow is complete, used if the actions do not
        redirect themselves and instead rely on the automatic transitions.
    with_state : bool
        *Deprecated* By default, state is not created until the user begins the flow,
        to avoid creating unnecessary database objects, especially when the link is
        simply created to be displayed in a page. Sometimes, however, when constructing
        a link to redirect to as the result of a user action, it's useful to create the
        state for the flow. This has been deprecated in favour of "initial_state"
    initial_state : dict
        If you want to create a flow with some initial state, you can pass it in here. Note
        this is not suitable for, eg, URLs on an HTML page, since it would create a task
        state object for every pageview regardless of the user's intention. It is much
        better if the user has performed an action, such as submitted a form, which
        implies they want to immediately enter a flow. See also `url_args` and `url_kwargs`.
    flow_namespace : str
    url_args : list | tuple
    url_kwargs : dict
        The arguments to pass in to the URL for the flow, if your actions specify any
        parameters in their URL pattern. This is typically the way to supply "initial
        state" for flow entry URLs generated before a user action has taken place. See
        also `initial_state`.
    url_queryargs : dict
        Query parameters to append to the generated URL. You should probably be using
        `url_args` and `url_kwargs` along with URL patterns, or `initial_state`, instead.
    """
    if initial_state is None:
        initial_state = {}
    else:
        # override the with_state option in the case that
        # we have some initial state to explicitly set
        with_state = True
    flow_class = get_by_class_or_name(flow_class_or_name)
    position = PossibleFlowPosition(self.app_namespace, flow_namespace, flow_class.get_initial_action_tree())
    if with_state:
        if on_complete_url is not None:
            initial_state['_on_complete'] = on_complete_url
        state = self._new_state(request, **initial_state)
    else:
        state = {'_id': ''}  # TODO: this is a bit of a hack, but task_id is required...
    instance = position.create_instance(state, self.state_store, url_args=url_args, url_kwargs=url_kwargs)
    # if we have state, then we need to include the task ID in the URL
    # returned, otherwise it'll be seen as a "new entry" and new empty
    # task state will be created
    inst_url = instance.get_absolute_url(include_flow_id=with_state)
    parts = urlparse.urlparse(inst_url)
    query = urlparse.parse_qsl(parts.query)
    if on_complete_url is not None:
        query.append(('_on_complete', on_complete_url))
    if url_queryargs is not None:
        # BUG FIX: parse_qsl returns a *list* of (name, value) pairs, so the
        # previous `query.update(url_queryargs)` raised AttributeError
        # whenever url_queryargs was supplied. Extend with the dict's items
        # instead, preserving the existing pairs.
        query.extend(url_queryargs.items())
    parts = list(parts)
    parts[4] = urllib.urlencode(query, doseq=True)
    return urlparse.urlunparse(parts)
0
Example 59
def request(url, method="GET", params=None, data=None, headers={},
            timeout=None, files={}, randua=False, auth=None, length_limit=None,
            proxies=None, trust_env=True, max_redirects=0, **kwargs):
    """request an URL

    :arg string url: URL to be fetched.
    :arg string method: (optional) HTTP method, one of ``GET``, ``DELETE``,
                        ``HEAD``, ``OPTIONS``, ``PUT``, ``POST``, ``TRACE``,
                        ``PATCH``. ``GET`` is the default.
    :arg dict/string params: (optional) Dict or string to attach to url as
                             querystring.
    :arg dict headers: (optional) HTTP request headers.
    :arg float timeout: (optional) Timeout in seconds
    :arg files: (optional) Files to be sended
    :arg randua: (optional) If ``True`` or ``path string``, use a random
                 user-agent in headers, instead of
                 ``'urlfetch/' + __version__``
    :arg tuple auth: (optional) (username, password) for basic authentication
    :arg int length_limit: (optional) If ``None``, no limits on content length,
                           if the limit reached raised exception 'Content length
                           is more than ...'
    :arg dict proxies: (optional) HTTP proxy, like {'http': '127.0.0.1:8888',
                       'https': '127.0.0.1:563'}
    :arg bool trust_env: (optional) If ``True``, urlfetch will get infomations
                         from env, such as HTTP_PROXY, HTTPS_PROXY
    :arg int max_redirects: (integer, optional) Max redirects allowed within a
                            request. Default is 0, which means redirects are
                            not allowed.
    :returns: A :class:`~urlfetch.Response` object
    :raises: :class:`URLError`, :class:`UrlfetchException`,
             :class:`TooManyRedirects`,
    """
    def make_connection(conn_type, host, port, timeout):
        """Return HTTP or HTTPS connection."""
        if conn_type == 'http':
            conn = HTTPConnection(host, port, timeout=timeout)
        elif conn_type == 'https':
            conn = HTTPSConnection(host, port, timeout=timeout)
        else:
            raise URLError('Unknown Connection Type: %s' % conn_type)
        return conn

    via_proxy = False
    method = method.upper()
    if method not in ALLOWED_METHODS:
        raise UrlfetchException("Method should be one of " +
                                ", ".join(ALLOWED_METHODS))
    # Attach params to the URL as a querystring (dict or raw string form).
    if params:
        if isinstance(params, dict):
            url = url_concat(url, params)
        elif isinstance(params, basestring):
            if url[-1] not in ('?', '&'):
                url += '&' if ('?' in url) else '?'
            url += params
    parsed_url = parse_url(url)
    reqheaders = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, compress, identity, *',
        'User-Agent': random_useragent(randua),
        'Host': parsed_url['http_host']
    }
    # Proxy support
    scheme = parsed_url['scheme']
    if proxies is None and trust_env:
        proxies = PROXIES
    proxy = proxies.get(scheme)
    if proxy and parsed_url['host'] not in PROXY_IGNORE_HOSTS:
        via_proxy = True
        if '://' not in proxy:
            proxy = '%s://%s' % (scheme, proxy)
        parsed_proxy = parse_url(proxy)
        # Proxy-Authorization
        if parsed_proxy['username'] and parsed_proxy['password']:
            proxyauth = '%s:%s' % (parsed_proxy['username'],
                                   parsed_proxy['password'])
            proxyauth = base64.b64encode(proxyauth.encode('utf-8'))
            reqheaders['Proxy-Authorization'] = 'Basic ' + \
                proxyauth.decode('utf-8')
        conn = make_connection(scheme, parsed_proxy['host'],
                               parsed_proxy['port'], timeout)
    else:
        conn = make_connection(scheme, parsed_url['host'], parsed_url['port'],
                               timeout)
    # Basic auth: explicit `auth` argument wins, else credentials in the URL.
    if not auth and parsed_url['username'] and parsed_url['password']:
        auth = (parsed_url['username'], parsed_url['password'])
    if auth:
        if isinstance(auth, (list, tuple)):
            auth = '%s:%s' % tuple(auth)
        auth = base64.b64encode(auth.encode('utf-8'))
        reqheaders['Authorization'] = 'Basic ' + auth.decode('utf-8')
    # Request body: multipart when files are given, urlencoded for a dict.
    if files:
        content_type, data = encode_multipart(data, files)
        reqheaders['Content-Type'] = content_type
    elif isinstance(data, dict):
        data = urlencode(data, 1)
    if isinstance(data, basestring) and not files:
        # httplib will set 'Content-Length', also you can set it by yourself
        reqheaders["Content-Type"] = "application/x-www-form-urlencoded"
        # what if the method is GET, HEAD or DELETE
        # just do not make so much decisions for users
    reqheaders.update(headers)
    start_time = time.time()
    try:
        request_url = url if via_proxy else parsed_url['uri']
        conn.request(method, request_url, data, reqheaders)
        resp = conn.getresponse()
    except socket.timeout as e:
        raise Timeout(e)
    except Exception as e:
        raise UrlfetchException(e)
    end_time = time.time()
    total_time = end_time - start_time
    history = []
    response = Response.from_httplib(resp, reqheaders=reqheaders,
                                     length_limit=length_limit,
                                     history=history[:], url=url,
                                     total_time=total_time,
                                     start_time=start_time)
    # Follow redirects (if allowed), re-resolving the proxy for each hop.
    while (response.status in (301, 302, 303, 307) and
            'location' in response.headers and max_redirects):
        # Drain the body and retire the current response into the history.
        response.body, response.close(), history.append(response)
        if len(history) > max_redirects:
            raise TooManyRedirects('max_redirects exceeded')
        # Per RFC, 307 preserves the method; other redirects fall back to GET.
        method = method if response.status == 307 else 'GET'
        location = response.headers['location']
        if location[:2] == '//':
            # Scheme-relative redirect: keep the current scheme.
            url = parsed_url['scheme'] + ':' + location
        else:
            url = urlparse.urljoin(url, location)
        parsed_url = parse_url(url)
        reqheaders['Host'] = parsed_url['http_host']
        reqheaders['Referer'] = response.url
        # Proxy
        scheme = parsed_url['scheme']
        proxy = proxies.get(scheme)
        if proxy and parsed_url['host'] not in PROXY_IGNORE_HOSTS:
            via_proxy = True
            if '://' not in proxy:
                proxy = '%s://%s' % (parsed_url['scheme'], proxy)
            parsed_proxy = parse_url(proxy)
            # Proxy-Authorization
            if parsed_proxy['username'] and parsed_proxy['password']:
                # BUG FIX: the original built the credential from the
                # username twice ('%s:%s' % (username, username)), so
                # authenticated proxies always rejected redirected hops.
                # Use username:password, matching the first hop above.
                proxyauth = '%s:%s' % (parsed_proxy['username'],
                                       parsed_proxy['password'])
                proxyauth = base64.b64encode(proxyauth.encode('utf-8'))
                reqheaders['Proxy-Authorization'] = 'Basic ' + \
                    proxyauth.decode('utf-8')
            conn = make_connection(scheme, parsed_proxy['host'],
                                   parsed_proxy['port'], timeout)
        else:
            via_proxy = False
            reqheaders.pop('Proxy-Authorization', None)
            conn = make_connection(scheme, parsed_url['host'],
                                   parsed_url['port'], timeout)
        try:
            request_url = url if via_proxy else parsed_url['uri']
            conn.request(method, request_url, data, reqheaders)
            resp = conn.getresponse()
        except socket.timeout as e:
            raise Timeout(e)
        except Exception as e:
            raise UrlfetchException(e)
        response = Response.from_httplib(resp, reqheaders=reqheaders,
                                         length_limit=length_limit,
                                         history=history[:], url=url,
                                         total_time=total_time,
                                         start_time=start_time)
    return response
0
Example 60
def find(self, movie, quality, type, retry = False):
    """Search the newzbin RSS API for NZBs matching *movie*.

    Queries by IMDB id with the category/format derived from *type*,
    caches raw XML per (imdb, format, cat) key, and returns a list of
    feedItem results that pass isCorrectMovie().
    # NOTE(review): `quality` and `retry` are accepted but never used in
    # this body -- presumably kept for interface compatibility; confirm
    # against the provider base class.
    """
    self.cleanCache();
    results = []
    # Bail out early when the provider is disabled or the site is down.
    if not self.enabled() or not self.isAvailable(self.searchUrl):
        return results
    formatId = self.getFormatId(type)
    catId = self.getCatId(type)
    # Build the newzbin search query (RSS feed, IMDB-URL match, sorted by
    # total size ascending).
    arguments = urlencode({
        'searchaction': 'Search',
        'u_url_posts_only': '0',
        'u_show_passworded': '0',
        'q_url': 'imdb.com/title/' + movie.imdb,
        'sort': 'ps_totalsize',
        'order': 'asc',
        'u_post_results_amt': '100',
        'feed': 'rss',
        'category': '6',
        'ps_rb_video_format': str(catId),
        'ps_rb_source': str(formatId),
    })
    url = "%s?%s" % (self.searchUrl, arguments)
    cacheId = str('%s %s %s' % (movie.imdb, str(formatId), str(catId)))
    singleCat = True
    try:
        cached = False
        if(self.cache.get(cacheId)):
            # Cache hit: `data` is just a truthy flag, the XML itself is
            # read from the cache entry below.
            data = True
            cached = True
            log.info('Getting RSS from cache: %s.' % cacheId)
        else:
            log.info('Searching: %s' % url)
            data = self.urlopen(url, username = self.conf('username'), password = self.conf('password'))
            self.cache[cacheId] = {
                'time': time.time()
            }
    except (IOError, URLError):
        log.error('Failed to open %s.' % url)
        return results
    if data:
        try:
            try:
                if cached:
                    xml = self.cache[cacheId]['xml']
                else:
                    xml = self.getItems(data)
                    self.cache[cacheId]['xml'] = xml
            except:
                log.debug('No valid xml or to many requests.. You never know with %s.' % self.name)
                return results
            for item in xml:
                title = self.gettextelement(item, "title")
                if 'error' in title.lower(): continue
                # Append report attributes to the title; fall back to the
                # old namespace when the current one is missing.
                try:
                    for attr in item.find('{%s}attributes' % self.REPORT_NS):
                        title += ' ' + attr.text
                    REPORT_NS = self.REPORT_NS
                except:
                    for attr in item.find('{%s}attributes' % self.REPORT_NS_OLD):
                        title += ' ' + attr.text
                    REPORT_NS = self.REPORT_NS_OLD
                id = int(self.gettextelement(item, '{%s}id' % REPORT_NS))
                # Size element is reported in bytes; convert to megabytes.
                size = str(int(self.gettextelement(item, '{%s}size' % REPORT_NS)) / 1024 / 1024) + ' mb'
                date = str(self.gettextelement(item, '{%s}postdate' % REPORT_NS))
                new = self.feedItem()
                new.id = id
                new.type = 'nzb'
                new.name = title
                new.date = int(time.mktime(parse(date).timetuple()))
                new.size = self.parseSize(size)
                new.url = str(self.gettextelement(item, '{%s}nzb' % REPORT_NS))
                new.detailUrl = str(self.gettextelement(item, 'link'))
                new.content = self.gettextelement(item, "description")
                new.score = self.calcScore(new, movie)
                new.addbyid = True
                new.checkNZB = False
                new.download = self.download
                if self.isCorrectMovie(new, movie, type, imdbResults = True, singleCategory = singleCat):
                    results.append(new)
                    log.info('Found: %s' % new.name)
            return results
        except:
            log.error('Failed to parse XML response from newzbin2.es: %s' % traceback.format_exc())
            return results
0
Example 61
def main(argv=None):
    """Build scene-service caches on an ArcGIS server.

    Gets a portal token, enumerates hosted SceneServer services, validates
    any user-selected subset, then submits a 'Manage Scene Cache' GP job
    for each service and polls its status until completion.
    """
    exit_err_code = 1
    total_success = True
    # Print/get script arguments
    results = print_args()
    if not results:
        sys.exit(exit_err_code)
    serverName, username, password, selectedServiceNames = results
    # Get/generateToken a token from the sharing api of Portal. Use the secure (https) 7443 port.
    portalPort = 7443
    token = getToken(username, password, serverName, portalPort)
    #print token
    if token == "":
        print "Could not generate a token with the username and password provided."
        sys.exit(exit_err_code)
    else:
        # A dict here means the token endpoint returned an error object.
        if 'error' in token:
            for error in token['error']:
                if (str(error) == 'code' and error[0] != 200):
                    sys.exit(exit_err_code)
    print '\n{}'.format(sectionBreak)
    print 'Build Scene Service Cache'
    print sectionBreak
    # Get the list of (scene) services available from arcgis server
    serverPort = 6443
    HostedServiceEndpnt = '/arcgis/rest/services/Hosted'
    data = getJsonResponse(serverName, username, password, HostedServiceEndpnt, token, serverPort)
    obj = json.loads(data)
    #service name comes in form of "Hosted/Buildings". Drop the folder name 'Hosted'
    sceneServicenames = []
    for service in obj['services']:
        for key, value in service.iteritems():
            if (key == 'name'):
                if (value[:7] == 'Hosted/'):
                    name = str(value[7:])
                else:
                    name = str(value)
            if (key == 'type'):
                if (value == 'SceneServer'):
                    sceneServicenames.append(name)
    if len(sceneServicenames) == 0:
        # NOTE(review): 'serviceName' is not defined at this point (it is
        # only bound in the loops below), so this line would raise
        # NameError -- it was probably meant to be 'serverName'. TODO confirm.
        print '\nWARNING: Server {} does not have any scene services. Exiting script.'.format(serviceName)
        sys.exit(0)
    # No explicit selection means cache every scene service found.
    if selectedServiceNames is None:
        selectedServiceNames = sceneServicenames
    # Validate if specified scene services exist
    invalidServiceNames = []
    for selectedServiceName in selectedServiceNames:
        if selectedServiceName not in sceneServicenames:
            invalidServiceNames.append(selectedServiceName)
    if len(invalidServiceNames) > 0:
        print '\nERROR: the following specified scene services do not exist:'
        print invalidServiceNames
        sys.exit(exit_err_code)
    if len(selectedServiceNames) > 0:
        print '\nList of scene services to cache:'
        for serviceName in selectedServiceNames:
            print serviceName
    for serviceName in selectedServiceNames:
        print '\n{}'.format(sectionBreak1)
        print serviceName
        print sectionBreak1
        # Todo: reject name if it doesn't match existing service
        service_url = 'https://{}/arcgis/rest/services/Hosted/{}/SceneServer'.format(serverName, serviceName)
        # For now, let's just comment out the code to retrieve, list, and
        # allow the user to specify which layers in the scene service to cache
        # # Get all layer id and names for serviceName provided by user and determine if cache is to be built only for specific layer/s (default is to build cache for all layers)
        # ServiceEndpnt = '/arcgis/rest/services/Hosted/{}/SceneServer'.format(serviceName)
        # data = getJsonResponse(serverName, username, password, ServiceEndpnt, token, serverPort)
        # obj = json.loads(data)
        #
        # print ('Below are the list of layers available for caching as acquired from :' + '\n'
        #        'https://' + serverName + ServiceEndpnt + '?f=pjson' + '\n')
        #
        # print ('If caching of a specific layer/s is desired just enter the layerID/s from the list below for the layer/s you are interested in caching.' + '\n'
        #        'Default (if set to -1 or is not specified) is to cache all layers.' + '\n')
        #
        # print ('LayerName : LayerID')
        #
        # layerids = []
        # layernames = []
        # for layers in obj['layers']:
        #     for key, value in layers.iteritems():
        #         if (key == 'id'):
        #             layerids.append(int(value))
        #         if (key == 'name'):
        #             layernames.append(str(value))
        # print_data(layernames, layerids)
        #
        # layerIDs = []
        # if (len(selectedServiceNames) == 1):
        #     layerIDs = raw_input("Enter the layer id(s) of the layer(s) you\'d like to cache separated by comma. (ex.'3,5,8')")
        # else:
        #     layerIDs = '-1'
        #     print 'Multiple services selected for caching. Selecting of individual layers to cache is disabled.'
        #
        # if (str(layerIDs) == '-1' or len(str(layerIDs)) <= 0):
        #     layer = "{}"
        # else:
        #     layerIDIntlist = [int(e) if e.isdigit() else e for e in layerIDs.split(',')]
        #     layerJson = json_list(layerIDIntlist, 'id')
        #     layer = '{"layers":%s}' % (layerJson)
        # Build cache for all layers
        layer = '{}'
        # Construct the parameters to submit to the 'Manage Scene cache' tool
        num_of_caching_service_instances = 2
        update_mode = 'RECREATE_ALL_NODES'
        returnZ = 'false'
        update_extent = 'DEFAULT'
        area_of_interest = ''
        params = urllib.urlencode({'token': token, 'f': 'json', 'service_url': service_url,
                                   'num_of_caching_service_instances' : num_of_caching_service_instances,
                                   'layer': layer, 'update_mode': update_mode, 'returnZ': returnZ,
                                   'update_extent': update_extent, 'area_of_interest': area_of_interest})
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "Referer": script_referrer}
        # Format the GP service tool url
        SceneCachingToolURL = '/arcgis/rest/services/System/SceneCachingControllers/GPServer/Manage%20Scene%20Cache'
        submitJob = '{}/submitJob'.format(SceneCachingToolURL)
        # Connect to URL and post parameters (using https!)
        # Set the port to 6443 as it needs to be the https server port as portal communicates to server via a secure port
        # Note if federated server is running on a different machine than the portal, chanage the 'serverName' parameter below accordingly.
        httpConn = httplib.HTTPSConnection(serverName, serverPort)
        httpConn.request("POST", submitJob, params, headers)
        # Read response
        response = httpConn.getresponse()
        if (response.status != 200):
            httpConn.close()
            print response.reason
            return
        else:
            data = response.read()
            httpConn.close()
            # Check that data returned is not an error object
            if not assertJsonSuccess(data):
                print 'Error returned by operation. ' + data
            else:
                print 'Scene Caching Job Submitted successfully!'
                print 'Caching Job status updates every {} seconds...'.format(cacheJobStatusUpdateFreq)
                # Extract the jobID from it
                jobid = json.loads(data)
                guidJobId = str(jobid['jobId'])
                print 'JobID: {}'.format(guidJobId)
                # get the job status from the tool..
                SceneCachingToolJobsURL = '{}/jobs/{}'.format(SceneCachingToolURL, guidJobId)
                # Check the status of the result object every n seconds until it stops execution..
                result = True
                while result == True:
                    time.sleep(cacheJobStatusUpdateFreq)
                    result = getJobStatusMessage(serverName, username, password, SceneCachingToolJobsURL, token, serverPort)
    #return
    print '\n\nScript {} completed.\n'.format(scriptName)
    sys.exit(0)
0
Example 62
@csrf_exempt
def provider_login(request):
    """
    OpenID login endpoint

    Serves three cases: an incoming OpenID protocol request (has
    'openid.mode'), a login-form POST continuing a stored checkid_setup
    session, or a plain GET which renders the login page.
    """
    # pylint: disable=too-many-statements
    # make and validate endpoint
    endpoint = get_xrds_url('login', request)
    if not endpoint:
        return default_render_failure(request, "Invalid OpenID request")
    # initialize store and server
    store = DjangoOpenIDStore()
    server = Server(store, endpoint)
    # first check to see if the request is an OpenID request.
    # If so, the client will have specified an 'openid.mode' as part
    # of the request.
    if request.method == 'GET':
        querydict = dict(request.GET.items())
    else:
        querydict = dict(request.POST.items())
    error = False
    if 'openid.mode' in request.GET or 'openid.mode' in request.POST:
        # decode request
        try:
            openid_request = server.decodeRequest(querydict)
        except (UntrustedReturnURL, ProtocolError):
            openid_request = None
        if not openid_request:
            return default_render_failure(request, "Invalid OpenID request")
        # don't allow invalid and non-trusted trust roots
        if not validate_trust_root(openid_request):
            return default_render_failure(request, "Invalid OpenID trust root")
        # checkid_immediate not supported, require user interaction
        if openid_request.mode == 'checkid_immediate':
            return provider_respond(server, openid_request,
                                    openid_request.answer(False), {})
        # checkid_setup, so display login page
        # (by falling through to the provider_login at the
        # bottom of this method).
        elif openid_request.mode == 'checkid_setup':
            if openid_request.idSelect():
                # remember request and original path
                request.session['openid_setup'] = {
                    'request': openid_request,
                    'url': request.get_full_path(),
                    'post_params': request.POST,
                }
            # user failed login on previous attempt
            if 'openid_error' in request.session:
                error = True
                del request.session['openid_error']
        # OpenID response
        else:
            return provider_respond(server, openid_request,
                                    server.handleRequest(openid_request), {})
    # handle login redirection: these are also sent to this view function,
    # but are distinguished by lacking the openid mode. We also know that
    # they are posts, because they come from the popup
    elif request.method == 'POST' and 'openid_setup' in request.session:
        # get OpenID request from session
        openid_setup = request.session['openid_setup']
        openid_request = openid_setup['request']
        openid_request_url = openid_setup['url']
        post_params = openid_setup['post_params']
        # We need to preserve the parameters, and the easiest way to do this is
        # through the URL
        url_post_params = {
            param: post_params[param] for param in post_params if param.startswith('openid')
        }
        encoded_params = urllib.urlencode(url_post_params)
        if '?' not in openid_request_url:
            openid_request_url = openid_request_url + '?' + encoded_params
        else:
            openid_request_url = openid_request_url + '&' + encoded_params
        del request.session['openid_setup']
        # don't allow invalid trust roots
        if not validate_trust_root(openid_request):
            return default_render_failure(request, "Invalid OpenID trust root")
        # check if user with given email exists
        # Failure is redirected to this method (by using the original URL),
        # which will bring up the login dialog.
        email = request.POST.get('email', None)
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            request.session['openid_error'] = True
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"OpenID login failed - Unknown user email")
            else:
                msg = u"OpenID login failed - Unknown user email: {0}".format(email)
                AUDIT_LOG.warning(msg)
            return HttpResponseRedirect(openid_request_url)
        # attempt to authenticate user (but not actually log them in...)
        # Failure is again redirected to the login dialog.
        username = user.username
        password = request.POST.get('password', None)
        try:
            user = authenticate(username=username, password=password, request=request)
        except RateLimitException:
            AUDIT_LOG.warning(u'OpenID - Too many failed login attempts.')
            return HttpResponseRedirect(openid_request_url)
        if user is None:
            request.session['openid_error'] = True
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"OpenID login failed - invalid password")
            else:
                AUDIT_LOG.warning(
                    u"OpenID login failed - password for %s is invalid", email)
            return HttpResponseRedirect(openid_request_url)
        # authentication succeeded, so fetch user information
        # that was requested
        if user is not None and user.is_active:
            # remove error from session since login succeeded
            if 'openid_error' in request.session:
                del request.session['openid_error']
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.info(u"OpenID login success - user.id: %s", user.id)
            else:
                AUDIT_LOG.info(
                    u"OpenID login success - %s (%s)", user.username, user.email)
            # redirect user to return_to location
            url = endpoint + urlquote(user.username)
            response = openid_request.answer(True, None, url)
            # Note too that this is hardcoded, and not really responding to
            # the extensions that were registered in the first place.
            results = {
                'nickname': user.username,
                'email': user.email,
                'fullname': user.profile.name,
            }
            # the request succeeded:
            return provider_respond(server, openid_request, response, results)
        # the account is not active, so redirect back to the login page:
        request.session['openid_error'] = True
        if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
            AUDIT_LOG.warning(
                u"Login failed - Account not active for user.id %s", user.id)
        else:
            AUDIT_LOG.warning(
                u"Login failed - Account not active for user %s", username)
        return HttpResponseRedirect(openid_request_url)
    # determine consumer domain if applicable
    return_to = request.GET.get('openid.return_to') or request.POST.get('openid.return_to') or ''
    if return_to:
        matches = re.match(r'\w+:\/\/([\w\.-]+)', return_to)
        return_to = matches.group(1)
    # display login page
    response = render_to_response('provider_login.html', {
        'error': error,
        'return_to': return_to
    })
    # add custom XRDS header necessary for discovery process
    response['X-XRDS-Location'] = get_xrds_url('xrds', request)
    return response
0
Example 63
Project: pyxtra Source File: pyxtra.py
def login(browser, username, password, anticaptcha=False, anticaptcha_max_tries=3):
    """Display the CAPTCHA and log in.

    Repeats fetch-CAPTCHA/solve/submit until the Xtrazone SSO login
    succeeds. When `anticaptcha` is on, the first `anticaptcha_max_tries`
    attempts are solved via the Gorrion service before falling back to
    showing the CAPTCHA image in a Tkinter window for manual entry.
    """
    captcha_tries = 0
    if anticaptcha:
        gorrion = GorrionService()
    while 1:
        try:
            if not password:
                password = getpass.getpass('Xtrazone password: ').strip()
            # Get CAPTCHA URL
            browser.open('https://xtrazone.sso.bluewin.ch/index.html')
            browser.addheaders = [
                ('X-Requested-With', 'XMLHttpRequest'),
                ('X-Header-XtraZone', 'XtraZone'),
                ('Referer', 'https://xtrazone.sso.bluewin.ch/index.html'),
            ]
            url = 'https://xtrazone.sso.bluewin.ch/index.php/20,53,ajax,,,283/' \
                  '?route=%2Flogin%2Fgetcaptcha'
            data = {'action': 'getCaptcha',
                    'do_sso_login': 0,
                    'passphrase': '',
                    'sso_password': password,
                    'sso_user': username,
                    'token': '',
                    }
            browser.open(url, urllib.urlencode(data))
            resp = json.loads(browser.response().read())  # Convert response to dict
            captcha_token = resp['content']['messages']['operation']['token']
            captcha = ''
            captcha_tries += 1
            # Try to crack CAPTCHA automatically (Service by gorrion.ch)
            if anticaptcha and captcha_tries <= anticaptcha_max_tries:
                if captcha_tries == 1:
                    print 'Trying to crack CAPTCHA...'
                try:
                    captcha = gorrion.get_captcha(captcha_token)
                except Exception as e:
                    # Any cracking failure disables the service for good.
                    anticaptcha = False
                    print 'Error, cracking CAPTCHA failed (%s)' % str(e)
            # User has to enter CAPTCHA manually
            else:
                if anticaptcha and captcha_tries == anticaptcha_max_tries + 1:
                    print 'Automatically cracking CAPTCHA failed. :('
                captcha_url = 'http:%s' % resp['content']['messages']['operation']['imgUrl']
                # Display CAPTCHA in a new window
                tk_root = Tkinter.Tk(className='CAPTCHA')
                img_bytes = StringIO(urllib.urlopen(captcha_url).read())
                img_obj = Image.open(img_bytes)
                img = ImageTk.PhotoImage(img_obj)
                captcha_label = Tkinter.Label(tk_root, image=img)
                captcha_label.pack()
                # Get CAPTCHA text
                while not captcha:
                    captcha = raw_input('Please enter CAPTCHA: ').strip()
                # Destroy CAPTCHA window
                try:
                    tk_root.destroy()
                except Tkinter.TclError:
                    pass
            # Log in
            browser.addheaders = [
                ('X-Requested-With', 'XMLHttpRequest'),
                ('X-Header-XtraZone', 'XtraZone'),
                ('Referer', 'https://xtrazone.sso.bluewin.ch/index.html'),
            ]
            url = 'https://xtrazone.sso.bluewin.ch/index.php/22,39,ajax_json,,,157/'
            data = {
                'action': 'ssoLogin',
                'do_sso_login': 1,
                'passphrase': captcha,
                'sso_password': password,
                'sso_user': username,
                'token': captcha_token,
            }
            browser.open(url, urllib.urlencode(data))
            resp = json.loads(browser.response().read())
            # A RuntimeError here is caught below and retries the loop.
            if resp['status'] == 'captcha_failed':
                raise RuntimeError('CAPTCHA failed: %s' % resp['message'])
            if resp['status'] != 'login_ok':
                raise RuntimeError('Login failed: %s' % resp['message'])
            # Everything worked fine :)
            if anticaptcha and captcha_tries <= anticaptcha_max_tries:
                if captcha:  # Report successful CAPTCHAs to gorrion
                    try:
                        gorrion.report(captcha, 1)
                    except GorrionError as e:
                        print 'Anticaptcha reporting: %s' % str(e)
            break
        except RuntimeError as e:
            if anticaptcha and captcha_tries <= anticaptcha_max_tries:
                if captcha:
                    pass  # TODO Possibly report to gorrion
            if captcha_tries > anticaptcha_max_tries:
                print 'Wrong CAPTCHA. Try again.'
    if anticaptcha:
        del gorrion
0
Example 64
Project: Flibber Source File: flibber.py
def reqURL(url, post="", proto="GET", reqType="API"):
    """Perform one rate-limited Instagram API call via pycurl.

    Returns the decoded JSON payload (also stored in the global
    ``dataDict``), or returns early/``None`` when handling of the HTTP
    status itself raises.

    Parameters:
        url: endpoint URL; for GET the encoded fields are appended as a
            query string, for POST they become the request body.
        post: mapping of extra form fields merged over the default
            access_token/client_id pair.  NOTE(review): the default "" only
            works because ``dict.update('')`` is a no-op for an empty
            string; a non-empty string would raise -- callers presumably
            always pass a dict or omit the argument.
        proto: "POST" for a POST request, anything else is treated as GET.
        reqType: "API", "Like" or "Relation" -- selects which delay and
            rolling-window array is grown when the server answers HTTP 429.

    Side effects: mutates many module globals (counters, delays, rate
    windows) and recurses into itself after a 429 back-off.
    """
    global count, dataDict, response, globErrorMessage
    global API_DELAY, LIKE_DELAY, REL_DELAY
    global totalAPICalls, totalErrors, errorLevel, lastAPI
    bytesIO = BytesIO()
    pc = pycurl.Curl()
    # Sign the caller's IP with the client secret (Instagram's
    # X-Insta-Forwarded-For signed-header scheme).
    signature = hmac.new(
        options.CLIENT_SECRET, options.IP, sha256).hexdigest()
    header = '|'.join([options.IP, signature])
    header = ["X-Insta-Forwarded-For: " + header]
    # Every request carries the token/client id; extra fields are merged in.
    post_data = {'access_token': options.ACCESS_TOKEN,
                 'client_id': options.CLIENT_ID}
    post_data.update(post)
    postfields = urllib.urlencode(post_data)
    if proto == "POST":
        pc.setopt(pc.CUSTOMREQUEST, 'POST')
        pc.setopt(pc.POSTFIELDS, postfields)
    else:
        # Keep the bare URL for logging; the fields ride in the query string.
        getURL = url
        url = url + "?" + postfields
        pc.setopt(pc.CUSTOMREQUEST, 'GET')
    pc.setopt(pc.URL, str(url))
    pc.setopt(pc.WRITEFUNCTION, bytesIO.write)
    # headerFunction (module-level) fills the global ``headers`` dict.
    pc.setopt(pc.HEADERFUNCTION, headerFunction)
    pc.setopt(pycurl.HTTPHEADER, header)
    count = count + 1
    # Enforce the minimum spacing between consecutive API calls.
    timeDifference = currentTime() - lastAPI
    if timeDifference < API_DELAY:
        execPause(API_DELAY - timeDifference)
    if len(APIArray) > 0:
        # Drop window entries older than one hour.
        while APIArray[0] <= currentTime() - 3600:
            del APIArray[0]
        # NOTE(review): this checks relArray while waiting on APIArray's
        # oldest entry -- looks like it was meant to test APIArray; confirm
        # against upstream before relying on it.
        if len(relArray) >= 4999:
            waitTime = currentTime() - APIArray[0] - 3600
            execPause(waitTime)
    try:
        totalAPICalls = totalAPICalls + 1
        pc.perform()
        response = str(pc.getinfo(pc.HTTP_CODE))
        pc.close()
        # Work out the response charset from the Content-Type header,
        # defaulting to iso-8859-1 per the HTTP/1.1 default.
        encoding = None
        if 'content-type' in headers:
            content_type = headers['content-type'].lower()
            match = re.search('charset=(\S+)', content_type)
            if match:
                encoding = match.group(1)
            if encoding is None:
                encoding = 'iso-8859-1'
        body = bytesIO.getvalue()
        dataDict = simplejson.loads(body)
        printMsg(tCol.BOLD + 'Request #' + str(count), "NUM#", "HEADER")
        try:
            # Rate-limit headers are absent on some error responses.
            printMsg('Remaining API calls: ' + tCol.FAIL +
                     headers['x-ratelimit-remaining'] + '/' +
                     headers['x-ratelimit-limit'] + tCol.ENDC, "RATE",
                     "OKBLUE")
        except Exception:
            execPause(1)
    except Exception as e:
        # Network/parse failure: synthesize a 500 and count the error.
        dataDict = ""
        response = "500"
        error_message = e
        errorLevel = errorLevel + 1
        if errorLevel > 8:
            printMsg("Error level exceeded, check options.",
                     "ERRO", "FAIL")
            sys.exit(1)
    # Log what was actually sent.
    if proto == "POST":
        printMsg(url, "RURL", "OKBLUE")
    else:
        printMsg(getURL, "RURL", "OKBLUE")
    printMsg(postfields, "FLDS", "OKBLUE")
    printMsg(proto, "HTTP", "OKBLUE")
    try:
        if response == "200":
            lastAPI = currentTime()
            errorLevel = 0
            printMsg(response, "CODE")
            APIArray.append(currentTime())
        elif response == "500":
            totalErrors = totalErrors + 1
            globErrorMessage = str(error_message)
            # pycurl reports a Ctrl-C during the write callback as error 23.
            if globErrorMessage == "(23, 'Failed writing header')":
                print ""
                printMsg(tCol.BOLD + "Keyboard Interrupt!", "INPT", "FAIL")
                sys.exit(1)
            printMsg(str(error_message), "ERRO", "FAIL")
        elif response != "200":
            # Any other HTTP error: surface the API's own error envelope.
            totalErrors = totalErrors + 1
            error_message = dataDict["meta"]["error_message"]
            error_type = dataDict["meta"]["error_type"]
            printMsg(response, "CODE", "FAIL")
            printMsg(error_type, "TYPE", "FAIL")
            printMsg(error_message, "FAIL", "FAIL")
            if response == "400" and \
                    error_type == "OAuthAccessTokenException":
                sys.exit(1)
            if response == "429":
                # Pull "used/limit" numbers out of the error text.
                rates = [int(s) for s in error_message.split()
                         if s.isdigit()]
                printMsg("Rate exceeded: " + tCol.FAIL + str(rates[0]) +
                         "/" + str(rates[1]) + tCol.WARNING +
                         " in the last hour.", "RATE", "WARNING")
                # Slow down the relevant request class and pick its window.
                if reqType == "Like":
                    LIKE_DELAY = LIKE_DELAY + 1
                    rateArray = likeArray
                    rateLen = 99
                elif reqType == "Relation":
                    REL_DELAY = REL_DELAY + 1
                    rateArray = relArray
                    rateLen = 99
                else:
                    API_DELAY = API_DELAY + 1
                    rateArray = APIArray
                    rateLen = 4999
                # Pad the window so the back-off below has a full hour's
                # worth of entries to measure against.
                rateDiff = rateLen - len(rateArray)
                if rateDiff > 0:
                    while len(rateArray) < rateLen:
                        rateArray.append(currentTime())
                    rateArray[0] = currentTime() - 3900
                waitTime = 0
                waitTime = currentTime() - rateArray[0] - 3600
                execPause(waitTime)
                # Retry the same request after backing off.
                reqURL(url, post, proto, reqType)
    except Exception:
        # Any failure while classifying the status aborts with None.
        return
    return dataDict
0
Example 65
Project: rest_gae Source File: users.py
def get_user_rest_class(**kwd):
    """Returns a UserRESTHandlerClass with the permissions set according to input"""
    class UserRESTHandlerClass(BaseRESTHandler):
        # Class-level configuration, captured from the factory's kwargs.
        model = import_class(kwd.get('user_model', User))
        email_as_username = kwd.get('email_as_username', False)
        admin_only_user_registration = kwd.get('admin_only_user_registration', False)
        user_details_permission = kwd.get('user_details_permission', PERMISSION_OWNER_USER)
        verify_email_address = kwd.get('verify_email_address', False)
        verification_email = kwd.get('verification_email', None)
        verification_successful_url = kwd.get('verification_successful_url', None)
        verification_failed_url = kwd.get('verification_failed_url', None)
        reset_password_url = kwd.get('reset_password_url', None)
        reset_password_email = kwd.get('reset_password_email', None)
        user_policy_callback = [kwd.get('user_policy_callback', None)]
        send_email_callback = [kwd.get('send_email_callback', None)] # Wrapping in a list so the function won't be turned into a bound method
        allow_login_for_non_verified_email = kwd.get('allow_login_for_non_verified_email', True)
        # Validate arguments (we do this at this stage in order to raise exceptions immediately rather than while the app is running)
        if (model != User) and (User not in model.__bases__):
            raise ValueError('The provided user_model "%s" does not inherit from rest_gae.users.User class' % (model))
        if verify_email_address and not verification_email:
            raise ValueError('Must set "verification_email" when "verify_email_address" is True')
        if verification_email and set(verification_email.keys()) != set(['sender', 'subject', 'body_text', 'body_html']):
            raise ValueError('"verification_email" must include all of the following keys: sender, subject, body_text, body_html')
        if verify_email_address and not verification_successful_url:
            raise ValueError('Must set "verification_successful_url" when "verify_email_address" is True')
        if verify_email_address and not verification_failed_url:
            raise ValueError('Must set "verification_failed_url" when "verify_email_address" is True')
        if verify_email_address and not reset_password_url:
            raise ValueError('Must set "reset_password_url" when "verify_email_address" is True')
        if verify_email_address and not reset_password_email:
            raise ValueError('Must set "reset_password_email" when "verify_email_address" is True')
        if reset_password_email and set(reset_password_email.keys()) != set(['sender', 'subject', 'body_text', 'body_html']):
            raise ValueError('"reset_password_email" must include all of the following keys: sender, subject, body_text, body_html')
        permissions = { 'GET': PERMISSION_ANYONE, 'PUT': PERMISSION_OWNER_USER, 'DELETE': PERMISSION_OWNER_USER, 'POST': PERMISSION_ANYONE } # Used by get_response method when building the HTTP response header 'Access-Control-Allow-Methods'
        def __init__(self, request, response):
            self.initialize(request, response)
            # Unwrap the callback from its anti-bound-method list wrapper.
            self.send_email_callback = self.send_email_callback[0]
        def rest_method_wrapper(func):
            """Wraps GET/POST/PUT/DELETE methods and adds standard functionality"""
            def inner_f(self, model_id):
                # We make sure the auth session store is using the proper user model (we can't rely on the user initializing it from outside the library)
                self.auth.store.user_model = self.model
                method_name = func.func_name.upper()
                try:
                    # Call original method
                    if model_id:
                        model_id = model_id[1:] # Get rid of '/' at the beginning
                        if model_id == 'me':
                            # 'me' is shorthand for the currently logged-in user
                            if not self.user:
                                # User tried to retrieve information about himself without being logged-in
                                raise self.unauthorized()
                            model = self.user
                        elif (method_name == 'POST' and model_id in ['login', 'reset']) or (method_name == 'GET' and model_id == 'verify'):
                            # Pseudo-IDs for the special endpoints; passed through as strings.
                            model = model_id
                        else:
                            model = self._model_id_to_model(model_id)
                        return func(self, model)
                    else:
                        return func(self, None)
                except RESTException, exc:
                    return self.error(exc)
            return inner_f
        #
        # REST endpoint methods
        #
        @rest_method_wrapper
        def get(self, model):
            """GET endpoint - returns all users (if admin and not user id provided) or a specific user's details otherwise"""
            if not model:
                # Return all users (if admin)
                if not self.user:
                    # Must be logged-in
                    return self.unauthorized()
                if not self.user.is_admin:
                    # Must be an admin
                    return self.permission_denied()
                query = self._filter_query() # Filter the results
                query = self._order_query(query) # Order the results
                (results, cursor) = self._fetch_query(query) # Fetch them (with a limit / specific page, if provided)
                return self.success({
                    'results': results,
                    'next_results_url': self._build_next_query_url(cursor)
                })
            elif model == 'verify':
                # It's an email verification link
                user_id = self.request.GET.get('user_id')
                signup_token = self.request.GET.get('signup_token')
                verification_type = self.request.GET.get('type')
                if not user_id or not signup_token or not verification_type:
                    return self.redirect(self.verification_failed_url)
                try:
                    user_id = int(user_id)
                except ValueError, exc:
                    return self.redirect(self.verification_failed_url)
                # it should be something more concise like
                # self.auth.get_user_by_token(user_id, signup_token)
                # unfortunately the auth interface does not (yet) allow to manipulate
                # signup tokens concisely
                try:
                    user, ts = self.user_model.get_by_auth_token(user_id, signup_token, 'signup')
                    if not user: raise Exception()
                except:
                    return self.redirect(self.verification_failed_url)
                # store user data in the session
                self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
                if verification_type == 'v':
                    # User verified his email address after registration
                    # Remove signup token, we don't want users to come back with an old link
                    self.user_model.delete_signup_token(user.get_id(), signup_token)
                    # Mark user's email address as verified
                    if not user.is_email_verified:
                        user.is_email_verified = True
                        user.put()
                    return self.redirect(self.verification_successful_url)
                elif verification_type == 'p':
                    # User wants to reset his password
                    # Redirect to password reset URL with the token
                    return self.redirect(self.reset_password_url + '?signup_token=' + signup_token)
                else:
                    # Unknown verification type
                    return self.redirect(self.verification_failed_url)
            # Return the details of a single user (by ID)
            if self.user_details_permission != PERMISSION_ANYONE:
                # Verify permissions
                if not self.user:
                    # Must be logged-in
                    return self.unauthorized()
                if (self.user_details_permission == PERMISSION_OWNER_USER) and (self.user != model) and (not self.user.is_admin):
                    # The owning user (and admins) is only one that can view his own user details
                    return self.permission_denied()
                if (self.user_details_permission == PERMISSION_ADMIN) and (not self.user.is_admin):
                    # Must be an admin
                    return self.permission_denied()
            # Return user details
            return self.success(model)
        @rest_method_wrapper
        def post(self, model):
            """POST endpoint - registers a new user"""
            if model and model not in ['login', 'reset']:
                # Invalid usage of the endpoint
                raise RESTException('Cannot POST to a specific user ID')
            if model and model == 'reset':
                # Send a password reset email
                try:
                    # Parse POST data as JSON
                    json_data = json.loads(self.request.body)
                except ValueError, exc:
                    raise RESTException('Invalid JSON POST data')
                if 'user_name' not in json_data:
                    raise RESTException('Missing user_name argument')
                user = self.user_model.get_by_auth_id(json_data['user_name'])
                if not user:
                    raise RESTException('User not found: %s' % json_data['user_name'])
                # Send the reset password email
                self._send_verification_email(user, self.reset_password_email, True)
                return self.success({})
            elif model and model == 'login':
                # Login the user
                try:
                    # Parse POST data as JSON
                    json_data = json.loads(self.request.body)
                except ValueError, exc:
                    raise RESTException('Invalid JSON POST data')
                if 'user_name' not in json_data:
                    raise RESTException('Missing user_name argument')
                if 'password' not in json_data:
                    raise RESTException('Missing password argument')
                try:
                    user = self.auth.get_user_by_password(json_data['user_name'], json_data['password'], remember=True, save_session=True)
                except (InvalidAuthIdError, InvalidPasswordError) as e:
                    # Login failed
                    return self.permission_denied('Invalid user name / password')
                if not self.allow_login_for_non_verified_email and not user.is_email_verified:
                    # Don't allow the user to login since he hasn't verified his email address yet.
                    return self.permission_denied('Email address not verified')
                # Login successful
                return self.success(user)
            #
            # Register a new user
            #
            if self.admin_only_user_registration:
                if not self.user:
                    # Must be logged-in
                    return self.unauthorized()
                if not self.user.is_admin:
                    # Must be admin
                    return self.permission_denied()
            try:
                # Parse POST data as JSON
                json_data = json.loads(self.request.body)
            except ValueError, exc:
                raise RESTException('Invalid JSON POST data')
            try:
                # Any exceptions raised due to invalid/missing input will be caught
                if self.user_policy_callback is not None and self.user_policy_callback[0] is not None:
                    json_data = self.user_policy_callback[0](self.user, json_data)
                if not 'email' in json_data:
                    raise ValueError('Missing email')
                if not self.email_as_username and not 'user_name' in json_data:
                    raise ValueError('Missing user_name')
                if not 'password' in json_data:
                    raise ValueError('Missing password')
                user_name = json_data['email'] if self.email_as_username else json_data['user_name']
                password = json_data['password']
                # Sanitize the input
                json_data.pop('user_name', None)
                json_data.pop('password', None)
                json_data.pop('is_email_verified', None)
                if self.user and self.user.is_admin:
                    # Allow admins to create a new user and set his access level
                    is_admin = json_data.get('is_admin', False)
                else:
                    is_admin = False
                json_data.pop('is_admin', None)
                user_properties = { }
                # Make sure only properties defined in the user model will be written (since the parent webapp2 User model is an ExpandoModel)
                for prop_name in self.model._properties.keys():
                    if prop_name in json_data:
                        user_properties[prop_name] = json_data[prop_name]
                unique_properties = ['email']
                user_data = self.model.create_user(
                    user_name,
                    unique_properties,
                    password_raw=password,
                    is_email_verified=(False if self.verify_email_address else True),
                    is_admin=is_admin,
                    **user_properties
                )
                if not user_data[0]:
                    # Caused due to multiple keys (i.e. the user is already registered or the username/email is taken by someone else)
                    existing_fields = ['user_name' if s == 'auth_id' else s for s in user_data[1]]
                    raise RESTException('Unable to register user - the following fields are already registered: %s' % (', '.join(existing_fields)))
                if self.verify_email_address:
                    # Send email verification
                    user = user_data[1]
                    self._send_verification_email(user, self.verification_email)
                # Return the newly-created user
                return self.success(user_data[1])
            except Exception, exc:
                raise RESTException('Invalid JSON POST data - %s' % exc)
        def _send_verification_email(self, user, email, reset_password=False):
            """Sends a verification email to a specific `user` with specific email details (in `email`). Creates a reset password link if `reset_password` is True."""
            # Prepare the verification URL
            user_id = user.get_id()
            token = self.user_model.create_signup_token(user_id)
            path_url = self.request.path_url
            # NOTE(review): strips len('verify') chars when the path ends with
            # 'reset' -- the two words differ in length; confirm against upstream.
            path_url = path_url[:-len('verify')] if path_url.endswith('reset') else path_url
            path_url = path_url.rstrip('/')
            verification_params = { 'type': ('v' if not reset_password else 'p'), 'user_id': user_id, 'signup_token': token }
            verification_url = path_url + '/verify?' + urlencode(verification_params)
            # Prepare email body
            email['body_text'] = Template(email['body_text']).render(user=user, verification_url=verification_url)
            email['body_html'] = Template(email['body_html']).render(user=user, verification_url=verification_url)
            # Send the email
            if self.send_email_callback:
                # Use the provided function for sending the email
                self.send_email_callback(email)
            else:
                # Use GAE's email services
                message = mail.EmailMessage()
                message.sender = email['sender']
                message.to = user.email
                message.subject = email['subject']
                message.body = email['body_text']
                message.html = email['body_html']
                message.send()
        @rest_method_wrapper
        def put(self, model):
            """PUT endpoint - updates a user's details"""
            if not model:
                # Invalid usage of the endpoint
                raise RESTException('Must provide user ID for PUT endpoint')
            if not self.user:
                # Must be logged-in
                return self.unauthorized()
            if (self.user != model) and (not self.user.is_admin):
                # The owning user (and admins) is only one that can update his own user details
                return self.permission_denied()
            try:
                # Parse PUT data as JSON
                json_data = json.loads(self.request.body)
            except ValueError, exc:
                raise RESTException('Invalid JSON PUT data')
            # Update the user
            try:
                # Any exceptions raised due to invalid/missing input will be caught
                if self.user_policy_callback is not None:
                    self.user_policy_callback[0](self.user, json_data)
                model = self._build_model_from_data(json_data, self.model, model)
                if self.user.is_admin:
                    # Allow the admin to change sensitive properties
                    if json_data.has_key('is_admin'):
                        model.is_admin = json_data['is_admin']
                    if json_data.has_key('is_email_verified'):
                        model.is_email_verified = json_data['is_email_verified']
                if json_data.has_key('password'):
                    # Change password if requested
                    model.set_password(json_data['password'])
                if json_data.has_key('signup_token'):
                    # Remove signup token (generated from a reset password link), we don't want users to come back with an old link
                    self.user_model.delete_signup_token(self.user.get_id(), json_data['signup_token'])
                model.put()
            except Exception, exc:
                raise RESTException('Invalid JSON PUT data - %s' % exc)
            # Return the updated user details
            return self.success(model)
        @rest_method_wrapper
        def delete(self, model):
            """DELETE endpoint - deletes an existing user"""
            if not model:
                # Invalid usage of the endpoint
                raise RESTException('Must provide user ID for DELETE endpoint')
            if not self.user:
                # Must be logged-in
                return self.unauthorized()
            if (self.user != model) and (not self.user.is_admin):
                # The owning user (and admins) is only one that can delete his own account
                return self.permission_denied()
            # Delete the user
            try:
                self.user_model.remove_unique(model.email, ['email'], email=model.email)
                model.key.delete()
            except Exception, exc:
                raise RESTException('Could not delete user - %s' % exc)
            # Return the deleted user instance
            return self.success(model)
    # Return the class statically initialized with given input arguments
    return UserRESTHandlerClass
0
Example 66
Project: OWSLib Source File: wms130.py
def getmap(self, layers=None,
           styles=None,
           srs=None,
           bbox=None,
           format=None,
           size=None,
           time=None,
           elevation=None,
           dimensions={},
           transparent=False,
           bgcolor='#FFFFFF',
           exceptions='XML',
           method='Get',
           timeout=None,
           **kwargs
           ):
    """Request and return an image from the WMS as a file-like object.
    Parameters
    ----------
    layers : list
        List of content layer names.
    styles : list
        Optional list of named styles, must be the same length as the
        layers list.
    srs : string
        A spatial reference system identifier.
        Note: this is an invalid query parameter key for 1.3.0 but is being
        retained for standardization with 1.1.1.
        Note: throws exception if the spatial ref is ESRI's "no reference"
        code (EPSG:0)
    bbox : tuple
        (left, bottom, right, top) in srs units (note, this order does not
        change depending on axis order of the crs).
        CRS:84: (long, lat)
        EPSG:4326: (lat, long)
    format : string
        Output image format such as 'image/jpeg'.
    size : tuple
        (width, height) in pixels.
    time : string or list or range
        Optional. Time value of the specified layer as ISO-8601 (per value)
    elevation : string or list or range
        Optional. Elevation value of the specified layer.
    dimensions: dict (dimension : string or list or range)
        Optional. Any other Dimension option, as specified in the GetCapabilities
    transparent : bool
        Optional. Transparent background if True.
    bgcolor : string
        Optional. Image background color.
    method : string
        Optional. HTTP DCP method name: Get or Post.
    **kwargs : extra arguments
        anything else e.g. vendor specific parameters
    Example
    -------
    wms = WebMapService('http://webservices.nationalatlas.gov/wms/1million',\
                        version='1.3.0')
    img = wms.getmap(layers=['airports1m'],\
                     styles=['default'],\
                     srs='EPSG:4326',\
                     bbox=(-176.646, 17.7016, -64.8017, 71.2854),\
                     size=(300, 300),\
                     format='image/jpeg',\
                     transparent=True)
    out = open('example.jpg', 'wb')
    out.write(img.read())
    out.close()
    """
    # Find the advertised URL for the requested HTTP method in the
    # capabilities document; fall back to the service base URL.
    try:
        base_url = next((m.get('url') for m in
                         self.getOperationByName('GetMap').methods if
                         m.get('type').lower() == method.lower()))
    except StopIteration:
        base_url = self.url
    # Build the GetMap KVP dict via the private helper (validation of
    # layers/styles/srs happens there).
    request = self.__build_getmap_request(
        layers=layers,
        styles=styles,
        srs=srs,
        bbox=bbox,
        dimensions=dimensions,
        elevation=elevation,
        format=format,
        size=size,
        time=time,
        transparent=transparent,
        bgcolor=bgcolor,
        exceptions=exceptions,
        **kwargs)
    data = urlencode(request)
    # Record the full request URL for caller inspection/debugging.
    self.request = bind_url(base_url) + data
    u = openURL(base_url,
                data,
                method,
                username=self.username,
                password=self.password,
                timeout=timeout or self.timeout)
    # need to handle casing in the header keys
    headers = {}
    for k, v in six.iteritems(u.info()):
        headers[k.lower()] = v
    # handle the potential charset def
    # A service exception comes back as XML rather than an image; parse it
    # and raise instead of returning the error document as image bytes.
    if headers['content-type'].split(';')[0] in ['application/vnd.ogc.se_xml', 'text/xml']:
        se_xml = u.read()
        se_tree = etree.fromstring(se_xml)
        err_message = six.text_type(se_tree.find(nspath('ServiceException', OGC_NAMESPACE)).text).strip()
        raise ServiceException(err_message)
    return u
0
Example 67
def request(method, url, params=None, data=None, headers=None, cookies=None,
            files=None, auth=None, timeout=60, allow_redirects=False):
    """Initiate an HTTP(S) request. Returns :class:`Response` object.
    :param method: 'GET' or 'POST'
    :type method: ``unicode``
    :param url: URL to open
    :type url: ``unicode``
    :param params: mapping of URL parameters
    :type params: :class:`dict`
    :param data: mapping of form data ``{'field_name': 'value'}`` or
        :class:`str`
    :type data: :class:`dict` or :class:`str`
    :param headers: HTTP headers
    :type headers: :class:`dict`
    :param cookies: cookies to send to server
    :type cookies: :class:`dict`
    :param files: files to upload (see below).
    :type files: :class:`dict`
    :param auth: username, password
    :type auth: ``tuple``
    :param timeout: connection timeout limit in seconds
    :type timeout: ``int``
    :param allow_redirects: follow redirections
    :type allow_redirects: ``Boolean``
    :returns: :class:`Response` object
    The ``files`` argument is a dictionary::
        {'fieldname' : { 'filename': 'blah.txt',
                         'content': '<binary data>',
                         'mimetype': 'text/plain'}
        }
    * ``fieldname`` is the name of the field in the HTML form.
    * ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
      be used to guess the mimetype, or ``application/octet-stream``
      will be used.

    NOTE(review): the ``cookies`` parameter is accepted but never read in
    this function body -- presumably handled elsewhere or unimplemented.
    """
    # Process-wide timeout: affects all sockets, not just this request.
    socket.setdefaulttimeout(timeout)
    # Default handlers
    openers = []
    if not allow_redirects:
        openers.append(NoRedirectHandler())
    if auth is not None:  # Add authorisation handler
        username, password = auth
        password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, url, username, password)
        auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
        openers.append(auth_manager)
    # Install our custom chain of openers (global urllib2 state).
    opener = urllib2.build_opener(*openers)
    urllib2.install_opener(opener)
    # Case-insensitive header handling so 'User-Agent' == 'user-agent'.
    if not headers:
        headers = CaseInsensitiveDictionary()
    else:
        headers = CaseInsensitiveDictionary(headers)
    if 'user-agent' not in headers:
        headers['user-agent'] = USER_AGENT
    # Accept gzip-encoded content
    # NOTE(review): with no prior accept-encoding this yields ['', 'gzip']
    # and sends ", gzip" -- harmless to most servers but slightly malformed.
    encodings = [s.strip() for s in
                 headers.get('accept-encoding', '').split(',')]
    if 'gzip' not in encodings:
        encodings.append('gzip')
    headers['accept-encoding'] = ', '.join(encodings)
    if files:
        # Multipart upload: merge form fields and file parts into one body.
        if not data:
            data = {}
        new_headers, data = encode_multipart_formdata(data, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urllib.urlencode(str_dict(data))
    # Make sure everything is encoded text
    headers = str_dict(headers)
    if isinstance(url, unicode):
        url = url.encode('utf-8')
    if params:  # GET args (POST args are handled in encode_multipart_formdata)
        url = url + '?' + urllib.urlencode(str_dict(params))
    # urllib2 switches to POST automatically when ``data`` is not None.
    req = urllib2.Request(url, data, headers)
    return Response(req)
0
Example 68
Project: python-compat-runtime Source File: appengine_rpc_httplib2.py
def Send(self, request_path, payload='',
content_type='application/octet-stream',
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
Raises:
AuthPermanentFail: If authorization failed in a permanent way.
urllib2.HTTPError: On most HTTP errors.
"""
self.http = self.http_object or self.http_class(
cache=self.memory_cache, ca_certs=self.certpath,
disable_ssl_certificate_validation=(not self.cert_file_available))
self.http.follow_redirects = False
self.http.timeout = timeout
url = '%s://%s%s' % (self.scheme, self.host, request_path)
if kwargs:
url += '?' + urllib.urlencode(sorted(kwargs.items()))
headers = {}
if self.extra_headers:
headers.update(self.extra_headers)
headers['X-appcfg-api-version'] = '1'
if payload is not None:
method = 'POST'
headers['content-length'] = str(len(payload))
headers['Content-Type'] = content_type
else:
method = 'GET'
if self.host_override:
headers['Host'] = self.host_override
rpc_errors = 0
auth_errors = [0]
conflict_errors = 0
timeout_errors = 0
def NeedAuth():
"""Marker that we need auth; it'll actually be tried next time around."""
auth_errors[0] += 1
logger.debug('Attempting to auth. This is try %s of %s.',
auth_errors[0], self.auth_max_errors)
if auth_errors[0] > self.auth_max_errors:
RaiseHttpError(url, response_info, response, 'Too many auth attempts.')
while (rpc_errors < self.rpc_max_errors and
conflict_errors < self.conflict_max_errors and
timeout_errors < self.timeout_max_errors):
self._Authenticate(self.http, auth_errors[0] > 0)
logger.debug('Sending request to %s headers=%s body=%s',
url, headers,
self.debug_data and payload or payload and 'ELIDED' or '')
try:
response_info, response = self.http.request(
url, method=method, body=payload, headers=headers)
except client.AccessTokenRefreshError, e:
logger.info('Got access token error', exc_info=1)
response_info = httplib2.Response({'status': 401})
response_info.reason = str(e)
response = ''
status = response_info.status
if status == 200:
return response
logger.debug('Got http error %s.', response_info.status)
if status == 401:
NeedAuth()
continue
elif status == 408:
timeout_errors += 1
logger.debug('Got timeout error %s of %s. Retrying in %s seconds',
timeout_errors, self.timeout_max_errors,
_TIMEOUT_WAIT_TIME)
time.sleep(_TIMEOUT_WAIT_TIME)
continue
elif status == 409:
conflict_errors += 1
wait_time = random.randint(0, 10)
logger.debug('Got conflict error %s of %s. Retrying in %s seconds.',
conflict_errors, self.conflict_max_errors, wait_time)
time.sleep(wait_time)
continue
elif status >= 500 and status < 600:
rpc_errors += 1
logger.debug('Retrying. This is attempt %s of %s.',
rpc_errors, self.rpc_max_errors)
continue
elif status == 302:
loc = response_info.get('location')
logger.debug('Got 302 redirect. Location: %s', loc)
if (loc.startswith('https://www.google.com/accounts/ServiceLogin') or
re.match(r'https://www\.google\.com/a/[a-z0-9.-]+/ServiceLogin',
loc)):
NeedAuth()
continue
elif loc.startswith('http://%s/_ah/login' % (self.host,)):
RaiseHttpError(url, response_info, response,
'dev_appserver login not supported')
else:
RaiseHttpError(url, response_info, response,
'Unexpected redirect to %s' % loc)
else:
logger.debug('Unexpected results: %s', response_info)
RaiseHttpError(url, response_info, response,
'Unexpected HTTP status %s' % status)
logging.info('Too many retries for url %s', url)
RaiseHttpError(url, response_info, response)
0
Example 69
Project: zorna Source File: api.py
def forms_form_browse_entries(request, slug, entries=None):
    """Build the template context for browsing a form's entries.

    Reads filter/sort/pagination state from ``request.GET``, resolves the
    parent-entry chain for bound forms (the ``where`` parameter), and
    returns a context dict -- or ``None`` when the form ``slug`` does not
    exist.  Access control is intentionally NOT checked here (see comment
    below).
    """
    try:
        form = FormsForm.objects.select_related(depth=1).get(slug=slug)
    except:
        return None
    # don't verify access, caller must do it for us
    extra_context = {}
    kwargs = {}
    sort = ''
    if entries:
        kwargs['entries'] = entries
    extra_context['filters'] = {}
    extra_context['field_filters'] = []
    form_fields = form.fields.select_related().all().order_by('label')
    form.fields_reference = {}
    # 'hidden' arrives as a comma-separated list of field slugs.
    hidden = request.GET.get('hidden', '')
    if hidden:
        hidden = hidden.split(',')
    else:
        hidden = []
    # Collect per-field filters from the query string and pick the sort
    # column; fields not visible in list view are added to 'hidden'.
    for f in form_fields:
        fl = request.GET.get(f.slug, None)
        if fl:
            kwargs[f.slug] = smart_str(fl)
        if f.for_sort:
            sort = f.slug
        if not f.visible_in_list:
            hidden.append(f.slug)
    # Preserve the active field filters for building pagination links.
    extra_context['params'] = urllib.urlencode(kwargs)
    kwargs['f'] = request.GET.get('f', '')
    kwargs['q'] = request.GET.get('q', '')
    kwargs['o'] = request.GET.get('o', sort)
    kwargs['ot'] = request.GET.get('ot', 'asc')
    kwargs['where'] = request.GET.get('where', '')
    extra_context['parents'] = []
    if kwargs['where']:
        # 'where' has the shape "<form_slug>.<field>:<entry_id>"; walk the
        # bind_to_entry chain upwards to collect ancestor entries.
        try:
            bte = kwargs['where'].split(':')
            extra_context['where_entry_id'] = entry_id = bte[1]
            bte = bte[0].split('.')
            extra_context['where_form_slug'] = form_slug = bte[0]
            extra_context['where_form_field'] = form_field = bte[1]
            while entry_id:
                e = FormsFormEntry.objects.select_related().get(
                    pk=entry_id, form__slug=form_slug)
                c, r = forms_get_entry(e)
                extra_context['parents'].append({'row': r[
                    form_field], 'entry': e})
                hidden.append(r[form_field]['slug'])
                if not e.form.bind_to_entry:
                    break
                bte = e.form.bind_to_entry.split('.')
                form_slug = bte[0]
                form_field = bte[1]
                entry_id = e.entry.pk
        except Exception as e:
            # Malformed 'where' or missing entry: fall back to no parents.
            extra_context['parents'] = []
    # Chain was collected child-first; present it root-first.
    extra_context['parents'].reverse()
    kwargs['hidden'] = ','.join(hidden)
    columns, entries = forms_get_entries(form, **kwargs)
    # Build the filter widgets: choice fields referencing another form get
    # a <select>; everything else becomes a free-text field filter.
    for f in form_fields:
        if '.' in f.reference and f.is_a(*fields.CHOICES):
            fl = request.GET.get(f.slug, None)
            choices = [('', '--- %s ---' % f.label)]
            for e in form.fields_reference[f.pk]:
                choices.append((e[0], e[1]))
            s = forms.Select(choices=choices)
            extra_context['filters'][f.label] = s.render(f.slug, fl, None)
        else:
            extra_context['field_filters'].append([f.label, f.slug])
            # NOTE(review): 'fl' here is left over from a previous loop
            # iteration (only assigned in the choice branch) -- confirm
            # intended nesting against upstream before relying on it.
            if fl:
                kwargs[f.slug] = fl
    paginator = Paginator(entries, 25)
    page = int(request.GET.get('page', 1))
    try:
        rows = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        rows = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        rows = paginator.page(paginator.num_pages)
    extra_context['q'] = kwargs['q']
    extra_context['f'] = kwargs['f']
    extra_context['column_filter'] = kwargs['o']
    extra_context['column_order'] = kwargs['ot']
    extra_context['zorna_title_page'] = _(u'Forms')
    extra_context['form'] = form
    extra_context['columns'] = columns
    extra_context['entries'] = entries
    extra_context['rows'] = rows
    extra_context['page'] = page
    extra_context['paginator'] = paginator
    extra_context['where'] = kwargs['where']
    # Expose the bound parent form/field (if this form is bound to another).
    try:
        r = form.bind_to_entry.split('.')
        extra_context['bind_entry_slug'] = r[0]
        extra_context['bind_entry_field'] = r[1]
    except:
        pass
    extra_context['where'] = kwargs['where']
    return extra_context
0
Example 70
Project: libpebble Source File: PblAnalytics.py
def post_event(self, category, action, label, value=None):
    """Send an event to the analytics collection server.

    We are being a little un-orthodox with how we use the fields in the
    event and are hijacking some of the fields for alternative purposes:

    Campaign Name ('cn'): We are using this to represent the operating
    system as returned by python.platform(). We tried putting this into the
    user-agent string but it isn't picked up by the Google Analytics web
    UI for some reason - perhaps it's the wrong format for that purpose.

    Campaign Source ('cs'): We are also copying the client id ('cid') to
    this field. The 'cid' field is not accessible from the web UI but the
    'cs' field is.

    Campaign Keyword ('ck'): We are using this to store the python version.

    Parameters:
    ----------------------------------------------------------------
    category: The event category
    action: The event action
    label: The event label
    value: The optional event value (integer)
    """
    # Assemble the Measurement Protocol payload.
    data = {}
    data['v'] = 1                       # protocol version
    data['tid'] = self.tracking_id      # GA tracking id
    data['cid'] = self.client_id
    # TODO: Set this to PEBBLE-INTERNAL or PEBBLE-AUTOMATED as appropriate
    data['cn'] = self.os_str
    data['cs'] = self.client_id         # duplicated so it is visible in the web UI
    data['ck'] = platform.python_version()
    # Generate an event
    data['t'] = 'event'
    data['ec'] = category
    data['ea'] = action
    data['el'] = label
    # NOTE(review): `if value:` treats an explicit 0 the same as None;
    # presumably intentional since both end up as 0.
    if value:
        data['ev'] = value
    else:
        data['ev'] = 0
    # Convert all strings to utf-8 (Python 2: basestring/unicode)
    for key,value in data.items():
        if isinstance(value, basestring):
            if isinstance(value, unicode):
                data[key] = value.encode('utf-8')
            else:
                # Decode unknown byte strings permissively before re-encoding.
                data[key] = unicode(value, errors='replace').encode('utf-8')
    headers = {
        'User-Agent': self.user_agent
    }
    # We still build up the request but just don't send it if
    # doNotTrack is on. Building it up allows us to still generate
    # debug logging messages to see the content we would have sent
    if self.do_not_track:
        logging.debug("Not sending analytics - tracking disabled")
    else:
        request = Request(self.endpoint,
                          data=urlencode(data),
                          headers = headers)
        try:
            # Very short timeout: analytics must never stall the tool.
            urlopen(request, timeout=0.1)
        except Exception as e:
            # Turn off tracking so we don't incur a delay on subsequent
            # events in this same session.
            self.do_not_track = True
            logging.debug("Exception occurred sending analytics: %s" %
                          str(e))
            logging.debug("Disabling analytics due to intermittent "
                          "connectivity")
    # Debugging output?
    dumpDict = dict(data)
    # Remove the fields that are printed individually below.
    for key in ['ec', 'ea', 'el', 'ev']:
        dumpDict.pop(key, None)
    logging.debug("[Analytics] header: %s, data: %s"
                  "\ncategory: %s"
                  "\naction: %s"
                  "\nlabel: %s"
                  "\nvalue: %s" %
                  (headers, str(dumpDict),
                   data['ec'], data['ea'], data['el'], data['ev']))
0
Example 71
Project: drawquest-web Source File: views.py
def get_signup_context(request, skip_invite_code=None, template="user/signup.django.html",
                       cookies_to_set={}, cookies_to_delete=[]):
    """
    Returns an error context (or dict) if the signup is not successful.
    Returns `None` for successful signups.
    `cookies_to_set` and `cookies_to_delete` should be passed empty so that this function may append to them.
    `cookies_to_set` is for session cookies, to tie into after_signup.py / after_signup.js.

    NOTE(review): the mutable default arguments are deliberate per the
    docstring contract above (callers pass fresh containers), but callers
    relying on the defaults would share state across calls.
    """
    skip_invite_code = skip_invite_code or request.GET.get('skip_invite_code', '').lower()
    bypass_copy = settings.SHORT_CODE_COPY.get(skip_invite_code)
    skippable_codes = (['dicksoup', 'angelgate', 'friends_and_family', 'herpderp', 'fffffat', 'buzzfeedbrews']
                       + settings.SHORT_CODES)
    login_url = '/login'
    # Preserve the post-login redirect target (and any extra params) on the
    # login link shown in the signup context.
    if request.REQUEST.get('next'):
        next = request.REQUEST['next']
        params = [urllib.urlencode({'next': next})]
        if request.method == 'POST':
            next_params = request.POST.get('next_params', '')
        else:
            next_params = request.GET.copy()
            del next_params['next']
            next_params = urllib.urlencode(next_params)
        if next_params:
            params.append(next_params)
        login_url = login_url + '?' + u'&'.join(params)
    try:
        fb_user, fb_api = util.get_fb_api(request)
    except NotLoggedIntoFacebookError:
        fb_user, fb_api = None, None
    fb_uid = fb_user.get('uid') if fb_user else None
    # Tie the signup to a Facebook invite, either via cookie or by uid.
    fb_invite = None
    if request.COOKIES.get('fb_message_id'):
        fb_invite = FacebookInvite.objects.get_or_none(fb_message_id=request.COOKIES.get('fb_message_id'))
        cookies_to_delete.append('fb_message_id')
    if not fb_invite and fb_uid:
        fb_invite = FacebookInvite.get_invite(fb_user.get('uid'))
    # GET: just render the signup form with everything gathered so far.
    if request.method == 'GET':
        return locals()
    # POST: validate and create the account.
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    email = request.POST.get('email', '')
    if not fb_uid:
        fb_uid = request.POST.get('facebook_id', None)
    # Error helper: captures the current locals as the template context.
    # NOTE(review): context=locals() is evaluated once at def time, so the
    # context reflects the state at this point in the function.
    def error(message, context=locals()):
        context['message'] = message
        Metrics.signup_form_invalid.record(request)
        return context
    if check_rate_limit(request, username):
        return error("Too many failed signup attempts. Wait a minute and try again.")
    if not password:
        return error("Password required.")
    if not User.validate_password(password):
        return error("Sorry, your password is too short. Please use 5 or more characters.")
    error_msg = User.validate_username(username)
    if error_msg:
        return error(error_msg)
    if not User.validate_email(email):
        return error("Please enter a valid email address.")
    if not User.email_is_unused(email):
        return error("This email address is already in use. Try <a href='/login'>signing in</a> "
                     "or <a href='/password_reset'>resetting</a> your password if you've forgotten it.")
    if fb_uid and not UserInfo.facebook_is_unused(fb_uid):
        return error("This Facebook account is already in use. Try <a href='/login'>signing in</a> "
                     "or <a href='/password_reset'>resetting</a> your password if you've forgotten it.")
    try:
        user = User.objects.create_user(username, email, password)
    except IntegrityError:
        # Racy username collision despite earlier validation.
        return error("Username taken.")
    if not fb_uid:
        fb_uid = None  # normalize falsy values ('' from POST) to None
    UserInfo(user=user, invite_bypass=skip_invite_code,
             facebook_id=fb_uid, enable_timeline=True).save()
    if fb_invite:
        fb_invite.invitee = user
        fb_invite.save()
    user = auth.authenticate(username=username, password=password)
    # Handle following featured groups and optionally one defined by their short code.
    # NOTE(review): `to_follow` is not defined anywhere visible in this
    # function — presumably a module-level list; verify against the source.
    if skip_invite_code:
        autofollow = settings.SHORT_CODE_AUTOFOLLOW.get(skip_invite_code)
        if autofollow:
            to_follow.append(autofollow)
    economy.grant_daily_free_stickers(request.user, force=True, count=knobs.SIGNUP_FREE_STICKERS)
    # Follow the Canvas account.
    try:
        user.redis.following.sadd(User.objects.get(username=settings.CANVAS_ACCOUNT_USERNAME).id)
    except User.DoesNotExist:
        pass
    # Logged-out remix?
    cookie_key, post_data = after_signup.get_posted_comment(request)
    if post_data:
        post_comment(request, user, post_data)
        cookies_to_delete.append(cookie_key)
    # Reward/link the inviter if this signup came from an invite.
    inviter_id = request.session.get('inviter')
    if inviter_id:
        user.kv.inviter = inviter_id
        del request.session['inviter']
        inviter = User.objects.get(pk=inviter_id)
        user.follow(inviter)
        inviter.follow(user)
    # DEPRECATED. Use after_signup.py / after_signup.js now instead.
    extra_info = request.POST.get("info")
    if extra_info:
        extra_info = util.loads(extra_info)
        if extra_info.get('in_flow') == 'yes':
            fact.record('flow_signup', request, {})
        # A user may have come to signup by remixing/replying, and we've got their post data to submit and send them
        # to.
        if not post_data:
            post_data = extra_info.get('post')
            if post_data:
                post_comment(request, user, post_data)
    old_session_key = request.session.session_key
    # Deferred work: clean up Facebook app requests and record metrics.
    def _after_signup():
        if fb_api:
            app_requests = fb_api.get_object('/me/apprequests/').get('data', [])
            for app_request in app_requests:
                # NOTE(review): `id in app_request` tests the *builtin* `id`
                # against the dict — almost certainly meant `'id' in
                # app_request`; also `fb` vs `fb_api` looks suspect. Left
                # as-is; confirm against upstream before fixing.
                if id in app_request:
                    fb.delete_object(app_request['id'])
        Metrics.signup.record(request, old_session_key=old_session_key, username=username, email=email)
        if 'failed_signup' in request.session:
            del request.session['failed_signup']
            Metrics.signup_second_try.record(request)
        if template == 'signup/_signup_prompt.html':
            Metrics.signup_prompt.record(request)
        else:
            Metrics.signup_main.record(request)
    bgwork.defer(_after_signup)
    # auth.login starts a new session and copies the session data from the old one to the new one
    auth.login(request, user)
    experiments.migrate_from_request_to_user(request, user)
0
Example 72
Project: pyquery Source File: pyquery.py
def __init__(self, *args, **kwargs):
    """Build a PyQuery object from a URL, filename, markup string, element,
    element list, or another PyQuery object (optionally with a selector).
    """
    html = None
    elements = []
    self._base_url = None
    # Pop the parser choice out of kwargs so it doesn't confuse the
    # url/filename dispatch below.
    self.parser = kwargs.get('parser', None)
    if 'parser' in kwargs:
        del kwargs['parser']
    # Positional shorthand: PyQuery('http://...', [data]) is treated as
    # PyQuery(url='http://...', data=...).
    if len(args) >= 1 and isinstance(args[0], basestring) \
            and args[0].startswith('http://'):
        kwargs['url'] = args[0]
        if len(args) >= 2:
            kwargs['data'] = args[1]
        args = []
    if 'parent' in kwargs:
        self._parent = kwargs.pop('parent')
    else:
        self._parent = no_default
    if kwargs:
        # specific case to get the dom
        if 'filename' in kwargs:
            # NOTE(review): the file handle opened here is never explicitly
            # closed; relies on GC.
            html = open(kwargs['filename'])
        elif 'url' in kwargs:
            url = kwargs.pop('url')
            if 'opener' in kwargs:
                # Caller-supplied opener does the fetch.
                opener = kwargs.pop('opener')
                html = opener(url)
            else:
                method = kwargs.get('method')
                data = kwargs.get('data')
                if type(data) in (dict, list, tuple):
                    data = urlencode(data)
                # For GET, fold the encoded data into the query string
                # instead of sending a request body.
                if isinstance(method, basestring) and method.lower() == 'get' and data:
                    if '?' not in url:
                        url += '?'
                    elif url[-1] not in ('?', '&'):
                        url += '&'
                    url += data
                    data = None
                if data and PY3k:
                    data = data.encode('utf-8')
                html = urlopen(url, data)
                if not self.parser:
                    self.parser = 'html'
            self._base_url = url
        else:
            raise ValueError('Invalid keyword arguments %s' % kwargs)
        elements = fromstring(html, self.parser)
    else:
        # get nodes
        # determine context and selector if any
        selector = context = no_default
        length = len(args)  # NOTE(review): unused in the visible code
        if len(args) == 1:
            context = args[0]
        elif len(args) == 2:
            selector, context = args
        else:
            raise ValueError("You can't do that." +\
                             " Please, provide arguments")
        # get context
        if isinstance(context, basestring):
            try:
                elements = fromstring(context, self.parser)
            except Exception:
                raise ValueError(context)
        elif isinstance(context, self.__class__):
            # copy
            elements = context[:]
        elif isinstance(context, list):
            elements = context
        elif isinstance(context, etree._Element):
            elements = [context]
        # select nodes
        if elements and selector is not no_default:
            xpath = selector_to_xpath(selector)
            results = [tag.xpath(xpath) for tag in elements]
            # Flatten the results
            elements = []
            for r in results:
                elements.extend(r)
    # PyQuery subclasses list; seed it with the selected elements.
    list.__init__(self, elements)
0
Example 73
def check_http(req, session, candidate, config, sessionid=False, csrf=False):
    """Try every rendered credential pair for `candidate` against each of its
    auth URLs and return the list of candidates whose login succeeded.

    req       -- original request/target descriptor (used for the base URL)
    session   -- requests-style session used to issue GET/POST attempts
    candidate -- credential-candidate dict with an 'auth' sub-dict
                 (keys: 'url' list, 'type', optional 'headers', 'base64')
    config    -- runtime config (useragent, proxy, timeout, optional delay)
    sessionid -- cookies to send with each attempt (or False)
    csrf      -- CSRF token forwarded to render_creds()

    Returns: list of matching candidate dicts (may contain duplicates if
    several credential pairs succeed, matching the original behavior).
    """
    matches = list()
    data = None  # NOTE(review): unused; kept for interface stability
    headers = dict()
    delay = config.get('delay', 10)  # back-off (seconds) after an HTTP 429
    url = get_base_url(req)
    logger.debug('[check_http] base url: %s' % url)
    urls = candidate['auth']['url']
    # Merge candidate-specific headers with the configured user agent.
    if candidate['auth'].get('headers', None):
        canheaders = candidate['auth']['headers']
        logger.debug('[check_http] candidate headers: %s' % canheaders)
        for head in canheaders:
            headers.update(head)
        headers.update(config['useragent'])
    else:
        headers = config['useragent']
    rendered = render_creds(candidate, csrf)
    for cred in rendered:
        # BUG FIX: original logged the username twice; log username:password.
        logger.debug('[check_http] %s - %s:%s' % (
            candidate['name'],
            cred['username'],
            cred['password'],))
        res = None
        for u in urls:
            url = get_base_url(req) + u
            logger.debug("[check_http] url: %s" % url)
            logger.debug('[check_http] data: %s' % cred['data'])
            try:
                if candidate['auth']['type'] == 'post' or candidate['auth']['type'] == 'raw_post':
                    res = session.post(
                        url,
                        cred['data'],
                        cookies=sessionid,
                        verify=False,
                        proxies=config['proxy'],
                        timeout=config['timeout'],
                        headers=headers,
                    )
                else:
                    # GET auth: credentials travel in the query string.
                    qs = urllib.urlencode(cred['data'])
                    url = "%s?%s" % (url, qs)
                    logger.debug("[check_http] url: %s" % url)
                    res = session.get(
                        url,
                        cookies=sessionid,
                        verify=False,
                        proxies=config['proxy'],
                        timeout=config['timeout'],
                        headers=headers,
                    )
            except Exception as e:
                logger.error("[check_http] Failed to connect to %s" % url)
                logger.debug("[check_http] Exception: %s" %
                             e.__str__().replace('\n', '|'))
                continue
            logger.debug('[check_http] res.status_code: %i' % res.status_code)
            logger.debug('[check_http] res.text: %s' % res.text)
            # adding sleep and try again if 429 status code received.
            # response code 429 is too many requests. Some appliances or WAFs may respond this way if
            # there are too many requests from the same source in a certain amount of time.
            if res.status_code == 429:
                logger.warn('[check_http] Status 429 received. Sleeping for %d seconds and trying again' % delay)
                sleep(delay)
                try:
                    # BUG FIX: original referenced `candidtate` (NameError).
                    if candidate['auth']['type'] == 'post' or candidate['auth']['type'] == 'raw_post':
                        res = session.post(
                            url,
                            cred['data'],
                            cookies=sessionid,
                            verify=False,
                            proxies=config['proxy'],
                            timeout=config['timeout'],
                            headers=headers,
                        )
                    else:
                        res = session.get(
                            url,
                            cookies=sessionid,
                            verify=False,
                            proxies=config['proxy'],
                            timeout=config['timeout'],
                            headers=headers,
                        )
                except Exception as e:
                    logger.error('[check_http] Failed to connect to %s' % url)
                    # BUG FIX: format string had two %s placeholders but only
                    # one argument, which would raise TypeError at log time.
                    logger.debug('[check_http] Exception: %s' %
                                 e.__str__().replace('\n', '|'))
                    continue
                logger.error('[check_http] res.status_code: %i' % res.status_code)
                logger.debug('[check_http] res.text: %s' % res.text)
            if res and check_success(req, res, candidate, cred['username'], cred['password'], candidate['auth'].get('base64', None)):
                matches.append(candidate)
    logger.debug('[check_http] matches: %s' % matches)
    return matches
0
Example 74
Project: edx-platform Source File: views.py
@cache_control(must_revalidate=True, max_age=settings.FOOTER_BROWSER_CACHE_MAX_AGE)
def footer(request):
    """Retrieve the branded footer.

    This end-point provides information about the site footer,
    allowing for consistent display of the footer across other sites
    (for example, on the marketing site and blog).

    It can be used in one of two ways:
    1) A client renders the footer from a JSON description.
    2) A browser loads an HTML representation of the footer
       and injects it into the DOM. The HTML includes
       CSS and JavaScript links.

    In case (2), we assume that the following dependencies
    are included on the page:
    a) JQuery (same version as used in edx-platform)
    b) font-awesome (same version as used in edx-platform)
    c) Open Sans web fonts

    Example: Retrieving the footer as JSON

        GET /api/branding/v1/footer
        Accepts: application/json

        {
            "navigation_links": [
                {
                    "url": "http://example.com/about",
                    "name": "about",
                    "title": "About"
                },
                # ...
            ],
            "social_links": [
                {
                    "url": "http://example.com/social",
                    "name": "facebook",
                    "icon-class": "fa-facebook-square",
                    "title": "Facebook",
                    "action": "Sign up on Facebook!"
                },
                # ...
            ],
            "mobile_links": [
                {
                    "url": "http://example.com/android",
                    "name": "google",
                    "image": "http://example.com/google.png",
                    "title": "Google"
                },
                # ...
            ],
            "legal_links": [
                {
                    "url": "http://example.com/terms-of-service.html",
                    "name": "terms_of_service",
                    "title": "Terms of Service"
                },
                # ...
            ],
            "openedx_link": {
                "url": "http://open.edx.org",
                "title": "Powered by Open edX",
                "image": "http://example.com/openedx.png"
            },
            "logo_image": "http://example.com/static/images/logo.png",
            "copyright": "EdX, Open edX, and the edX and Open edX logos are \
            registered trademarks or trademarks of edX Inc."
        }

    Example: Retrieving the footer as HTML

        GET /api/branding/v1/footer
        Accepts: text/html

    Example: Including the footer with the "Powered by OpenEdX" logo

        GET /api/branding/v1/footer?show-openedx-logo=1
        Accepts: text/html

    Example: Retrieving the footer in a particular language

        GET /api/branding/v1/footer?language=en
        Accepts: text/html

    Example: Retrieving the footer with all JS and CSS dependencies (for testing)

        GET /api/branding/v1/footer?include-dependencies=1
        Accepts: text/html
    """
    if not branding_api.is_enabled():
        raise Http404
    # Use the content type to decide what representation to serve
    accepts = request.META.get('HTTP_ACCEPT', '*/*')
    # Show the OpenEdX logo in the footer
    show_openedx_logo = bool(request.GET.get('show-openedx-logo', False))
    # Include JS and CSS dependencies
    # This is useful for testing the end-point directly.
    include_dependencies = bool(request.GET.get('include-dependencies', False))
    # Override the language if necessary
    language = request.GET.get('language', translation.get_language())
    # Render the footer information based on the extension
    if 'text/html' in accepts or '*/*' in accepts:
        # Cache per (language, logo, dependencies) combination; the params
        # are folded into the key via urlencode.
        cache_key = u"branding.footer.{params}.html".format(
            params=urllib.urlencode({
                'language': language,
                'show_openedx_logo': show_openedx_logo,
                'include_dependencies': include_dependencies,
            })
        )
        content = cache.get(cache_key)
        if content is None:
            with translation.override(language):
                content = _render_footer_html(request, show_openedx_logo, include_dependencies)
                cache.set(cache_key, content, settings.FOOTER_CACHE_TIMEOUT)
        return HttpResponse(content, status=200, content_type="text/html; charset=utf-8")
    elif 'application/json' in accepts:
        # JSON variant additionally keys on the request scheme so that
        # asset URLs match http/https.
        cache_key = u"branding.footer.{params}.json".format(
            params=urllib.urlencode({
                'language': language,
                'is_secure': request.is_secure(),
            })
        )
        footer_dict = cache.get(cache_key)
        if footer_dict is None:
            with translation.override(language):
                footer_dict = branding_api.get_footer(is_secure=request.is_secure())
                cache.set(cache_key, footer_dict, settings.FOOTER_CACHE_TIMEOUT)
        return JsonResponse(footer_dict, 200, content_type="application/json; charset=utf-8")
    else:
        # Client asked for a representation we don't provide.
        return HttpResponse(status=406)
0
Example 75
Project: pyNSSFClient Source File: sample_share.py
def _get_files_by_list(self, file_identifiers, destination_directory,
                       compression=None, extra_attributes = ""):
    """Fetch a batch of samples by hash from the SampleShare server and
    write each to `destination_directory`.

    file_identifiers      -- list of hex hash strings, all of one type
                             (md5/sha1/sha256 inferred from length)
    destination_directory -- directory the fetched files are written into
    compression           -- optional compression scheme requested from
                             the server and passed to _process_file
    extra_attributes      -- extra query-string fragment appended verbatim

    Raises ValueError on mixed hash types and SampleShareError on any
    protocol/read failure.
    """
    logger.debug("Getting file list %s to %s with compression %s",
                 file_identifiers, destination_directory, compression)
    # If there's nothing to do, return
    if len(file_identifiers) < 1:
        return
    # Make sure they're all lower-case, for later comparison purposes
    file_identifiers = [x.lower() for x in file_identifiers]
    # Verify that all identifiers are the same type
    expected_length = len(file_identifiers[0])
    for file_identifier in file_identifiers:
        if len(file_identifier) != expected_length:
            raise ValueError("All file hashes must be the same type")
    hash_type = SampleShare.get_hash_type(file_identifiers[0])
    logger.debug("hash type = %s", hash_type)
    # Per spec, "Legacy clients without support for sha1/sha256 will
    # use 'md5list'"
    hash_list_arg = ':'.join(file_identifiers)
    if hash_type == "md5":
        post_data = {"md5list" : hash_list_arg}
    else:
        post_data = {"hashlist" : hash_list_arg}
    logger.debug("post_data: %s", post_data)
    post_data = urllib.urlencode(post_data)
    compression_option = ("&compression=%s" % (compression)
                          if compression else "")
    request_url = ("https://%s?action=getfile_by_list&user=%s&hash_type=%s%s%s" %
                   (self.url, self.username, hash_type,
                    compression_option, extra_attributes))
    logger.debug("request_url: %s", request_url)
    opener = self.create_opener(request_url)
    response = opener.open(request_url, post_data)
    # Hash is transmitted as hex text, so its wire length is bytes * 2.
    file_identifier_length = (SampleShare.get_hash_length(hash_type) *
                              CHARS_PER_BYTE)
    # Loop through, expecting each of the file IDs requested (they may not
    # return the files in the same order, so ignore the iterator)
    for file_identifier in file_identifiers:
        # Per doc at https://sampleshare.norman.com/signup/framework.php,
        # response is formatted like:
        # <10 byte 0-padded file size><hash of file (length based on
        # requested hash type><file data>...
        # Get the file size
        file_length_string = response.read(FILE_SIZE_LENGTH)
        if len(file_length_string) != FILE_SIZE_LENGTH:
            raise SampleShareError("Unable to read file length")
        file_length = long(file_length_string)
        # Get the file hash (note: rebinding the loop variable is
        # intentional here — the server's ordering is what matters).
        file_identifier = response.read(file_identifier_length)
        if len(file_identifier) != file_identifier_length:
            raise SampleShareError("Unable to read file hash")
        logger.debug("processing %s", file_identifier)
        # Make sure that the file is an expected file; if not, drain its
        # payload to keep the stream aligned, then abort.
        if not file_identifier.lower() in file_identifiers:
            logger.error("Unknown file: %s", file_identifier)
            file_data = response.read(file_length)
            if len(file_data) != file_length:
                raise SampleShareError("Unable to read file data for " +
                                       "unknown file")
            raise SampleShareError("Result contains unexpected file: %s" %
                                   file_identifier)
        destination_filename = os.path.join(destination_directory,
                                            file_identifier)
        logger.debug("Getting file with hash %s (%d bytes) to %s " +
                     "with compression %s", file_identifier, file_length,
                     destination_filename, compression)
        # Get the file data
        file_data = response.read(file_length)
        if len(file_data) != file_length:
            raise SampleShareError("Unable to read file data")
        try:
            SampleShare._process_file(destination_filename, file_data,
                                      compression, file_identifier)
        except StandardError as ex:
            logger.error("Problem processing file %s: %s",
                         file_identifier, ex)
            raise SampleShareError("Couldn't process file %s: %s" %
                                   (file_identifier, ex))
0
Example 76
def createSite(username, password, dataDrive, cacheDrive):
success = True
try:
print
print "--Create ArcGIS Server Site..."
print
agsCache = OpsServerConfig.getCacheRootPath(cacheDrive)
pathList = ["arcgisserver", "directories"]
agsData = makePath(serverDrive, pathList)
pathList = ["arcgisserver", "config-store"]
agsConfig = makePath(serverDrive, pathList)
# Set up required properties for config store
print "\t-Setting up required properties for config store..."
configStoreConnection={"connectionString": agsConfig, "type": "FILESYSTEM"}
print "\tDone."
print
# Set up paths for server directories
jobsDirPath = os.path.join(agsData, "arcgisjobs")
outputDirPath = os.path.join(agsData, "arcgisoutput")
systemDirPath = os.path.join(agsData, "arcgissystem")
# Create Python dictionaries representing server directories
print "\t-Creating Python dictionaries representing server directories"
print "\t\t(arcgiscache, arcgisjobs, arcgisoutput, arcgissystem)..."
cacheDir = dict(name = "arcgiscache",physicalPath = agsCache,directoryType = "CACHE",cleanupMode = "NONE",maxFileAge = 0,description = "Stores tile caches used by map, globe, and image services for rapid performance.", virtualPath = "")
jobsDir = dict(name = "arcgisjobs",physicalPath = jobsDirPath, directoryType = "JOBS",cleanupMode = "TIME_ELAPSED_SINCE_LAST_MODIFIED",maxFileAge = 360,description = "Stores results and other information from geoprocessing services.", virtualPath = "")
outputDir = dict(name = "arcgisoutput",physicalPath = outputDirPath,directoryType = "OUTPUT",cleanupMode = "TIME_ELAPSED_SINCE_LAST_MODIFIED",maxFileAge = 10,description = "Stores various information generated by services, such as map images.", virtualPath = "")
systemDir = dict(name = "arcgissystem",physicalPath = systemDirPath,directoryType = "SYSTEM",cleanupMode = "NONE",maxFileAge = 0,description = "Stores files that are used internally by the GIS server.", virtualPath = "")
print "\tDone."
print
# Serialize directory information to JSON
print "\t-Serializing server directory information to JSON..."
directoriesJSON = json.dumps(dict(directories = [cacheDir, jobsDir, outputDir, systemDir]))
print "\tDone."
print
# Construct URL to create a new site
print "\t-Constructing URL to create a new site..."
createNewSiteURL = "/arcgis/admin/createNewSite"
print "\tDone."
print
# Set up parameters for the request
print "\t-Setting up parameters for the request to create new site..."
params = urllib.urlencode({'username': username, 'password': password, 'configStoreConnection': configStoreConnection, 'directories':directoriesJSON, 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
print "\tDone."
print
# Connect to URL and post parameters
print "\t-Making request to create new site..."
httpConn = httplib.HTTPConnection(servername, serverPort)
httpConn.request("POST", createNewSiteURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
print "\tERROR: Error while creating the site."
print
success = False
else:
data = response.read()
httpConn.close()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
print "\tERROR: Error returned by operation. " + str(data)
print
else:
print "\tSite created successfully."
print "\tDone."
print
except:
success = False
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "cuem* ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
# Return success flag
return success
0
Example 77
Project: plugin.video.nbcsnliveextra Source File: cable_one.py
def SELF_LOGIN(self,login_url):
    """Drive the Cable One / Dish federated (SAML) login flow and return
    (saml_response, relay_state).

    Posts the stored USERNAME/PASSWORD to `login_url`, follows the
    JavaScript `location.href` redirects the identity provider emits,
    relays the intermediate SAMLResponse through the Adobe auth gateway,
    and scrapes the final SAMLResponse/RelayState hidden form fields.
    Returns ('captcha', '') style sentinels when a captcha page is hit.

    NOTE(review): indentation of this block was reconstructed from a
    flattened source; the nesting of the proxy/second-discovery steps
    under the first `if discovery_url != ''` is the most plausible
    reading but should be confirmed against the original plugin.
    """
    # Load the persisted cookie jar and pre-seed the analytics cookies the
    # identity provider expects.
    cj = cookielib.LWPCookieJar(os.path.join(ADDON_PATH_PROFILE, 'cookies.lwp'))
    cj.load(os.path.join(ADDON_PATH_PROFILE, 'cookies.lwp'),ignore_discard=True)
    ck = cookielib.Cookie(version=0, name='s_cc', value='true', port=None, port_specified=False, domain='identity1.dishnetwork.com', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
    cj.set_cookie(ck)
    ck = cookielib.Cookie(version=0, name='s_sq', value='s_sq=synacortveauth%3D%2526pid%253DFederated%252520Login%2526pidt%253D1%2526oid%253Dauthsynacor_auth.cableone.net%2526oidt%253D3%2526ot%253DSUBMIT', port=None, port_specified=False, domain='identity1.dishnetwork.com', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
    cj.set_cookie(ck)
    # Step 1: submit the credential form.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    opener.addheaders = [ ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                          ("Accept-Encoding", "gzip, deflate"),
                          ("Accept-Language", "en-us"),
                          ("Content-Type", "application/x-www-form-urlencoded"),
                          ("Proxy-Connection", "keep-alive"),
                          ("Connection", "keep-alive"),
                          ("Referer", login_url),
                          ("User-Agent", UA_IPHONE)]
    login_data = urllib.urlencode({'username' : USERNAME,
                                   'password' : PASSWORD,
                                   'source' : 'authsynacor_auth.cableone.net',
                                   'source_button' : 'authsynacor_auth.cableone.net'
                                   })
    resp = opener.open(login_url,login_data)
    # Responses may be gzip-compressed; decompress manually (urllib2 won't).
    if resp.info().get('Content-Encoding') == 'gzip':
        buf = StringIO(resp.read())
        f = gzip.GzipFile(fileobj=buf)
        final_response = f.read()
    else:
        final_response = resp.read()
    resp.close()
    print "FINAL RESPONSE"
    print final_response
    last_url = resp.geturl()
    final_response = final_response.replace('\n',"")
    # The IdP redirects via a JS snippet rather than a 302.
    discovery_url = FIND(final_response,'location.href = "','"')
    saml_response = ''
    relay_state = ''
    if 'captcha' in final_response:
        saml_response = 'captcha'
    if discovery_url != '':
        # Step 2: follow the discovery redirect, then re-request with
        # "&history=3" to get the page carrying the SAMLResponse form.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [ ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                              ("Accept-Encoding", "gzip, deflate"),
                              ("Accept-Language", "en-us"),
                              ("Content-Type", "application/x-www-form-urlencoded"),
                              ("Proxy-Connection", "keep-alive"),
                              ("Connection", "keep-alive"),
                              ("Referer", last_url),
                              ("User-Agent", UA_IPHONE)]
        resp = opener.open(discovery_url)
        source = resp.read()
        print resp.geturl()
        last_url = resp.geturl()
        #print source
        resp.close()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [ ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                              ("Accept-Encoding", "gzip, deflate"),
                              ("Accept-Language", "en-us"),
                              ("Content-Type", "application/x-www-form-urlencoded"),
                              ("Proxy-Connection", "keep-alive"),
                              ("Connection", "keep-alive"),
                              ("Referer", last_url),
                              ("User-Agent", UA_IPHONE)]
        resp = opener.open(last_url+"&history=3")
        if resp.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(resp.read())
            f = gzip.GzipFile(fileobj=buf)
            source = f.read()
        #source = resp.read()
        print resp.geturl()
        last_url = resp.geturl()
        print source
        resp.close()
        saml_response = FIND(source,'<input type="hidden" name="SAMLResponse" value="','"')
        # Step 3: relay the SAMLResponse through the Adobe auth gateway proxy.
        proxy_url = 'https://adobe.auth-gateway.net/saml/module.php/saml/sp/saml2-acs.php/proxy_auth.cableone.net'
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [ ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                              ("Accept-Encoding", "gzip, deflate"),
                              ("Accept-Language", "en-us"),
                              ("Content-Type", "application/x-www-form-urlencoded"),
                              ("Proxy-Connection", "keep-alive"),
                              ("Connection", "keep-alive"),
                              ("Referer", last_url),
                              ("User-Agent", UA_IPHONE)]
        body = urllib.urlencode({'SAMLResponse' : saml_response})
        resp = opener.open(proxy_url,body)
        if resp.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(resp.read())
            f = gzip.GzipFile(fileobj=buf)
            source = f.read()
        #source = resp.read()
        print resp.geturl()
        last_url = resp.geturl()
        print source
        resp.close()
        # The gateway may itself issue another JS redirect.
        discovery_url = FIND(source,'location.href = "','"')
        print discovery_url
        saml_response = ''
        relay_state = ''
        if 'captcha' in source:
            saml_response = 'captcha'
        if discovery_url != '':
            # Step 4: follow the second discovery redirect and fetch the
            # final page ("&history=4") carrying SAMLResponse + RelayState.
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            opener.addheaders = [ ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                                  ("Accept-Encoding", "gzip, deflate"),
                                  ("Accept-Language", "en-us"),
                                  ("Proxy-Connection", "keep-alive"),
                                  ("Connection", "keep-alive"),
                                  ("Referer", last_url),
                                  #("Cookie", cookies),
                                  ("User-Agent", UA_IPHONE)]
            resp = opener.open(discovery_url)
            source = resp.read()
            print resp.geturl()
            last_url = resp.geturl()
            #print source
            resp.close()
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            opener.addheaders = [ ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                                  ("Accept-Encoding", "gzip, deflate"),
                                  ("Accept-Language", "en-us"),
                                  ("Content-Type", "application/x-www-form-urlencoded"),
                                  ("Proxy-Connection", "keep-alive"),
                                  ("Connection", "keep-alive"),
                                  ("Referer", last_url),
                                  ("User-Agent", UA_IPHONE)]
            resp = opener.open(last_url+"&history=4")
            if resp.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(resp.read())
                f = gzip.GzipFile(fileobj=buf)
                source = f.read()
            #source = resp.read()
            print resp.geturl()
            last_url = resp.geturl()
            print source
            resp.close()
            saml_response = FIND(source,'<input type="hidden" name="SAMLResponse" value="','"')
            relay_state = FIND(source,'<input type="hidden" name="RelayState" value="','"')
    #Set Global header fields
    ORIGIN = 'https://adobe.auth-gateway.net'
    REFERER = last_url
    SAVE_COOKIE(cj)
    return saml_response, relay_state
0
Example 78
def request(method, url, params=None, data=None, headers=None, cookies=None,
            files=None, auth=None, timeout=60, allow_redirects=False):
    """Initiate an HTTP(S) request. Returns :class:`Response` object.

    :param method: 'GET' or 'POST'
    :type method: ``unicode``
    :param url: URL to open
    :type url: ``unicode``
    :param params: mapping of URL parameters
    :type params: :class:`dict`
    :param data: mapping of form data ``{'field_name': 'value'}`` or
        :class:`str`
    :type data: :class:`dict` or :class:`str`
    :param headers: HTTP headers
    :type headers: :class:`dict`
    :param cookies: cookies to send to server
    :type cookies: :class:`dict`
    :param files: files to upload (see below).
    :type files: :class:`dict`
    :param auth: username, password
    :type auth: ``tuple``
    :param timeout: connection timeout limit in seconds
    :type timeout: ``int``
    :param allow_redirects: follow redirections
    :type allow_redirects: ``Boolean``
    :returns: :class:`Response` object

    The ``files`` argument is a dictionary::

        {'fieldname' : { 'filename': 'blah.txt',
                         'content': '<binary data>',
                         'mimetype': 'text/plain'}
        }

    * ``fieldname`` is the name of the field in the HTML form.
    * ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
      be used to guess the mimetype, or ``application/octet-stream``
      will be used.
    """
    # TODO: cookies
    # TODO: any way to force GET or POST?
    # NOTE(review): the `method` and `cookies` parameters are accepted but
    # never read in this body — HTTP method is implied by `data` (POST when
    # a body is present), per urllib2 semantics.
    # Process-wide timeout: affects every socket created after this call.
    socket.setdefaulttimeout(timeout)
    # Default handlers
    openers = []
    if not allow_redirects:
        openers.append(NoRedirectHandler())
    if auth is not None:  # Add authorisation handler
        username, password = auth
        password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, url, username, password)
        auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
        openers.append(auth_manager)
    # Install our custom chain of openers (process-wide side effect).
    opener = urllib2.build_opener(*openers)
    urllib2.install_opener(opener)
    # Normalize headers into a case-insensitive mapping.
    if not headers:
        headers = CaseInsensitiveDictionary()
    else:
        headers = CaseInsensitiveDictionary(headers)
    if 'user-agent' not in headers:
        headers['user-agent'] = USER_AGENT
    # Accept gzip-encoded content
    encodings = [s.strip() for s in
                 headers.get('accept-encoding', '').split(',')]
    if 'gzip' not in encodings:
        encodings.append('gzip')
    headers['accept-encoding'] = ', '.join(encodings)
    if files:
        if not data:
            data = {}
        # Multipart upload: form data and files are merged into one body.
        new_headers, data = encode_multipart_formdata(data, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urllib.urlencode(str_dict(data))
    # Make sure everything is encoded text (Python 2 byte strings).
    headers = str_dict(headers)
    if isinstance(url, unicode):
        url = url.encode('utf-8')
    if params:  # GET args (POST args are handled in encode_multipart_formdata)
        url = url + '?' + urllib.urlencode(str_dict(params))
    req = urllib2.Request(url, data, headers)
    return Response(req)
0
Example 79
Project: plugin.video.stalker Source File: server.py
def do_GET(self):
    """Handle HTTP GET requests for the Stalker proxy server.

    Routes, by URL pattern:
      * ``channels-N.*`` / ``channels.*?portal=N`` -- build and return an
        ``#EXTM3U`` playlist for portal ``N``.
      * ``live.m3u``  -- resolve a channel command to a stream URL and
        redirect (301) to it.
      * ``epg.xml``   -- return the portal's EPG XML.
      * ``stop``      -- acknowledge and shut the server socket down.
      * ``online``    -- liveness probe.
    Anything else gets a 400.

    Relies on module-level ``portals``, ``server``, ``addondir`` and the
    ``load_channels`` helper (defined elsewhere in this file).
    """
    global portals, server
    try:
        if re.match('.*channels-([0-9])\..*|.*channels\..*\?portal=([0-9])', self.path):
            host = self.headers.get('Host')
            searchObj = re.search('.*channels-([0-9])\..*|.*channels\..*\?portal=([0-9])', self.path)
            # The portal number comes from whichever alternative matched.
            if searchObj.group(1) is not None:
                numportal = searchObj.group(1)
            elif searchObj.group(2) is not None:
                numportal = searchObj.group(2)
            else:
                self.send_error(400, 'Bad Request')
                return
            portal = portals[numportal]
            EXTM3U = "#EXTM3U\n"
            try:
                data = load_channels.getAllChannels(portal['mac'], portal['url'], portal['serial'], addondir)
                data = load_channels.orderChannels(data['channels'].values())
                for i in data:
                    name = i["name"]
                    cmd = i["cmd"]
                    tmp = i["tmp"]
                    number = i["number"]
                    genre_title = i["genre_title"]
                    genre_id = i["genre_id"]
                    logo = i["logo"]
                    if logo != '':
                        logo = portal['url'] + '/stalker_portal/misc/logos/320/' + logo
                    parameters = urllib.urlencode( { 'channel' : cmd, 'tmp' : tmp, 'portal' : numportal } )
                    EXTM3U += '#EXTINF:-1, tvg-id="' + number + '" tvg-name="' + name + '" tvg-logo="' + logo + '" group-title="' + genre_title + '", ' + name + '\n'
                    EXTM3U += 'http://' + host + '/live.m3u?' + parameters + '\n\n'
            except Exception as e:
                # Best effort: surface the failure as a dummy playlist entry.
                EXTM3U += '#EXTINF:-1, tvg-id="Error" tvg-name="Error" tvg-logo="" group-title="Error", ' + portal['name'] + ' ' + str(e) + '\n'
                EXTM3U += 'http://\n\n'
            # Encode once: Content-Length must be the number of *bytes* sent,
            # not the character count (they differ for non-ASCII channel names).
            body = EXTM3U.encode('utf-8')
            self.send_response(200)
            self.send_header('Content-type', 'application/x-mpegURL')
            self.send_header('Connection', 'close')
            self.send_header('Content-Length', len(body))
            self.end_headers()
            self.wfile.write(body)
            self.finish()
        elif 'live.m3u' in self.path:
            args = parse_qs(urlparse(self.path).query)
            cmd = args['channel'][0]
            tmp = args['tmp'][0]
            numportal = args['portal'][0]
            portal = portals[numportal]
            url = load_channels.retriveUrl(portal['mac'], portal['url'], portal['serial'], cmd, tmp)
            # Permanent redirect straight to the resolved stream URL.
            self.send_response(301)
            self.send_header('Location', url)
            self.end_headers()
            self.finish()
        elif 'epg.xml' in self.path:
            args = parse_qs(urlparse(self.path).query)
            numportal = args['portal'][0]
            portal = portals[numportal]
            try:
                xml = load_channels.getEPG(portal['mac'], portal['url'], portal['serial'], addondir)
            except Exception as e:
                xml = '<?xml version="1.0" encoding="ISO-8859-1"?>'
                xml += '<error>' + str(e) + '</error>'
            self.send_response(200)
            # Fixed MIME type: was the invalid 'txt/xml'.
            self.send_header('Content-type', 'text/xml')
            self.send_header('Connection', 'close')
            # NOTE(review): getEPG's return type is opaque here; assumed to be
            # a byte string so len() matches what is written -- confirm.
            self.send_header('Content-Length', len(xml))
            self.end_headers()
            self.wfile.write(xml)
            self.finish()
        elif 'stop' in self.path:
            msg = 'Stopping ...'
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.send_header('Connection', 'close')
            self.send_header('Content-Length', len(msg))
            self.end_headers()
            self.wfile.write(msg.encode('utf-8'))
            # Closing the listening socket terminates the serve loop.
            server.socket.close()
        elif 'online' in self.path:
            msg = 'Yes. I am.'
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.send_header('Connection', 'close')
            self.send_header('Content-Length', len(msg))
            self.end_headers()
            self.wfile.write(msg.encode('utf-8'))
        else:
            self.send_error(400, 'Bad Request')
    except IOError as e:
        # Fixed: the original did `str(IOError)`, which stringifies the
        # exception *class* instead of the caught error instance.
        self.send_error(500, 'Internal Server Error ' + str(e))
0
Example 80
Project: bitex Source File: main.py
def on_message(self, raw_message):
    """Websocket entry point: rate-limit, parse, scrub and dispatch one
    raw JSON message from the client, forwarding to the trade engine
    when no local handler applies."""
    # Honey-pot connections get everything they send logged verbatim.
    if self.honey_pot_connection:
        self.application.log('INFO', "HONEY_POT", raw_message )
    # Without a live backend connection there is nothing useful to do.
    if self.trade_client is None or not self.trade_client.isConnected():
        return
    # --- Rate limiting: keep only timestamps from the last second ---------
    self.last_message_datetime.append(datetime.now())
    message_time_last_second = self.last_message_datetime[-1] - timedelta(seconds=1)
    for x in xrange(0, len(self.last_message_datetime)):
        if self.last_message_datetime[x] > message_time_last_second:
            # Drop everything older than one second.
            self.last_message_datetime = self.last_message_datetime[x:]
            break
    if len(self.last_message_datetime) > 15:  # higher than 15 messages per second
        self.application.log("ERROR",
                             "TOO_MANY_MESSAGES",
                             "Exceed 15 messages per second. [ip=" + self.remote_ip + ",'" + raw_message + "']")
        self.write_message(
            '{"MsgType":"ERROR", "Description":"Too many messages per second", "Detail": "16 messages in the last second"}')
        self.application.unregister_connection(self)
        self.trade_client.close()
        self.close()
        return
    # --- Parse ------------------------------------------------------------
    try:
        req_msg = JsonMessage(raw_message)
    except InvalidMessageException as e:
        self.write_message(
            '{"MsgType":"ERROR", "Description":"Invalid message", "Detail": "' +
            str(e) +
            '"}')
        self.application.unregister_connection(self)
        self.trade_client.close()
        self.close()
        return
    req_msg.set('RemoteIP', self.remote_ip)
    # Scrub credentials before the message text is logged.
    if req_msg.isUserRequest():
        if req_msg.has('Password'):
            raw_message = raw_message.replace(req_msg.get('Password'), '*')
        if req_msg.has('NewPassword'):
            raw_message = raw_message.replace(req_msg.get('NewPassword'), '*')
    self.application.log('IN', self.trade_client.connection_id, raw_message )
    # --- Locally handled message types -------------------------------------
    if req_msg.isTestRequest() or req_msg.isHeartbeat():
        dt = datetime.now()
        response_msg = {
            'MsgType': '0',
            'TestReqID': req_msg.get('TestReqID'),
            # NOTE(review): microsecond/1000.0 yields milliseconds added to an
            # epoch value in seconds -- looks unit-mixed; confirm intent.
            'ServerTimestamp': int(mktime(dt.timetuple()) + dt.microsecond/1000.0 )
        }
        sendTime = req_msg.get('SendTime')
        if sendTime:
            response_msg['SendTime'] = sendTime
        self.write_message(str(json.dumps(response_msg, cls=JsonEncoder)))
        return
    if req_msg.isTradeHistoryRequest():  # Trade History request
        self.on_trade_history_request(req_msg)
        return
    if req_msg.isMarketDataRequest():  # Market Data Request
        self.on_market_data_request(req_msg)
        if not self.trade_client.isConnected():
            self.application.log('DEBUG', self.trade_client.connection_id, 'not self.trade_client.isConnected()' )
            self.application.unregister_connection(self)
            self.trade_client.close()
            self.close()
        return
    if req_msg.isSecurityStatusRequest():
        self.on_security_status_request(req_msg)
        return
    # --- Deposit: obtain a fresh deposit address from the payment processor
    if req_msg.isDepositRequest():
        if not req_msg.get('DepositMethodID') and not req_msg.get('DepositID'):
            currency = req_msg.get('Currency')
            secret = uuid.uuid4().hex
            callback_url = self.application.options.callback_url + secret
            hot_wallet = self.get_broker_wallet('hot', currency)
            cold_wallet = self.get_broker_wallet('cold', currency)
            if not hot_wallet and not cold_wallet:
                return
            if not hot_wallet and cold_wallet:
                dest_wallet = cold_wallet
            elif hot_wallet and not cold_wallet:
                dest_wallet = hot_wallet
            else:
                # 62.5% of all deposits go to the cold wallet, and 37.5% go to the hot wallet
                # (secret is hex, so 10 of its 16 possible first chars are digits).
                dest_wallet = hot_wallet
                if secret[0] in ('0','1','2','3','4','5','6','7','8','9'):
                    dest_wallet = cold_wallet
            if not dest_wallet:
                return
            parameters = urllib.urlencode({
                'method': 'create',
                'address': dest_wallet,
                'callback': callback_url,
                'currency': currency
            })
            try:
                url_payment_processor = self.application.options.url_payment_processor + '?' + parameters
                self.application.log('DEBUG', self.trade_client.connection_id, "invoking..." + url_payment_processor )
                response = urllib2.urlopen(url_payment_processor)
                data = json.load(response)
                self.application.log('DEBUG', self.trade_client.connection_id, str(data) )
                # Enrich the request with the generated address before
                # forwarding it to the trade engine below.
                req_msg.set('InputAddress', data['input_address'])
                req_msg.set('Destination', data['destination'])
                req_msg.set('Secret', secret)
            except urllib2.HTTPError as e:
                out_message = json.dumps({
                    'MsgType': 'ERROR',
                    'ReqID': req_msg.get('DepositReqID'),
                    'Description': 'Blockchain.info is not available at this moment, please try again within few minutes',
                    'Detail': str(e)
                })
                self.write_message(out_message)
                return
            except Exception as e:
                out_message = json.dumps({
                    'MsgType': 'ERROR',
                    'ReqID': req_msg.get('DepositReqID'),
                    'Description': 'Error retrieving a new deposit address from Blockchain.info. Please, try again',
                    'Detail': str(e)
                })
                self.write_message(out_message)
                return
    # --- Everything else is proxied to the trade engine --------------------
    try:
        resp_message = self.trade_client.sendMessage(req_msg)
        if resp_message:
            self.write_message(resp_message.raw_message)
        if resp_message and resp_message.isUserResponse():
            self.user_response = resp_message
            if self.is_user_logged():
                self.application.log('LOGIN_OK', self.trade_client.connection_id, raw_message )
                #TODO: Request open order list
                #self.trade_client.
            else:
                self.application.log('LOGIN_FAILED', self.trade_client.connection_id, raw_message )
        if not self.trade_client.isConnected():
            self.application.log('DEBUG', self.trade_client.connection_id, 'not self.trade_client.isConnected()' )
            self.application.unregister_connection(self)
            self.trade_client.close()
            self.close()
    except TradeClientException as e:
        exception_message = {
            'MsgType': 'ERROR',
            'Description': 'Invalid message',
            'Detail': str(e)
        }
        self.write_message(json.dumps(exception_message))
        self.application.unregister_connection(self)
        self.trade_client.close()
        self.close()
0
Example 81
def __init__(self, sqlrows,
             linkto=None,
             upload=None,
             orderby=None,
             headers={},
             truncate=16,
             columns=None,
             th_link="",
             **attributes):
    """Render a web2py Rows object as an HTML TABLE.

    :param sqlrows: the Rows result set to render
    :param linkto: callable or URL prefix used to hyperlink 'id'/key columns
    :param upload: URL prefix for download links of 'upload' fields
    :param orderby: if truthy, column headers become ?orderby= links
    :param headers: dict colname->label, or the magic strings
        "fieldname:capitalize" / "labels"; None suppresses the header row
    :param truncate: max characters shown for string/text fields
    :param columns: subset/order of column names; defaults to sqlrows.colnames
    :param th_link: base URL prepended to the orderby header links

    NOTE(review): `headers={}` is a mutable default, but it is only read
    (or rebound) here, never mutated in place, so it is harmless.
    """
    # reverted since it causes errors (admin/user & manual importing of req/req/import)
    # super(SQLTABLES3, self).__init__(**attributes)
    TABLE.__init__(self, **attributes)
    self.components = []
    self.attributes = attributes
    self.sqlrows = sqlrows
    (components, row) = (self.components, [])
    if not columns:
        columns = sqlrows.colnames
    # Resolve the magic header modes into a concrete colname->label dict.
    if headers == "fieldname:capitalize":
        headers = {}
        for c in columns:
            headers[c] = " ".join([w.capitalize() for w in c.split(".")[-1].split("_")])
    elif headers == "labels":
        headers = {}
        for c in columns:
            (t, f) = c.split(".")
            field = sqlrows.db[t][f]
            headers[c] = field.label
    # headers=None means "no header row at all".
    if headers != None:
        for c in columns:
            if orderby:
                row.append(TH(A(headers.get(c, c),
                                _href=th_link + "?orderby=" + c)))
            else:
                row.append(TH(headers.get(c, c)))
        components.append(THEAD(TR(*row)))
    tbody = []
    for (rc, record) in enumerate(sqlrows):
        row = []
        # Alternate row classes for zebra striping.
        if rc % 2 == 0:
            _class = "even"
        else:
            _class = "odd"
        for colname in columns:
            # Columns not matching "table.field" are virtual/extra columns.
            if not table_field.match(colname):
                if "_extra" in record and colname in record._extra:
                    r = record._extra[colname]
                    row.append(TD(r))
                    continue
                else:
                    raise KeyError("Column %s not found (SQLTABLE)" % colname)
            (tablename, fieldname) = colname.split(".")
            try:
                field = sqlrows.db[tablename][fieldname]
            except KeyError:
                field = None
            # Extract the raw value; joined queries nest one Row per table.
            if tablename in record \
               and isinstance(record, Row) \
               and isinstance(record[tablename], Row):
                r = record[tablename][fieldname]
            elif fieldname in record:
                r = record[fieldname]
            else:
                raise SyntaxError("something wrong in Rows object")
            r_old = r
            # Per-field rendering chain: first matching rule wins.
            if not field:
                pass
            elif linkto and field.type == "id":
                #try:
                #href = linkto(r, "table", tablename)
                #except TypeError:
                #href = "%s/%s/%s" % (linkto, tablename, r_old)
                #r = A(r, _href=href)
                # linkto may be a callable or a plain URL prefix.
                try:
                    href = linkto(r)
                except TypeError:
                    href = "%s/%s" % (linkto, r)
                r = A(r, _href=href)
            #elif linkto and field.type.startswith("reference"):
            #ref = field.type[10:]
            #try:
            #href = linkto(r, "reference", ref)
            #except TypeError:
            #href = "%s/%s/%s" % (linkto, ref, r_old)
            #if ref.find(".") >= 0:
            #tref,fref = ref.split(".")
            #if hasattr(sqlrows.db[tref],"_primarykey"):
            #href = "%s/%s?%s" % (linkto, tref, urllib.urlencode({fref:r}))
            #r = A(str(r), _href=str(href))
            elif linkto \
                 and hasattr(field._table, "_primarykey") \
                 and fieldname in field._table._primarykey:
                # have to test this with multi-key tables
                # Build a query string from every primary-key component.
                key = urllib.urlencode(dict([ \
                    ((tablename in record \
                      and isinstance(record, Row) \
                      and isinstance(record[tablename], Row)) \
                     and (k, record[tablename][k])) \
                    or (k, record[k]) \
                    for k in field._table._primarykey]))
                r = A(r, _href="%s/%s?%s" % (linkto, tablename, key))
            elif field.type.startswith("list:"):
                r = field.represent(r or [])
            elif field.represent:
                r = field.represent(r)
            elif field.type.startswith("reference"):
                pass
            elif field.type == "blob" and r:
                # Never dump raw blob bytes into the page.
                r = "DATA"
            elif field.type == "upload":
                if upload and r:
                    r = A("file", _href="%s/%s" % (upload, r))
                elif r:
                    r = "file"
                else:
                    r = ""
            elif field.type in ["string", "text"]:
                r = str(field.formatter(r))
                # Truncate on *character* boundaries, not byte boundaries,
                # so multi-byte UTF-8 text is not cut mid-character.
                ur = unicode(r, "utf8")
                if truncate != None and len(ur) > truncate:
                    r = ur[:truncate - 3].encode("utf8") + "..."
            row.append(TD(r))
        tbody.append(TR(_class=_class, *row))
    components.append(TBODY(*tbody))
0
Example 82
Project: ownCloud-for-KODI Source File: owncloud.py
def login(self):
    """Log in to the ownCloud web UI and capture the session cookies.

    Flow: GET the front page to obtain a CSRF ``requesttoken``, POST the
    credentials to ``/index.php``, verify the response carries our
    username in ``data-user``, then harvest ``oc_token`` / session
    cookies into ``self.authorization``. Returns None in every case;
    failures are reported via xbmcgui dialogs and xbmc.log.
    """
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
    # default User-Agent ('Python-urllib/2.6') will *not* work
    opener.addheaders = [('User-Agent', self.user_agent)]
    url = self.protocol + self.domain + '/'
    # Fetch the login page (also seeds the cookie jar).
    try:
        response = opener.open(url)
    except urllib2.URLError, e:
        xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
        return
    response_data = response.read()
    response.close()
    requestToken = None
    # for cookie in self.cookiejar:
    #     for r in re.finditer(' ([^\=]+)\=([^\s]+)\s',
    #                          str(cookie), re.DOTALL):
    #         cookieType,cookieValue = r.groups()
    #         if cookieType == 'oc_token':
    #             self.authorization.setToken('auth_token',cookieValue)
    #         elif cookieType != 'oc_remember_login' and cookieType != 'oc_username':
    #             self.authorization.setToken('auth_session',cookieType + '=' + cookieValue)
    #
    #     return
    # Extract the CSRF token; ownCloud 7 and newer versions use different markup.
    #owncloud7
    for r in re.finditer('name=\"requesttoken\" value\=\"([^\"]+)\"',
                         response_data, re.DOTALL):
        requestToken = r.group(1)
    if requestToken == None:
        # Newer ownCloud: token lives in a data-requesttoken attribute.
        for r in re.finditer('data-requesttoken\=\"([^\"]+)\"',
                             response_data, re.DOTALL):
            requestToken = r.group(1)
    if requestToken != '':
        self.authorization.setToken('auth_requesttoken', requestToken)
    url = self.protocol + self.domain + '/index.php'
    values = {
        'password': self.addon.getSetting(self.instanceName + '_password'),
        'user': self.authorization.username,
        'remember_login': 1,
        'requesttoken': requestToken,
        'timezone-offset': -4,
    }
    # try login
    try:
        response = opener.open(url, urllib.urlencode(values))
    except urllib2.URLError, e:
        if e.code == 403:
            #login denied
            xbmcgui.Dialog().ok(self.addon.getLocalizedString(30000), self.addon.getLocalizedString(30017))
        xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
        return
    response_data = response.read()
    response.close()
    loginResult = 0
    #validate successful login
    # A logged-in page carries data-user="<username>".
    for r in re.finditer('(data-user)\=\"([^\"]+)\"',
                         response_data, re.DOTALL):
        loginType, loginResult = r.groups()
    if (loginResult == 0 or loginResult.lower() != self.authorization.username.lower()):
        xbmcgui.Dialog().ok(self.addon.getLocalizedString(30000), self.addon.getLocalizedString(30017))
        xbmc.log(self.addon.getAddonInfo('name') + ': ' + 'login failed', xbmc.LOGERROR)
        return
    # Harvest cookies. ownCloud 8.2 concatenates the session cookies into
    # one string; older versions store them one at a time.
    if (self.version == self.OWNCLOUD_V82):
        sessionString = ''
        for cookie in self.cookiejar:
            for r in re.finditer(' ([^\=]+)\=([^\s]+)\s',
                                 str(cookie), re.DOTALL):
                cookieType, cookieValue = r.groups()
                if cookieType == 'oc_token':
                    self.authorization.setToken('auth_token', cookieValue)
                # NOTE(review): the duplicated `cookieType != 'oc_token'` test
                # below is redundant (the elif already excludes it) -- harmless.
                elif cookieType != 'oc_remember_login' and cookieType != 'oc_username' and cookieType != 'oc_token' and cookieType != 'oc_token':
                    sessionString = str(sessionString) + str(cookieType) + '=' + str(cookieValue) + '; '
        self.authorization.setToken('auth_session', sessionString)
    else:
        for cookie in self.cookiejar:
            for r in re.finditer(' ([^\=]+)\=([^\s]+)\s',
                                 str(cookie), re.DOTALL):
                cookieType, cookieValue = r.groups()
                if cookieType == 'oc_token':
                    self.authorization.setToken('auth_token', cookieValue)
                elif cookieType != 'oc_remember_login' and cookieType != 'oc_username':
                    self.authorization.setToken('auth_session', cookieType + '=' + cookieValue)
    return
0
Example 83
Project: frappe Source File: api.py
def handle():
    """
    Handler for `/api` methods
    ### Examples:
    `/api/method/{methodname}` will call a whitelisted method
    `/api/resource/{doctype}` will query a table
    examples:
    - `?fields=["name", "owner"]`
    - `?filters=[["Task", "name", "like", "%005"]]`
    - `?limit_start=0`
    - `?limit_page_length=20`
    `/api/resource/{doctype}/{name}` will point to a resource
    `GET` will return doclist
    `POST` will insert
    `PUT` will update
    `DELETE` will delete
    `/api/resource/{doctype}/{name}?run_method={method}` will run a whitelisted controller method
    """
    form_dict = frappe.local.form_dict
    # --- Optional OAuth2 bearer-token authentication -----------------------
    authorization_header = frappe.get_request_header("Authorization").split(" ") if frappe.get_request_header("Authorization") else None
    if authorization_header and authorization_header[0].lower() == "bearer":
        token = authorization_header[1]
        r = frappe.request
        parsed_url = urlparse(r.url)
        access_token = {"access_token": token}
        # Rebuild the URI with the token as a query arg, the form oauthlib expects.
        uri = parsed_url.scheme + "://" + parsed_url.netloc + parsed_url.path + "?" + urlencode(access_token)
        http_method = r.method
        body = r.get_data()
        headers = r.headers
        required_scopes = frappe.db.get_value("OAuth Bearer Token", token, "scopes").split(";")
        valid, oauthlib_request = oauth_server.verify_request(uri, http_method, body, headers, required_scopes)
        if valid:
            # Switch session to the token's owner; restore the form dict
            # since set_user resets frappe.local.
            frappe.set_user(frappe.db.get_value("OAuth Bearer Token", token, "user"))
            frappe.local.form_dict = form_dict
    # --- Route on the path: /api/<call>/<doctype>/<name> -------------------
    parts = frappe.request.path[1:].split("/", 3)
    call = doctype = name = None
    if len(parts) > 1:
        call = parts[1]
    if len(parts) > 2:
        doctype = parts[2]
    if len(parts) > 3:
        name = parts[3]
    if call == "method":
        # /api/method/<methodname>: delegate to the whitelisted-method handler.
        frappe.local.form_dict.cmd = doctype
        return frappe.handler.handle()
    elif call == "resource":
        if "run_method" in frappe.local.form_dict:
            # /api/resource/<doctype>/<name>?run_method=<m>: invoke a
            # whitelisted controller method on the document.
            method = frappe.local.form_dict.pop("run_method")
            doc = frappe.get_doc(doctype, name)
            doc.is_whitelisted(method)
            if frappe.local.request.method == "GET":
                if not doc.has_permission("read"):
                    frappe.throw(_("Not permitted"), frappe.PermissionError)
                frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
            if frappe.local.request.method == "POST":
                if not doc.has_permission("write"):
                    frappe.throw(_("Not permitted"), frappe.PermissionError)
                frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
                frappe.db.commit()
        else:
            if name:
                # Single-document CRUD.
                if frappe.local.request.method == "GET":
                    doc = frappe.get_doc(doctype, name)
                    if not doc.has_permission("read"):
                        raise frappe.PermissionError
                    frappe.local.response.update({"data": doc})
                if frappe.local.request.method == "PUT":
                    data = json.loads(frappe.local.form_dict.data)
                    doc = frappe.get_doc(doctype, name)
                    if "flags" in data:
                        del data["flags"]
                    # Not checking permissions here because it's checked in doc.save
                    doc.update(data)
                    frappe.local.response.update({
                        "data": doc.save().as_dict()
                    })
                    frappe.db.commit()
                if frappe.local.request.method == "DELETE":
                    # Not checking permissions here because it's checked in delete_doc
                    frappe.delete_doc(doctype, name)
                    # 202 Accepted on delete.
                    frappe.local.response.http_status_code = 202
                    frappe.local.response.message = "ok"
                    frappe.db.commit()
            elif doctype:
                # Collection-level list/insert.
                if frappe.local.request.method == "GET":
                    if frappe.local.form_dict.get('fields'):
                        frappe.local.form_dict['fields'] = json.loads(frappe.local.form_dict['fields'])
                    frappe.local.form_dict.setdefault('limit_page_length', 20)
                    frappe.local.response.update({
                        "data": frappe.call(frappe.client.get_list,
                                            doctype, **frappe.local.form_dict)})
                if frappe.local.request.method == "POST":
                    data = json.loads(frappe.local.form_dict.data)
                    data.update({
                        "doctype": doctype
                    })
                    frappe.local.response.update({
                        "data": frappe.get_doc(data).insert().as_dict()
                    })
                    frappe.db.commit()
            else:
                raise frappe.DoesNotExistError
    else:
        raise frappe.DoesNotExistError
    return build_response("json")
0
Example 84
Project: zorna Source File: api.py
def initialize_context(request):
    """Build the template context shared by the community message views.

    Collects the request filters (community, member, message, search string,
    pagination), the communities the user may see, their members, the top-10
    contributors with avatars, and a human-readable page title.

    :param request: Django request; filters are read from ``request.REQUEST``
    :returns: dict of context values (communities, members, contributors, ...)
    """
    ret = {}
    ret['all_msg'] = request.REQUEST.get("all_msg", 'all')
    ret['community_id'] = request.REQUEST.get("community_id", 0)
    ret['search_string'] = request.REQUEST.get("search_string", '')
    ret['search_string'] = ret['search_string'].encode('utf8')
    ret['member_id'] = request.REQUEST.get("member_id", 0)
    ret['message_id'] = request.REQUEST.get("message_id", 0)
    ret['contenttype_id'] = request.REQUEST.get("contenttype_id", 0)
    ret['current_query_string'] = urllib.urlencode(ret)
    # Next lines must be after previous line
    # (com_page/from_id are pagination state and must NOT end up in the
    # serialized query string above).
    ret['com_page'] = request.REQUEST.get("com_page", 0)
    ret['from_id'] = request.REQUEST.get("from_id", 0)
    if ret['from_id'] == '':
        ret['from_id'] = 0
    else:
        ret['from_id'] = int(ret['from_id'])
    # Communities visible to the user: union of 'member' and 'manage' ACLs.
    ao = set([])
    ao_member = get_allowed_objects(request.user, Community, 'member')
    ao = ao.union(set(ao_member))
    ao_manage = get_allowed_objects(request.user, Community, 'manage')
    ao = ao.union(set(ao_manage))
    ret['communities'] = Community.objects.filter(id__in=ao).order_by('name')
    # Simplified: was `True if len(ao_manage) else False`.
    ret['user_manager'] = bool(ao_manage)
    ccom = int(ret['community_id'])
    ret['current_community'] = None
    for com in ret['communities']:
        com.manager = com.pk in ao_manage
        com.member = com.pk in ao_member
        if ccom != 0 and ccom == com.pk:
            ret['current_community'] = com
    # Restrict to the selected community when one is chosen and visible.
    if int(ret['community_id']) != 0 and ret['current_community'] and ret['current_community'] in ret['communities']:
        communities = [ret['current_community']]
    else:
        communities = ret['communities']
        ret['community_id'] = 0
    # Autocomplete data: groups first ("g-<id>"), then users ("u-<id>").
    data = []
    ret['members'] = set([])
    for com in communities:
        ret['members'] = ret['members'].union(
            set(get_acl_by_object(com, 'member')))
        ret['members'] = ret['members'].union(
            set(get_acl_by_object(com, 'manage')))
        data.append([com.name, "g-%s" % str(com.id)])
    data.extend([("%s %s" % (x.last_name, x.first_name), ("u-%s" % str(x.id)))
                 for x in ret['members']])
    ret['members_count'] = len(ret['members'])
    # Top-10 contributors, scoped to the selected community when set.
    if int(ret['community_id']):
        contributors = MessageCommunity.objects.values('owner').filter(
            Q(communities=int(ret['community_id'])) |
            Q(reply__communities=int(ret['community_id']))
        ).annotate(total=Count('owner')
        )
    else:
        contributors = MessageCommunity.objects.values(
            'owner').annotate(total=Count('owner'))
    contributors = contributors.order_by('-total')[0:10]
    # One avatar query for members + contributors combined.
    ret['users_avatars'] = {}
    for avatar in UserAvatar.objects.select_related().filter(user__pk__in=[u.pk for u in ret['members']] + [c['owner'] for c in contributors]):
        ret['users_avatars'][avatar.user_id] = avatar
    for m in ret['members']:
        # Fixed: dict.has_key() is deprecated (removed in Python 3); use .get().
        m.avatar = ret['users_avatars'].get(m.pk)
    for c in contributors:
        avatar = ret['users_avatars'].get(c['owner'])
        if avatar is not None:
            c['avatar'] = avatar
            c['user'] = avatar.user
        else:
            c['avatar'] = None
            c['user'] = User.objects.get(pk=c['owner'])
    ret['contributors'] = contributors
    ret['members_data'] = simplejson.dumps(data)
    # Human-readable title for the current filter.
    if ret['current_community']:
        ret['community_title'] = ret['current_community'].name
    elif ret['all_msg'] == 'followed':
        ret['community_title'] = _(u"Followed posts")
    elif ret['all_msg'] == 'last':
        ret['community_title'] = _(u"Recent messages")
    elif ret['all_msg'] == 'tome':
        ret['community_title'] = _(u"Direct to me")
    elif ret['all_msg'] == 'contributor':
        member = User.objects.get(pk=ret['member_id'])
        ret['community_title'] = member.get_full_name()
    elif ret['all_msg'] == 'contenttype':
        ret['community_title'] = _(u"Content type")
    elif ret['all_msg'] == 'message':
        ret['community_title'] = _(u"Message")
    else:
        ret['community_title'] = _(u"All")
    if int(ret['from_id']) != 0:
        ret['msg_sender'] = User.objects.get(pk=int(ret['from_id']))
    return ret
0
Example 85
Project: ganga Source File: feedback_report.py
def report(job=None):
""" Upload error reports (snapshot of configuration,job parameters, input/output files, command history etc.). Job argument is optional. """
import mimetypes
import urllib
import urllib2
import httplib
import string
import random
import sys
import os
import platform
import Ganga.GPIDev.Lib.Config.config as config
from Ganga.GPIDev.Base.VPrinter import full_print
import Ganga
# global variables that will print sumamry report to the user along with
# the download link
global JOB_REPORT, GANGA_VERSION, BACKEND_NAME, APPLICATION_NAME, PYTHON_PATH
JOB_REPORT = False
GANGA_VERSION = ''
BACKEND_NAME = ''
APPLICATION_NAME = ''
PYTHON_PATH = ''
def random_string(length):
return ''.join([random.choice(string.letters) for ii in range(length + 1)])
def encode_multipart_formdata(files):
boundary = random_string(30)
retnl = '\r\n'
lines = []
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
fields = {'title': 'Ganga Error Report'}
for (key, value) in fields.iteritems():
lines.append('--' + boundary)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for field_name, file in files.iteritems():
lines.append('--' + boundary)
lines.append(
'Content-Disposition: form-data; name="file"; filename="%s"' % (file))
lines.append('Content-Type: %s' % get_content_type(file))
lines.append('')
lines.append(open(file, 'rb').read())
lines.append('--' + boundary + '--')
lines.append('')
body = retnl.join(lines)
headers = {'content-type': 'multipart/form-data; boundary=%s' %
boundary, 'content-length': str(len(body))}
return body, headers
def make_upload_file(server):
def upload_file(path):
# print 'Uploading %r to %r' % (path, server)
data = {'MAX_FILE_SIZE': '3145728',
'sub': '',
'mode': 'regist'}
files = {'file': path}
send_post(server, files)
return upload_file
def send_post(url, files):
logger.debug("Sending Post to %s , containing %s" % (url, files))
encoded_data = encode_multipart_formdata(files)
data = urllib.urlencode(encoded_data[1])
req = urllib2.Request(url, data=data)
if req.has_data():
logger.debug("urllib2: Success!")
else:
logger.debug("urllib2: Fail!!!")
connection = httplib.HTTPConnection(req.get_host())
# connection.set_debuglevel(1)
logger.debug("Requesting: 'POST', %s, %s " % (url, encoded_data[1]))
# connection.request( method='POST', url=req.get_selector(), body=encoded_data[0], headers=encoded_data[1] )
connection.request(
method='POST', url=url, body=encoded_data[0], headers=encoded_data[1])
response = connection.getresponse()
logger.debug("httplib POST request response was: %s , because: %s" % (
response.status, response.reason))
responseResult = response.read()
#logger.debug("Responce.read(): --%s--" % responseResult )
responseResult = responseResult[
responseResult.find("<span id=\"download_path\""):]
startIndex = responseResult.find("path:") + 5
endIndex = responseResult.find("</span>")
logger.debug("Responce.read(): --%s--" %
responseResult[startIndex:endIndex])
logger.info(
'Your error report was uploaded to ganga developers with the following URL. ')
logger.info(
'You may include this URL and the following summary information in your bug report or in the support email to the developers.')
logger.info('')
logger.info('***' + str(responseResult[startIndex:endIndex]) + '***')
logger.info('')
global GANGA_VERSION, JOB_REPORT, APPLICATION_NAME, BACKEND_NAME, PYTHON_PATH
logger.info('Ganga Version : ' + GANGA_VERSION)
logger.info('Python Version : ' + "%s.%s.%s" %
(sys.version_info[0], sys.version_info[1], sys.version_info[2]))
logger.info('Operation System Version : ' + platform.platform())
if JOB_REPORT:
logger.info('Application Name : ' + APPLICATION_NAME)
logger.info('Backend Name : ' + BACKEND_NAME)
logger.info('Python Path : ' + PYTHON_PATH)
logger.info('')
JOB_REPORT = False
GANGA_VERSION = ''
BACKEND_NAME = ''
APPLICATION_NAME = ''
PYTHON_PATH = ''
def run_upload(server, path):
upload_file = make_upload_file(server)
upload_file(path)
def report_inner(job=None, isJob=False, isTask=False):
userInfoDirName = "userreport"
tempDirName = "reportsRepository"
# job relevant info
jobSummaryFileName = "jobsummary.txt"
jobFullPrintFileName = "jobfullprint.txt"
repositoryPath = "repository/$usr/LocalXML/6.0/jobs/$thousandsNumxxx"
# task relevant info
taskSummaryFileName = "tasksummary.txt"
taskFullPrintFileName = "taskfullprint.txt"
tasksRepositoryPath = "repository/$usr/LocalXML/6.0/tasks/$thousandsNumxxx"
# user's info
environFileName = "environ.txt"
userConfigFileName = "userconfig.txt"
defaultConfigFileName = "gangarc.txt"
ipythonHistoryFileName = "ipythonhistory.txt"
gangaLogFileName = "gangalog.txt"
jobsListFileName = "jobslist.txt"
tasksListFileName = "taskslist.txt"
thread_trace_file_name = 'thread_trace.html'
from Ganga.Utility import Config
uploadFileServer = Config.getConfig('Feedback')['uploadServer']
#uploadFileServer= "http://gangamon.cern.ch/django/errorreports/"
#uploadFileServer= "http://ganga-ai-02.cern.ch/django/errorreports/"
#uploadFileServer= "http://127.0.0.1:8000/errorreports"
def printDictionary(dictionary, file=sys.stdout):
for k, v in dictionary.iteritems():
print('%s: %s' % (k, v), file=file)
if k == 'PYTHONPATH':
global PYTHON_PATH
PYTHON_PATH = v
def extractFileObjects(fileName, targetDirectoryName):
try:
fileToRead = open(fileName, 'r')
try:
fileText = fileToRead.read()
import re
pattern = "File\(name=\'(.+?)\'"
matches = re.findall(pattern, fileText)
for fileName in matches:
fileName = os.path.expanduser(fileName)
targetFileName = os.path.join(
targetDirectoryName, os.path.basename(fileName))
shutil.copyfile(fileName, targetFileName)
finally:
fileToRead.close()
# except IOError, OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
def writeErrorLog(errorMessage):
try:
fileToWrite = open(errorLogPath, 'a')
try:
fileToWrite.write(errorMessage)
fileToWrite.write("\n")
except Exception as err:
logger.debug("Err: %s" % err)
raise
finally:
fileToWrite.close()
except Exception as err2:
logger.debug("Err: %s" % err2)
pass
def writeStringToFile(fileName, stringToWrite):
try:
# uncomment this to try the error logger
#fileName = '~/' + fileName
fileToWrite = open(fileName, 'w')
try:
fileToWrite.write(stringToWrite)
except Exception as err:
logger.debug("Err: %s" % err)
raise err
finally:
fileToWrite.close()
# except IOError:
except Exception as err:
logger.debug("Err2: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
def renameDataFiles(directory):
for fileName in os.listdir(directory):
fullFileName = os.path.join(directory, fileName)
if os.path.isfile(fullFileName):
if fileName == 'data':
os.rename(fullFileName, fullFileName + '.txt')
else:
renameDataFiles(fullFileName)
import shutil
import tarfile
import tempfile
import os
userHomeDir = os.getenv("HOME")
tempDir = tempfile.mkdtemp()
errorLogPath = os.path.join(tempDir, 'reportErrorLog.txt')
fullPathTempDir = os.path.join(tempDir, tempDirName)
fullLogDirName = ''
# create temp dir and specific dir for the job/user
try:
if not os.path.exists(fullPathTempDir):
os.mkdir(fullPathTempDir)
import datetime
now = datetime.datetime.now()
userInfoDirName = userInfoDirName + \
now.strftime("%Y-%m-%d-%H:%M:%S")
fullLogDirName = os.path.join(fullPathTempDir, userInfoDirName)
# if report directory exists -> delete it's content(we would like
# last version of the report)
if os.path.exists(fullLogDirName):
shutil.rmtree(fullLogDirName)
os.mkdir(fullLogDirName)
# except OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import os.environ in a file
fullEnvironFileName = os.path.join(fullLogDirName, environFileName)
try:
inputFile = open(fullEnvironFileName, 'w')
try:
printDictionary(os.environ, file=inputFile)
print('OS VERSION : ' + platform.platform(), file=inputFile)
finally:
inputFile.close()
# except IOError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import user config in a file
userConfigFullFileName = os.path.join(
fullLogDirName, userConfigFileName)
try:
inputFile = open(userConfigFullFileName, 'w')
try:
print("#GANGA_VERSION = %s" %
config.System.GANGA_VERSION, file=inputFile)
global GANGA_VERSION
GANGA_VERSION = config.System.GANGA_VERSION
# this gets the default values
# Ganga.GPIDev.Lib.Config.Config.print_config_file()
# this should get the changed values
for c in config:
print(config[c], file=inputFile)
finally:
inputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# write gangarc - default configuration
defaultConfigFullFileName = os.path.join(
fullLogDirName, defaultConfigFileName)
try:
outputFile = open(os.path.join(userHomeDir, '.gangarc'), 'r')
try:
writeStringToFile(defaultConfigFullFileName, outputFile.read())
finally:
outputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import ipython history in a file
try:
ipythonFile = open(
os.path.join(os.environ['IPYTHONDIR'], 'history'), 'r')
try:
lastIPythonCommands = ipythonFile.readlines()[-20:]
writeStringToFile(os.path.join(
fullLogDirName, ipythonHistoryFileName), '\n'.join(lastIPythonCommands))
#writeStringToFile(os.path.join(fullLogDirName, ipythonHistoryFileName), ipythonFile.read())
finally:
ipythonFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import gangalog in a file
userLogFileLocation = config["Logging"]._logfile
userLogFileLocation = os.path.expanduser(userLogFileLocation)
try:
gangaLogFile = open(userLogFileLocation, 'r')
try:
writeStringToFile(
os.path.join(fullLogDirName, gangaLogFileName), gangaLogFile.read())
finally:
gangaLogFile.close()
# except IOError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import the result of jobs command in the report
jobsListFullFileName = os.path.join(fullLogDirName, jobsListFileName)
try:
outputFile = open(jobsListFullFileName, 'w')
try:
from Ganga.Core.GangaRegistry import getRegistryProxy
print(getRegistryProxy('jobs'), file=outputFile)
finally:
outputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import the result of tasks command in the report
tasksListFullFileName = os.path.join(fullLogDirName, tasksListFileName)
try:
outputFile = open(tasksListFullFileName, 'w')
try:
from Ganga.Core.GangaRegistry import getRegistryProxy
print(getRegistryProxy('tasks'), file=outputFile)
finally:
outputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# save it here because we will change fullLogDirName, but we want this
# to be the archive and to be deleted
folderToArchive = fullLogDirName
# import job relevant info
if (job is not None and isJob):
global JOB_REPORT, APPLICATION_NAME, BACKEND_NAME
JOB_REPORT = True
APPLICATION_NAME = getName(job.application)
BACKEND_NAME = getName(job.backend)
# create job folder
jobFolder = 'job_%s' % job.fqid
fullLogDirName = os.path.join(fullLogDirName, jobFolder)
os.mkdir(fullLogDirName)
# import job summary in a file
fullJobSummaryFileName = os.path.join(
fullLogDirName, jobSummaryFileName)
writeStringToFile(fullJobSummaryFileName, job)
# import job full print in a file
fullJobPrintFileName = os.path.join(
fullLogDirName, jobFullPrintFileName)
try:
inputFile = open(fullJobPrintFileName, 'w')
try:
full_print(job, inputFile)
finally:
inputFile.close()
# except IOError, OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# extract file objects
try:
fileObjectsPath = os.path.join(fullLogDirName, 'fileobjects')
os.mkdir(fileObjectsPath)
extractFileObjects(fullJobSummaryFileName, fileObjectsPath)
# except OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy dir of the job ->input/output and subjobs
try:
parentDir, currentDir = os.path.split(job.inputdir[:-1])
workspaceDir = os.path.join(fullLogDirName, 'workspace')
shutil.copytree(parentDir, workspaceDir)
# except IOError, OSError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy shared area of the job
try:
if hasattr(job.application, 'is_prepared'):
if job.application.is_prepared is not None and job.application.is_prepared is not True:
import os
from Ganga.Utility.Config import getConfig
from Ganga.Utility.files import expandfilename
shared_path = os.path.join(expandfilename(getConfig(
'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
shareddir = os.path.join(
shared_path, job.application.is_prepared.name)
if os.path.isdir(shareddir):
sharedAreaDir = os.path.join(
fullLogDirName, 'sharedarea')
shutil.copytree(shareddir, sharedAreaDir)
# except IOError, OSError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy repository job file
try:
indexFileName = str(job.id) + '.index'
repositoryPath = repositoryPath.replace(
'$usr', os.getenv("USER"))
# check if the job is subjob -> different way of forming the
# path to the repository
is_subjob = job.fqid.find('.') > -1
if is_subjob:
jobid, subjobid = job.fqid.split(
'.')[0], job.fqid.split('.')[1]
repositoryPath = repositoryPath.replace(
'$thousandsNum', str(int(jobid) / 1000))
repositoryPath = os.path.join(repositoryPath, jobid)
else:
repositoryPath = repositoryPath.replace(
'$thousandsNum', str(job.id / 1000))
repositoryFullPath = os.path.join(
config.Configuration.gangadir, repositoryPath)
indexFileSourcePath = os.path.join(
repositoryFullPath, indexFileName)
repositoryFullPath = os.path.join(
repositoryFullPath, str(job.id))
repositoryTargetPath = os.path.join(
fullLogDirName, 'repository', str(job.id))
os.mkdir(os.path.join(fullLogDirName, 'repository'))
shutil.copytree(repositoryFullPath, repositoryTargetPath)
# data files are copied but can not be opened -> add .txt to
# their file names
renameDataFiles(repositoryTargetPath)
if not is_subjob:
# copy .index file
indexFileTargetPath = os.path.join(
fullLogDirName, 'repository', indexFileName)
shutil.copyfile(indexFileSourcePath, indexFileTargetPath)
# except OSError, IOError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import task relevant info
if (job is not None and isTask):
# job is actually a task object
task = job
# create task folder
taskFolder = 'task_%s' % task.id
fullLogDirName = os.path.join(fullLogDirName, taskFolder)
os.mkdir(fullLogDirName)
# import task summary in a file
fullTaskSummaryFileName = os.path.join(
fullLogDirName, taskSummaryFileName)
writeStringToFile(fullTaskSummaryFileName, str(task))
# import task full print in a file
fullTaskPrintFileName = os.path.join(
fullLogDirName, taskFullPrintFileName)
try:
inputFile = open(fullTaskPrintFileName, 'w')
try:
full_print(task, inputFile)
except Exception as err:
logger.debug("Err: %s" % err)
raise err
finally:
inputFile.close()
# except IOError, OSError:
except Exception as err:
logger.debug("Err2: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy shared area of the task
try:
if len(task.transforms) > 0:
if hasattr(task.transforms[0], 'application') and hasattr(task.transforms[0].application, 'is_prepared'):
if task.transforms[0].application.is_prepared is not None and task.transforms[0].application.is_prepared is not True:
import os
from Ganga.Utility.Config import getConfig
from Ganga.Utility.files import expandfilename
shared_path = os.path.join(expandfilename(getConfig(
'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
shareddir = os.path.join(
shared_path, task.transforms[0].application.is_prepared.name)
if os.path.isdir(shareddir):
sharedAreaDir = os.path.join(
fullLogDirName, 'sharedarea')
shutil.copytree(shareddir, sharedAreaDir)
# except IOError, OSError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy repository task file
try:
indexFileName = str(task.id) + '.index'
tasksRepositoryPath = tasksRepositoryPath.replace(
'$usr', os.getenv("USER"))
tasksRepositoryPath = tasksRepositoryPath.replace(
'$thousandsNum', str(task.id / 1000))
repositoryFullPath = os.path.join(
config.Configuration.gangadir, tasksRepositoryPath)
indexFileSourcePath = os.path.join(
repositoryFullPath, indexFileName)
repositoryFullPath = os.path.join(
repositoryFullPath, str(task.id))
repositoryTargetPath = os.path.join(
fullLogDirName, 'repository', str(task.id))
os.mkdir(os.path.join(fullLogDirName, 'repository'))
shutil.copytree(repositoryFullPath, repositoryTargetPath)
# data files are copied but can not be opened -> add .txt to
# their file names
renameDataFiles(repositoryTargetPath)
# copy .index file
indexFileTargetPath = os.path.join(
fullLogDirName, 'repository', indexFileName)
shutil.copyfile(indexFileSourcePath, indexFileTargetPath)
# except OSError, IOError:
except Exception as err:
logger.debug("Err %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# Copy thread stack trace file
try:
thread_trace_source_path = os.path.join(getConfig('Configuration')['gangadir'], thread_trace_file_name)
thread_trace_target_path = os.path.join(fullLogDirName, thread_trace_file_name)
shutil.copyfile(thread_trace_source_path, thread_trace_target_path)
except (OSError, IOError) as err:
logger.debug('Err %s', err)
writeErrorLog(str(sys.exc_info()[1]))
resultArchive = '%s.tar.gz' % folderToArchive
try:
resultFile = tarfile.TarFile.open(resultArchive, 'w:gz')
try:
resultFile.add(
folderToArchive, arcname=os.path.basename(folderToArchive))
# put the error log in the archive
if(os.path.exists(errorLogPath)):
resultFile.add(
errorLogPath, arcname=os.path.basename(errorLogPath))
except Exception as err:
logger.debug("Err: %s" % err)
raise
finally:
resultFile.close()
except Exception as err:
logger.debug("Err2: %s" % err)
raise # pass
# remove temp dir
if(os.path.exists(folderToArchive)):
shutil.rmtree(folderToArchive)
# print the error if there is something
if os.path.exists(errorLogPath):
logger.error('')
logger.error('An error occured while collecting report information : ' + open(errorLogPath, 'r').read())
logger.error('')
# delete the errorfile from user's pc
if(os.path.exists(errorLogPath)):
os.remove(errorLogPath)
# return the path to the archive and the path to the upload server
return (resultArchive, uploadFileServer, tempDir)
def removeTempFiles(tempDir):
    """Best-effort removal of the report's temporary artifacts.

    Deletes *tempDir* (the directory holding the unpacked report) and any
    leftover Django upload spool files (files whose name contains
    '.upload') in /tmp; Django stores uploads bigger than 2.5 MB in such
    temp files during the upload.

    Cleanup is deliberately best effort: a path we cannot remove (e.g. a
    file owned by another user in the shared /tmp) is skipped instead of
    raising, so calling this from an exception path never masks the
    original error.
    """
    import shutil
    # Remove the report's temp dir, if it still exists.
    if os.path.exists(tempDir):
        # ignore_errors: partial cleanup is acceptable here.
        shutil.rmtree(tempDir, ignore_errors=True)
    # Remove temp files from the Django upload spool area.
    userTempDir = '/tmp/'
    for fileName in os.listdir(userTempDir):
        if '.upload' in fileName:
            try:
                os.remove(os.path.join(userTempDir, fileName))
            except OSError:
                # Best effort: skip files we are not allowed to delete.
                pass
tempDir = ''
# call the report function
try:
isJob = isTask = False
# make typecheck of the param passed
if job is not None:
from Ganga.GPIDev.Lib.Job.Job import Job
from Ganga.GPIDev.Base.Proxy import stripProxy
isJob = isinstance(stripProxy(job), Job)
if hasattr(stripProxy(job), '_category') and (stripProxy(job)._category == 'tasks'):
isTask = True
if not (isJob or isTask):
logger.error("report() function argument should be reference to a job or task object")
return
resultArchive, uploadFileServer, tempDir = report_inner(
job, isJob, isTask)
report_bytes = os.path.getsize(resultArchive)
if report_bytes > 1024 * 1024 * 100: # if bigger than 100MB
logger.error(
'The report is bigger than 100MB and can not be uploaded')
else:
run_upload(server=uploadFileServer, path=resultArchive)
except Exception as err:
logger.debug("Err: %s" % err)
removeTempFiles(tempDir)
raise # pass
0
Example 86
def request(method, url, params=None, data=None, headers=None, cookies=None,
            files=None, auth=None, timeout=60, allow_redirects=False,
            stream=False):
    """Open an HTTP(S) connection and return a :class:`Response`.

    :param method: ``'GET'`` or ``'POST'``
    :type method: ``unicode``
    :param url: URL to request
    :type url: ``unicode``
    :param params: URL query parameters
    :type params: :class:`dict`
    :param data: form data ``{'field_name': 'value'}`` or a raw body
    :type data: :class:`dict` or :class:`str`
    :param headers: HTTP headers to send
    :type headers: :class:`dict`
    :param cookies: cookies to send to server (currently unused)
    :type cookies: :class:`dict`
    :param files: files to upload; maps HTML form field name to a dict
        with ``filename``, ``content`` and optional ``mimetype`` keys.
        A missing ``mimetype`` is guessed via :mod:`mimetypes`, falling
        back to ``application/octet-stream``.
    :type files: :class:`dict`
    :param auth: ``(username, password)`` pair for HTTP Basic auth
    :type auth: ``tuple``
    :param timeout: socket timeout limit in seconds
    :type timeout: ``int``
    :param allow_redirects: whether redirects are followed
    :type allow_redirects: ``Boolean``
    :param stream: stream the content instead of reading it eagerly
    :type stream: ``bool``
    :returns: :class:`Response` object
    """
    # TODO: cookies
    socket.setdefaulttimeout(timeout)

    # Assemble the urllib2 handler chain for this request.
    handler_chain = []
    if not allow_redirects:
        handler_chain.append(NoRedirectHandler())
    if auth is not None:
        # Register HTTP Basic credentials for this URL.
        user, pwd = auth
        pw_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        pw_manager.add_password(None, url, user, pwd)
        handler_chain.append(urllib2.HTTPBasicAuthHandler(pw_manager))
    # Install our custom chain of openers (process-wide side effect).
    urllib2.install_opener(urllib2.build_opener(*handler_chain))

    # Normalise headers to a case-insensitive mapping with our defaults.
    headers = CaseInsensitiveDictionary(headers) if headers else CaseInsensitiveDictionary()
    if 'user-agent' not in headers:
        headers['user-agent'] = USER_AGENT
    # Advertise gzip support unless the caller already listed it.
    accepted = [enc.strip() for enc in
                headers.get('accept-encoding', '').split(',')]
    if 'gzip' not in accepted:
        accepted.append('gzip')
    headers['accept-encoding'] = ', '.join(accepted)

    if method == 'POST' and not data:
        # Force POST by providing an empty data string
        data = ''
    if files:
        # Multipart upload; POST form fields ride along with the files.
        new_headers, data = encode_multipart_formdata(data or {}, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urllib.urlencode(str_dict(data))

    # Make sure everything is encoded text
    headers = str_dict(headers)
    if isinstance(url, unicode):
        url = url.encode('utf-8')

    if params:  # GET args (POST args are handled in encode_multipart_formdata)
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        if query:
            # `params` take precedence over the URL's own query string.
            merged = urlparse.parse_qs(query)
            merged.update(params)
            params = merged
        query = urllib.urlencode(str_dict(params), doseq=True)
        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    return Response(urllib2.Request(url, data, headers), stream)
0
Example 87
Project: youtrack-githook Source File: connection.py
def importIssues(self, projectId, assigneeGroup, issues):
    """Import issues into a project; returns the import result.

    See http://confluence.jetbrains.net/display/YTD2/Import+Issues
    Accepts the return value of getIssues().
    Example: importIssues([{'numberInProject':'1', 'summary':'some problem', 'description':'some description', 'priority':'1',
    'fixedVersion':['1.0', '2.0'],
    'comment':[{'author':'yamaxim', 'text':'comment text', 'created':'1267030230127'}]},
    {'numberInProject':'2', 'summary':'some problem', 'description':'some description', 'priority':'1'}])

    NOTE(review): this method is Python 2 only (`unicode`, `print`
    statements below).
    """
    # Nothing to do for an empty batch.
    if len(issues) <= 0:
        return
    # Fields produced by getIssue() that the import endpoint rejects.
    bad_fields = ['id', 'projectShortName', 'votes', 'commentsCount',
                  'historyUpdated', 'updatedByFullName', 'updaterFullName',
                  'reporterFullName', 'links', 'attachments', 'jiraId']
    # When time tracking is enabled the time-spent field is server-managed,
    # so it must not be imported either.
    tt_settings = self.getProjectTimeTrackingSettings(projectId)
    if tt_settings and tt_settings.Enabled and tt_settings.TimeSpentField:
        bad_fields.append(tt_settings.TimeSpentField)
    # Build the <issues> payload by string concatenation; each issue's
    # record is also kept in issue_records for later error reporting.
    xml = '<issues>\n'
    issue_records = dict([])
    for issue in issues:
        record = ""
        record += ' <issue>\n'
        comments = None
        # Comments may come from a getComments() method or from a
        # 'comments' key on the issue mapping (handled below).
        if getattr(issue, "getComments", None):
            comments = issue.getComments()
        for issueAttr in issue:
            attrValue = issue[issueAttr]
            if attrValue is None:
                continue
            # Encode unicode to UTF-8 bytes (Python 2 semantics).
            if isinstance(attrValue, unicode):
                attrValue = attrValue.encode('utf-8')
            if isinstance(issueAttr, unicode):
                issueAttr = issueAttr.encode('utf-8')
            if issueAttr == 'comments':
                comments = attrValue
            else:
                # ignore bad fields from getIssue()
                if issueAttr not in bad_fields:
                    record += ' <field name="' + issueAttr + '">\n'
                    # Multi-valued fields emit one <value> per element.
                    if isinstance(attrValue, list) or getattr(attrValue, '__iter__', False):
                        for v in attrValue:
                            if isinstance(v, unicode):
                                v = v.encode('utf-8')
                            record += ' <value>' + escape(v.strip()) + '</value>\n'
                    else:
                        record += ' <value>' + escape(attrValue.strip()) + '</value>\n'
                    record += ' </field>\n'
        if comments:
            # Each comment becomes a <comment .../> element whose dict
            # entries map to XML attributes (values quoted via quoteattr).
            for comment in comments:
                record += ' <comment'
                for ca in comment:
                    val = comment[ca]
                    if isinstance(ca, unicode):
                        ca = ca.encode('utf-8')
                    if isinstance(val, unicode):
                        val = val.encode('utf-8')
                    record += ' ' + ca + '=' + quoteattr(val)
                record += '/>\n'
        record += ' </issue>\n'
        xml += record
        issue_records[issue.numberInProject] = record
    xml += '</issues>'
    #print xml
    #TODO: convert response xml into python objects
    if isinstance(xml, unicode):
        xml = xml.encode('utf-8')
    if isinstance(assigneeGroup, unicode):
        assigneeGroup = assigneeGroup.encode('utf-8')
    url = '/import/' + urlquote(projectId) + '/issues?' + urllib.urlencode({'assigneeGroup': assigneeGroup})
    if isinstance(url, unicode):
        url = url.encode('utf-8')
    result = self._reqXml('PUT', url, xml, 400)
    # Empty response for a multi-issue batch: retry each issue on its own
    # so one bad issue does not sink the whole batch.
    if (result == "") and (len(issues) > 1):
        for issue in issues:
            self.importIssues(projectId, assigneeGroup, [issue])
    response = ""
    try:
        response = result.toxml().encode('utf-8')
    # NOTE(review): bare except silently covers any toxml()/encode failure.
    except:
        sys.stderr.write("can't parse response")
        sys.stderr.write("request was")
        sys.stderr.write(xml)
        return response
    item_elements = minidom.parseString(response).getElementsByTagName("item")
    if len(item_elements) != len(issues):
        # Item count mismatch: dump the raw response for diagnosis.
        sys.stderr.write(response)
    else:
        for item in item_elements:
            # Pre-existing: `id` shadows the builtin inside this loop.
            id = item.attributes["id"].value
            imported = item.attributes["imported"].value.lower()
            if imported == "true":
                print "Issue [ %s-%s ] imported successfully" % (projectId, id)
            else:
                # Report the failure together with the record we sent.
                sys.stderr.write("")
                sys.stderr.write("Failed to import issue [ %s-%s ]." % (projectId, id))
                sys.stderr.write("Reason : ")
                sys.stderr.write(item.toxml())
                sys.stderr.write("Request was :")
                if isinstance(issue_records[id], unicode):
                    sys.stderr.write(issue_records[id].encode('utf-8'))
                else:
                    sys.stderr.write(issue_records[id])
                print ""
    return response
0
Example 88
Project: canvas Source File: views.py
def get_signup_context(request, skip_invite_code=None, template="user/signup.django.html",
                       cookies_to_set={}, cookies_to_delete=[]):
    """
    Returns an error context (or dict) if the signup is not successful.
    Returns `None` for successful signups.
    `cookies_to_set` and `cookies_to_delete` should be passed empty so that this function may append to them.
    `cookies_to_set` is for session cookies, to tie into after_signup.py / after_signup.js.

    NOTE(review): the mutable defaults ({} / []) are shared across calls if a
    caller ever omits them; the contract above expects fresh lists each call
    -- confirm all callers pass them explicitly.
    """
    # Short-code bypass lets selected users skip the invite-code requirement.
    skip_invite_code = skip_invite_code or request.GET.get('skip_invite_code', '').lower()
    bypass_copy = settings.SHORT_CODE_COPY.get(skip_invite_code)
    skippable_codes = (['dicksoup', 'angelgate', 'friends_and_family', 'herpderp', 'fffffat', 'buzzfeedbrews']
                       + settings.SHORT_CODES)
    # Build the login URL, forwarding `next` (and remaining GET params) so a
    # user who switches to sign-in lands back where they started.
    login_url = '/login'
    if request.REQUEST.get('next'):
        next = request.REQUEST['next']
        params = [urllib.urlencode({'next': next})]
        if request.method == 'POST':
            next_params = request.POST.get('next_params', '')
        else:
            next_params = request.GET.copy()
            del next_params['next']
            next_params = urllib.urlencode(next_params)
        if next_params:
            params.append(next_params)
        login_url = login_url + '?' + u'&'.join(params)
    # Facebook context; users not logged into Facebook get (None, None).
    try:
        fb_user, fb_api = util.get_fb_api(request)
    except NotLoggedIntoFacebookError:
        fb_user, fb_api = None, None
    fb_uid = fb_user.get('uid') if fb_user else None
    # A Facebook invite may arrive via the fb_message_id cookie or, failing
    # that, be looked up by the user's Facebook uid.
    fb_invite = None
    if request.COOKIES.get('fb_message_id'):
        fb_invite = FacebookInvite.objects.get_or_none(fb_message_id=request.COOKIES.get('fb_message_id'))
        cookies_to_delete.append('fb_message_id')
    if not fb_invite and fb_uid:
        fb_invite = FacebookInvite.get_invite(fb_user.get('uid'))
    if request.method == 'GET':
        # GET: render the signup form -- the whole local namespace becomes
        # the template context.
        return locals()
    # POST: attempt the actual signup.
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    email = request.POST.get('email', '')
    if not fb_uid:
        fb_uid = request.POST.get('facebook_id', None)
    code = InviteCode.objects.get_or_none(code=request.POST.get('code'))
    # `context=locals()` snapshots the locals at THIS point of each call
    # (the default is evaluated when the `def` statement executes), so
    # variables assigned later are not part of the error context.
    def error(message, context=locals()):
        context['message'] = message
        Metrics.signup_form_invalid.record(request)
        return context
    # --- validation guards: each failure returns an error context ---
    if check_rate_limit(request, username):
        return error("Too many failed signup attempts. Wait a minute and try again.")
    if not password:
        return error("Password required.")
    if not User.validate_password(password):
        return error("Sorry, your password is too short. Please use 5 or more characters.")
    error_msg = User.validate_username(username)
    if error_msg:
        return error(error_msg)
    if not User.validate_email(email):
        return error("Please enter a valid email address.")
    if not User.email_is_unused(email):
        return error("This email address is already in use. Try <a href='/login'>signing in</a> "
                     "or <a href='/password_reset'>resetting</a> your password if you've forgotten it.")
    if fb_uid and not UserInfo.facebook_is_unused(fb_uid):
        return error("This Facebook account is already in use. Try <a href='/login'>signing in</a> "
                     "or <a href='/password_reset'>resetting</a> your password if you've forgotten it.")
    # Create the account; a username race surfaces as IntegrityError.
    try:
        user = User.objects.create_user(username, email, password)
    except IntegrityError:
        return error("Username taken.")
    if not fb_uid:
        # Normalize a falsy facebook_id ('' or missing) to None.
        fb_uid = None
    UserInfo(user=user, invite_bypass=skip_invite_code,
             facebook_id=fb_uid, enable_timeline=True).save()
    # Tie any consumed invites to the new account.
    if code:
        code.invitee = user
        code.save()
    if fb_invite:
        fb_invite.invitee = user
        fb_invite.save()
    user = auth.authenticate(username=username, password=password)
    # Handle following featured groups and optionally one defined by their short code.
    if skip_invite_code:
        autofollow = settings.SHORT_CODE_AUTOFOLLOW.get(skip_invite_code)
        if autofollow:
            # NOTE(review): `to_follow` is not defined anywhere in this
            # function -- this raises NameError whenever autofollow is set;
            # verify against the full module.
            to_follow.append(autofollow)
    economy.grant_daily_free_stickers(request.user, force=True, count=knobs.SIGNUP_FREE_STICKERS)
    # Follow the Canvas account.
    try:
        user.redis.following.sadd(User.objects.get(username=settings.CANVAS_ACCOUNT_USERNAME).id)
    except User.DoesNotExist:
        pass
    # Logged-out remix?
    cookie_key, post_data = after_signup.get_posted_comment(request)
    if post_data:
        post_comment(request, user, post_data)
        cookies_to_delete.append(cookie_key)
    # Mutual-follow with the inviter, if the session recorded one.
    inviter_id = request.session.get('inviter')
    if inviter_id:
        user.kv.inviter = inviter_id
        del request.session['inviter']
        inviter = User.objects.get(pk=inviter_id)
        user.follow(inviter)
        inviter.follow(user)
    # DEPRECATED. Use after_signup.py / after_signup.js now instead.
    extra_info = request.POST.get("info")
    if extra_info:
        extra_info = util.loads(extra_info)
        if extra_info.get('in_flow') == 'yes':
            fact.record('flow_signup', request, {})
        # A user may have come to signup by remixing/replying, and we've got their post data to submit and send them
        # to.
        if not post_data:
            post_data = extra_info.get('post')
            if post_data:
                post_comment(request, user, post_data)
    old_session_key = request.session.session_key
    def _after_signup():
        # Deferred bookkeeping: clear consumed Facebook app requests and
        # record signup metrics off the request path.
        if fb_api:
            app_requests = fb_api.get_object('/me/apprequests/').get('data', [])
            for app_request in app_requests:
                # NOTE(review): `id` here is the builtin function, so this
                # membership test is almost certainly meant to be the string
                # 'id'; also `fb` (vs `fb_api`) must come from module scope
                # -- verify both against the full module.
                if id in app_request:
                    fb.delete_object(app_request['id'])
        Metrics.signup.record(request, old_session_key=old_session_key, username=username, email=email)
        if 'failed_signup' in request.session:
            del request.session['failed_signup']
            Metrics.signup_second_try.record(request)
        if template == 'signup/_signup_prompt.html':
            Metrics.signup_prompt.record(request)
        else:
            Metrics.signup_main.record(request)
    bgwork.defer(_after_signup)
    # auth.login starts a new session and copies the session data from the old one to the new one
    auth.login(request, user)
    experiments.migrate_from_request_to_user(request, user)
0
Example 89
Project: simian Source File: appengine_rpc_httplib2.py
def Send(self, request_path, payload='',
         content_type='application/octet-stream',
         timeout=None,
         **kwargs):
    """Sends an RPC and returns the response.
    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      Any keyword arguments are converted into query string parameters.
    Returns:
      The response body, as a string.
    Raises:
      AuthPermanentFail: If authorization failed in a permanent way.
      urllib2.HTTPError: On most HTTP errors.

    NOTE(review): Python 2 only (`except ..., e` syntax below).
    """
    # Fresh Http object per call; SSL certificate validation is disabled
    # when no CA certificate file is available.
    self.http = httplib2.Http(
        cache=self.memory_cache, ca_certs=self.certpath,
        disable_ssl_certificate_validation=(not self.cert_file_available))
    # Redirects are handled manually in the 302 branch of the loop below.
    self.http.follow_redirects = False
    self.http.timeout = timeout
    url = '%s://%s%s' % (self.scheme, self.host, request_path)
    if kwargs:
        # Sorted for a deterministic query-string order.
        url += '?' + urllib.urlencode(sorted(kwargs.items()))
    headers = {}
    if self.extra_headers:
        headers.update(self.extra_headers)
    headers['X-appcfg-api-version'] = '1'
    # A non-None payload (including the default '') means POST; only an
    # explicit payload=None produces a GET.
    if payload is not None:
        method = 'POST'
        headers['content-length'] = str(len(payload))
        headers['Content-Type'] = content_type
    else:
        method = 'GET'
    if self.host_override:
        headers['Host'] = self.host_override
    # Retry bookkeeping. auth_errors is a one-element list so the nested
    # NeedAuth() closure can mutate it (no `nonlocal` in Python 2).
    rpc_errors = 0
    auth_errors = [0]
    conflict_errors = 0
    def NeedAuth():
        """Marker that we need auth; it'll actually be tried next time around."""
        auth_errors[0] += 1
        logger.debug('Attempting to auth. This is try %s of %s.',
                     auth_errors[0], self.auth_max_errors)
        if auth_errors[0] > self.auth_max_errors:
            RaiseHttpError(url, response_info, response, 'Too many auth attempts.')
    # Main send/retry loop, bounded separately by RPC and conflict budgets.
    while (rpc_errors < self.rpc_max_errors and
           conflict_errors < self.conflict_max_errors):
        self._Authenticate(self.http, auth_errors[0] > 0)
        # Log the raw payload only when debug_data is set; otherwise elide.
        logger.debug('Sending request to %s headers=%s body=%s',
                     url, headers,
                     self.debug_data and payload or payload and 'ELIDED' or '')
        try:
            response_info, response = self.http.request(
                url, method=method, body=payload, headers=headers)
        except client.AccessTokenRefreshError, e:
            # Treat a token-refresh failure as a synthetic 401 so the normal
            # re-auth path below handles it.
            logger.info('Got access token error', exc_info=1)
            response_info = httplib2.Response({'status': 401})
            response_info.reason = str(e)
            response = ''
        status = response_info.status
        if status == 200:
            return response
        logger.debug('Got http error %s.', response_info.status)
        if status == 401:
            # Unauthorized: re-authenticate on the next loop iteration.
            NeedAuth()
            continue
        elif status == 409:
            # Conflict: back off a random 0-10 s and retry.
            conflict_errors += 1
            wait_time = random.randint(0, 10)
            logger.debug('Got conflict error %s of %s. Retrying in %s seconds.',
                         conflict_errors, self.conflict_max_errors, wait_time)
            time.sleep(wait_time)
            continue
        elif status >= 500 and status < 600:
            # Server error: retry up to rpc_max_errors times.
            rpc_errors += 1
            logger.debug('Retrying. This is attempt %s of %s.',
                         rpc_errors, self.rpc_max_errors)
            continue
        elif status == 302:
            # A redirect to a Google login page means we need (re-)auth;
            # any other redirect is unexpected and fatal.
            loc = response_info.get('location')
            logger.debug('Got 302 redirect. Location: %s', loc)
            if (loc.startswith('https://www.google.com/accounts/ServiceLogin') or
                re.match(r'https://www\.google\.com/a/[a-z0-9.-]+/ServiceLogin',
                         loc)):
                NeedAuth()
                continue
            elif loc.startswith('http://%s/_ah/login' % (self.host,)):
                RaiseHttpError(url, response_info, response,
                               'dev_appserver login not supported')
            else:
                RaiseHttpError(url, response_info, response,
                               'Unexpected redirect to %s' % loc)
        else:
            logger.debug('Unexpected results: %s', response_info)
            RaiseHttpError(url, response_info, response,
                           'Unexpected HTTP status %s' % status)
    # Retry budget exhausted.
    # NOTE(review): uses the root `logging` module here rather than
    # `logger` -- confirm that is intended.
    logging.info('Too many retries for url %s', url)
    RaiseHttpError(url, response_info, response)
0
Example 90
def request(method, url, params=None, data=None, headers=None, cookies=None,
            files=None, auth=None, timeout=60, allow_redirects=False,
            stream=False):
    """Open an HTTP(S) connection and return a :class:`Response`.

    :param method: ``'GET'`` or ``'POST'``
    :type method: ``unicode``
    :param url: URL to request
    :type url: ``unicode``
    :param params: URL query parameters
    :type params: :class:`dict`
    :param data: form data ``{'field_name': 'value'}`` or a raw body
    :type data: :class:`dict` or :class:`str`
    :param headers: HTTP headers to send
    :type headers: :class:`dict`
    :param cookies: cookies to send to server (currently unused)
    :type cookies: :class:`dict`
    :param files: files to upload; maps HTML form field name to a dict
        with ``filename``, ``content`` and optional ``mimetype`` keys.
        A missing ``mimetype`` is guessed via :mod:`mimetypes`, falling
        back to ``application/octet-stream``.
    :type files: :class:`dict`
    :param auth: ``(username, password)`` pair for HTTP Basic auth
    :type auth: ``tuple``
    :param timeout: socket timeout limit in seconds
    :type timeout: ``int``
    :param allow_redirects: whether redirects are followed
    :type allow_redirects: ``Boolean``
    :param stream: stream the content instead of reading it eagerly
    :type stream: ``bool``
    :returns: :class:`Response` object
    """
    # TODO: cookies
    socket.setdefaulttimeout(timeout)

    # Assemble the urllib2 handler chain for this request.
    handler_chain = []
    if not allow_redirects:
        handler_chain.append(NoRedirectHandler())
    if auth is not None:
        # Register HTTP Basic credentials for this URL.
        user, pwd = auth
        pw_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        pw_manager.add_password(None, url, user, pwd)
        handler_chain.append(urllib2.HTTPBasicAuthHandler(pw_manager))
    # Install our custom chain of openers (process-wide side effect).
    urllib2.install_opener(urllib2.build_opener(*handler_chain))

    # Normalise headers to a case-insensitive mapping with our defaults.
    headers = CaseInsensitiveDictionary(headers) if headers else CaseInsensitiveDictionary()
    if 'user-agent' not in headers:
        headers['user-agent'] = USER_AGENT
    # Advertise gzip support unless the caller already listed it.
    accepted = [enc.strip() for enc in
                headers.get('accept-encoding', '').split(',')]
    if 'gzip' not in accepted:
        accepted.append('gzip')
    headers['accept-encoding'] = ', '.join(accepted)

    if method == 'POST' and not data:
        # Force POST by providing an empty data string
        data = ''
    if files:
        # Multipart upload; POST form fields ride along with the files.
        new_headers, data = encode_multipart_formdata(data or {}, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urllib.urlencode(str_dict(data))

    # Make sure everything is encoded text
    headers = str_dict(headers)
    if isinstance(url, unicode):
        url = url.encode('utf-8')

    if params:  # GET args (POST args are handled in encode_multipart_formdata)
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        if query:
            # `params` take precedence over the URL's own query string.
            merged = urlparse.parse_qs(query)
            merged.update(params)
            params = merged
        query = urllib.urlencode(str_dict(params), doseq=True)
        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    return Response(urllib2.Request(url, data, headers), stream)
0
Example 91
Project: rust-ci Source File: views.py
def github_callback(request):
    """OAuth callback endpoint for GitHub authorization (Python 2 / Django).

    Validates the CSRF ``state`` token stored in the session, exchanges the
    ``code`` query parameter for a GitHub access token, verifies that the
    authenticated GitHub user (or one of their organizations) owns the
    project stored in the session, and then performs the action recorded in
    ``auth_reason`` ('add_project', 'delete_project', 'get_auth_token',
    'trigger_rebuild' or 'edit_project').

    Returns an HttpResponse: 401 on state mismatch, a redirect on success,
    or the index page rendered with an error message on failure.
    """
    state = request.session.get('state')
    state_param = request.GET['state']
    # CSRF protection: the state echoed by GitHub must match the session copy.
    if not state or state_param != state:
        logger.error('github_callback failed, no state given or ' +
                'not matching. session={}, param={}'.format(state, state_param))
        return HttpResponse('Unauthorized', status=401)
    project_id = request.session['project_id']
    project = Project.objects.get(pk = project_id)
    auth_reason = request.session['auth_reason']
    # Session values are consumed exactly once; clear to prevent replay.
    request.session.clear()
    data = {
        'client_id': private_settings.GITHUB_CLIENT_ID,
        'client_secret': private_settings.GITHUB_CLIENT_SECRET,
        'code': request.GET['code'],
        'state': request.GET['state']
    }
    # Exchange the temporary code for an access token (server-to-server POST).
    req = urllib2.Request('https://github.com/login/oauth/access_token',
            urlencode(data))
    req.add_header('Accept', 'application/json')
    response = json.loads(urllib2.urlopen(req).read())
    if 'access_token' in response and response['access_token']:
        github_token = response['access_token']
        req = urllib2.Request('https://api.github.com/user')
        req.add_header('Accept', 'application/json')
        req.add_header('Authorization', 'token {}'.
                format(github_token))
        github_user = json.loads(urllib2.urlopen(req).read())
        #print(json.dumps(github_user, sort_keys=True, indent=4))
        # Get organizations for user (to allow members of orgs to
        # add projects on behalf of their organization)
        orgs_req = urllib2.Request(github_user['organizations_url'])
        orgs_req.add_header('Accept', 'application/json')
        orgs_req.add_header('Authorization', 'token {}'.
                format(github_token))
        github_user_orgs = json.loads(urllib2.urlopen(orgs_req).read())
        #print(json.dumps(github_user_orgs, sort_keys=True, indent=4))
        is_authorized = False
        # Check that we got token for the right user or organization
        if project.username == github_user['login']:
            is_authorized = True
        else:
            for github_org in github_user_orgs:
                if project.username == github_org['login']:
                    is_authorized = True
                    break
        if not is_authorized:
            if auth_reason == 'add_project':
                # Unable to authorize when adding, delete
                project.delete()
            error_message = 'Neither authenticated GitHub user ({}) \
                    or that users organizations matches project \
                    owner ({})'.format(github_user['login'],
                    project.username)
            return index(request, error_message)
        if auth_reason == 'delete_project':
            if not settings.DEBUG:
                mail_message = "{}/{} - {}\n\n".\
                        format(project.username, project.repository,
                        project.branch)
                mail_admins('Project deleted', mail_message)
            project.mark_project_deleted()
            return HttpResponseRedirect(reverse('ppatrigger.views.index'))
        else:
            # All remaining reasons require a Travis CI token.
            travis_token = travisclient.get_travis_token(github_token)
            if travis_token:
                if auth_reason == 'add_project':
                    project.auth_token = travis_token
                    project.save()
                    if not settings.DEBUG:
                        mail_message = "{}/{} - {}\n\n".\
                                format(project.username, project.repository,
                                project.branch)
                        mail_admins('Project added', mail_message)
                elif auth_reason == 'get_auth_token':
                    # Used if initial auth failed for some reason
                    # (i.e. no auth_token in db)
                    project.auth_token = travis_token
                    project.save()
                    if not settings.DEBUG:
                        mail_message = "{}/{} - {}\n\n".\
                                format(project.username, project.repository,
                                project.branch)
                        mail_admins('Project authenticated', mail_message)
                elif auth_reason == 'trigger_rebuild':
                    project.auth_token = travis_token
                    project.build_requested = True
                    project.save()
                elif auth_reason == 'edit_project':
                    request.session['session_auth'] = project.rustci_token
                    return HttpResponseRedirect(reverse(
                        'project.action.edit_project',
                        args=(project.id,)))
                return HttpResponseRedirect(reverse('ppatrigger.views.index'))
            else:
                # Falls through to the final error return below.
                error_message = 'Error in response from Travis CI'
    else:
        if auth_reason == 'add_project':
            # Unable to authorize when adding, delete
            project.delete()
        error_message = 'Error in response from GitHub: {}'.\
                format(response.get('error'))
    # Shared error exit for the Travis-failure and GitHub-failure paths.
    return index(request, error_message)
0
Example 92
Project: gfw-api Source File: discovery.py
def createResource(http, baseUrl, model, requestBuilder,
                   developerKey, resourceDesc, futureDesc, schema):
  """Build a Resource object for one node of a discovery document.

  Dynamically generates a class whose methods correspond to the API methods
  described in ``resourceDesc``, including media-upload handling, nested
  sub-resources and legacy ``_next`` pagination helpers.

  Args:
    http: httplib2.Http-like object used to execute requests.
    baseUrl: string, base URI that method path templates are joined onto.
    model: model object that serializes requests / deserializes responses.
    requestBuilder: callable constructing HttpRequest objects.
    developerKey: string or None, API key added to every query string.
    resourceDesc: dict, discovery description of this resource.
    futureDesc: dict or None, legacy future.json description (pagination).
    schema: dict, named response schemas from the discovery document.

  Returns:
    An instance of the generated Resource class.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, futureDesc):
    # Attach a generated API method named methodName to theclass.
    methodName = _fix_method_name(methodName)
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    mediaPathUrl = None
    accept = []
    maxSize = 0
    if 'mediaUpload' in methodDesc:
      mediaUpload = methodDesc['mediaUpload']
      mediaPathUrl = mediaUpload['protocols']['simple']['path']
      mediaResumablePathUrl = mediaUpload['protocols']['resumable']['path']
      accept = mediaUpload['accept']
      maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))

    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    # Every generated method also accepts the standard stack query parameters.
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }

    if httpMethod in ['PUT', 'POST', 'PATCH']:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
      if 'mediaUpload' in methodDesc:
        methodDesc['parameters']['media_body'] = {
            'description': 'The filename of the media request body.',
            'type': 'string',
            'required': False,
            }
        # When media can be uploaded the JSON body becomes optional.
        methodDesc['parameters']['body']['required'] = False

    argmap = {}  # Map from method parameter name to query parameter name
    required_params = []  # Required parameters
    repeated_params = []  # Repeated parameters
    pattern_params = {}  # Parameters that must match a regex
    query_params = []  # Parameters that will be used in the query string
    path_params = {}  # Parameters that will be used in the base URL
    param_type = {}  # The type of the parameter
    enum_params = {}  # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg

        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # Names mentioned in the URI template are path parameters, not query.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Validate the keyword arguments against the discovery description.
      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)

      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)

      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if isinstance(kwargs[name], basestring):
            pvalues = [kwargs[name]]
          else:
            pvalues = kwargs[name]
          for pvalue in pvalues:
            if re.match(regex, pvalue) is None:
              raise TypeError(
                  'Parameter "%s" value "%s" does not match the pattern "%s"' %
                  (name, pvalue, regex))

      for name, enums in enum_params.iteritems():
        if name in kwargs:
          if kwargs[name] not in enums:
            raise TypeError(
                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                (name, kwargs[name], str(enums)))

      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      media_filename = kwargs.get('media_body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      headers = {}
      headers, params, query, body = self._model.request(headers,
          actual_path_params, actual_query_params, body_value)

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)

      resumable = None
      multipart_boundary = ''

      if media_filename:
        # Convert a simple filename into a MediaUpload object.
        if isinstance(media_filename, basestring):
          (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
          if media_mime_type is None:
            raise UnknownFileType(media_filename)
          if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
          media_upload = MediaFileUpload(media_filename, media_mime_type)
        elif isinstance(media_filename, MediaUpload):
          media_upload = media_filename
        else:
          # BUGFIX: this error path previously did `type(media_upload)`, but
          # media_upload is unbound here and the intended TypeError was
          # masked by a NameError; report the caller-supplied value instead.
          raise TypeError(
              'media_filename must be str or MediaUpload. Got %s' %
              type(media_filename))

        if media_upload.resumable():
          resumable = media_upload

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
          raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        if media_upload.resumable():
          expanded_url = uritemplate.expand(mediaResumablePathUrl, params)
        else:
          expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)

        if body is None:
          # This is a simple media upload
          headers['content-type'] = media_upload.mimetype()
          expanded_url = uritemplate.expand(mediaResumablePathUrl, params)
          if not media_upload.resumable():
            body = media_upload.getbytes(0, media_upload.size())
        else:
          # This is a multipart/related upload.
          msgRoot = MIMEMultipart('related')
          # msgRoot should not write out its own headers
          setattr(msgRoot, '_write_headers', lambda self: None)

          # attach the body as one part
          msg = MIMENonMultipart(*headers['content-type'].split('/'))
          msg.set_payload(body)
          msgRoot.attach(msg)

          # attach the media as the second part
          msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
          msg['Content-Transfer-Encoding'] = 'binary'

          if media_upload.resumable():
            # This is a multipart resumable upload, where a multipart payload
            # looks like this:
            #
            #  --===============1678050750164843052==
            #  Content-Type: application/json
            #  MIME-Version: 1.0
            #
            #  {'foo': 'bar'}
            #  --===============1678050750164843052==
            #  Content-Type: image/png
            #  MIME-Version: 1.0
            #  Content-Transfer-Encoding: binary
            #
            #  <BINARY STUFF>
            #  --===============1678050750164843052==--
            #
            # In the case of resumable multipart media uploads, the <BINARY
            # STUFF> is large and will be spread across multiple PUTs.  What we
            # do here is compose the multipart message with a random payload in
            # place of <BINARY STUFF> and then split the resulting content into
            # two pieces, text before <BINARY STUFF> and text after <BINARY
            # STUFF>. The text after <BINARY STUFF> is the multipart boundary.
            # In apiclient.http the HttpRequest will send the text before
            # <BINARY STUFF>, then send the actual binary media in chunks, and
            # then will send the multipart delimeter.
            payload = hex(random.getrandbits(300))
            msg.set_payload(payload)
            msgRoot.attach(msg)
            body = msgRoot.as_string()
            body, _ = body.split(payload)
            resumable = media_upload
          else:
            payload = media_upload.getbytes(0, media_upload.size())
            msg.set_payload(payload)
            msgRoot.attach(msg)
            body = msgRoot.as_string()

          multipart_boundary = msgRoot.get_boundary()
          headers['content-type'] = ('multipart/related; '
                                     'boundary="%s"') % multipart_boundary

      logging.info('URL being requested: %s' % url)
      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId,
                                  resumable=resumable)

    # Assemble a docstring for the generated method from the discovery doc.
    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')
      for arg in argmap.iterkeys():
        if arg in STACK_QUERY_PARAMETERS:
          continue
        repeated = ''
        if arg in repeated_params:
          repeated = ' (repeated)'
        required = ''
        if arg in required_params:
          required = ' (required)'
        paramdesc = methodDesc['parameters'][argmap[arg]]
        paramdoc = paramdesc.get('description', 'A parameter')
        paramtype = paramdesc.get('type', 'string')
        docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                            repeated))
        enum = paramdesc.get('enum', [])
        enumDesc = paramdesc.get('enumDescriptions', [])
        if enum and enumDesc:
          docs.append('    Allowed values\n')
          for (name, desc) in zip(enum, enumDesc):
            docs.append('      %s - %s\n' % (name, desc))
    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethodFromFuture(theclass, methodName, methodDesc, futureDesc):
    """ This is a legacy method, as only Buzz and Moderator use the future.json
    functionality for generating _next methods. It will be kept around as long
    as those API versions are around, but no new APIs should depend upon it.
    """
    methodName = _fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous):
      """Retrieve the next page of results.

      Takes a single argument, 'body', which is the results
      from the last call, and returns the next set of items
      in the collection.

      Returns:
        None if there are no more items in the collection.
      """
      if futureDesc['type'] != 'uri':
        raise UnknownLinkType(futureDesc['type'])

      try:
        p = previous
        for key in futureDesc['location']:
          p = p[key]
        url = p
      except (KeyError, TypeError):
        return None

      url = _add_query_parameter(url, 'key', self._developerKey)

      headers = {}
      headers, params, query, body = self._model.request(headers, {}, {}, None)

      logging.info('URL being requested: %s' % url)
      resp, content = self._http.request(url, method='GET', headers=headers)

      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method='GET',
                                  headers=headers,
                                  methodId=methodId)

    setattr(theclass, methodName, methodNext)

  def createNextMethod(theclass, methodName, methodDesc, futureDesc):
    # Generate a <method>_next pager based on the nextPageToken convention.
    methodName = _fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous_request, previous_response):
      """Retrieves the next page of results.

      Args:
        previous_request: The request for the previous page.
        previous_response: The response from the request for the previous page.

      Returns:
        A request object that you can call 'execute()' on to request the next
        page. Returns None if there are no more items in the collection.
      """
      # Retrieve nextPageToken from previous_response
      # Use as pageToken in previous_request to create new request.
      if 'nextPageToken' not in previous_response:
        return None

      request = copy.copy(previous_request)

      pageToken = previous_response['nextPageToken']
      parsed = list(urlparse.urlparse(request.uri))
      q = parse_qsl(parsed[4])

      # Find and remove old 'pageToken' value from URI
      newq = [(key, value) for (key, value) in q if key != 'pageToken']
      newq.append(('pageToken', pageToken))
      parsed[4] = urllib.urlencode(newq)
      uri = urlparse.urlunparse(parsed)

      request.uri = uri

      logging.info('URL being requested: %s' % uri)
      return request

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if futureDesc:
        future = futureDesc['methods'].get(methodName, {})
      else:
        future = None
      createMethod(Resource, methodName, methodDesc, future)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, futureDesc):
      methodName = _fix_method_name(methodName)

      def methodResource(self):
        return createResource(self._http, self._baseUrl, self._model,
                              self._requestBuilder, self._developerKey,
                              methodDesc, futureDesc, schema)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      if futureDesc and 'resources' in futureDesc:
        future = futureDesc['resources'].get(methodName, {})
      else:
        future = {}
      createResourceMethod(Resource, methodName, methodDesc, future)

  # Add <m>_next() methods to Resource
  if futureDesc and 'methods' in futureDesc:
    for methodName, methodDesc in futureDesc['methods'].iteritems():
      if 'next' in methodDesc and methodName in resourceDesc['methods']:
        createNextMethodFromFuture(Resource, methodName + '_next',
                                   resourceDesc['methods'][methodName],
                                   methodDesc['next'])

  # Add _next() methods
  # Look for response bodies in schema that contain nextPageToken, and methods
  # that take a pageToken parameter.
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if 'response' in methodDesc:
        responseSchema = methodDesc['response']
        if '$ref' in responseSchema:
          responseSchema = schema[responseSchema['$ref']]
        hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
                                                                 {})
        hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
        if hasNextPageToken and hasPageToken:
          createNextMethod(Resource, methodName + '_next',
                           resourceDesc['methods'][methodName],
                           methodName)

  return Resource()
0
Example 93
def __init__(
    self,
    sqlrows,
    linkto=None,
    upload=None,
    orderby=None,
    headers={},
    truncate=16,
    columns=None,
    **attributes
    ):
    """Render a Rows result set as an HTML TABLE helper (Python 2 / web2py).

    sqlrows:  the Rows object to render.
    linkto:   URL prefix or callable used to build links for id/reference
              columns; a callable gets (value, kind, tablename-or-ref).
    upload:   URL prefix for download links of 'upload' fields.
    orderby:  if set, column headers become ?orderby= links instead of text.
    headers:  dict mapping colname -> header label.
    truncate: max displayed characters per cell (measured in unicode chars).
    columns:  optional whitelist of column names to render.
    attributes: passed through to the TABLE helper.
    """
    TABLE.__init__(self, **attributes)
    self.components = []
    self.attributes = attributes
    self.sqlrows = sqlrows
    (components, row) = (self.components, [])
    # Header row: plain labels, or orderby links when orderby is enabled.
    if not orderby:
        for c in sqlrows.colnames:
            if not columns or c in columns:
                row.append(TH(headers.get(c, c)))
    else:
        for c in sqlrows.colnames:
            if not columns or c in columns:
                row.append(TH(A(headers.get(c, c),
                            _href='?orderby=' + c)))
    components.append(THEAD(TR(*row)))
    tbody = []
    for (rc, record) in enumerate(sqlrows):
        row = []
        # Alternate CSS classes for zebra striping.
        if rc % 2 == 0:
            _class = 'even'
        else:
            _class = 'odd'
        for colname in sqlrows.colnames:
            if columns and not colname in columns:
                continue
            # Columns not matching 'table.field' come from record._extra.
            if not table_field.match(colname):
                r = record._extra[colname]
                row.append(TD(r))
                continue
            (tablename, fieldname) = colname.split('.')
            field = sqlrows.db[tablename][fieldname]
            # Joined rows nest values per-table; flat rows index by field.
            if tablename in record and isinstance(record,
                    Row) and isinstance(record[tablename],
                    Row):
                r = record[tablename][fieldname]
            elif fieldname in record:
                r = record[fieldname]
            else:
                raise SyntaxError, 'something wrong in SQLRows object'
            # Keep the raw value for link building before represent/format.
            r_old = r
            if field.represent:
                r = field.represent(r)
            if not isinstance(r,str):
                row.append(TD(r))
                continue
            if field.type == 'blob' and r:
                row.append(TD('DATA'))
                continue
            r = str(field.formatter(r))
            if field.type == 'upload':
                if upload and r:
                    row.append(TD(A('file', _href='%s/%s' % (upload, r))))
                elif r:
                    row.append(TD('file'))
                else:
                    row.append(TD())
                continue
            # Truncate on unicode characters so multibyte text is not cut
            # mid-character.
            ur = unicode(r, 'utf8')
            if len(ur) > truncate:
                r = ur[:truncate - 3].encode('utf8') + '...'
            if linkto and field.type == 'id':
                try:
                    href = linkto(r, 'table', tablename)
                except TypeError:
                    href = '%s/%s/%s' % (linkto, tablename, r_old)
                row.append(TD(A(r, _href=href)))
            elif linkto and str(field.type).startswith('reference'):
                ref = field.type[10:]
                try:
                    href = linkto(r, 'reference', ref)
                except TypeError:
                    href = '%s/%s/%s' % (linkto, ref, r_old)
                    # 'table.field' references to keyed tables link by the
                    # referenced field value instead of the record id.
                    if ref.find('.') >= 0:
                        tref,fref = ref.split('.')
                        if hasattr(sqlrows.db[tref],'_primarykey'):
                            href = '%s/%s?%s' % (linkto, tref, urllib.urlencode({fref:ur}))
                row.append(TD(A(r, _href=href)))
            elif linkto and hasattr(field._table,'_primarykey') and fieldname in field._table._primarykey:
                # have to test this with multi-key tables
                key = urllib.urlencode(dict( [ \
                    ((tablename in record \
                        and isinstance(record, Row) \
                        and isinstance(record[tablename], Row)) and
                    (k, record[tablename][k])) or (k, record[k]) \
                        for k in field._table._primarykey ] ))
                row.append(TD(A(r, _href='%s/%s?%s' % (linkto, tablename, key))))
            else:
                row.append(TD(r))
        tbody.append(TR(_class=_class, *row))
    components.append(TBODY(*tbody))
0
Example 94
Project: commcare-hq Source File: object_fetch_api.py
@method_decorator(login_or_digest_or_basic_or_apikey())
def get(self, request, domain, case_id=None, attachment_id=None):
    """
    https://github.com/dimagi/commcare/wiki/CaseAttachmentAPI
    max_size The largest size (in bytes) for the attachment
    max_image_width The largest width in pixels for an an image attachment
    max_image_height The largest width in pixels for an an image attachment

    Streams the requested case attachment back to the caller.  With
    ?img=1 the attachment is fetched through the image-resizing pipeline
    (honoring the size limits above); ?img=1&size=debug_all instead
    renders an HTML diagnostic page showing the attachment at every
    available image size.  Raises Http404 for a missing case or missing
    ids; returns 403 for web users without attachment permission.
    """
    if self.request.couch_user.is_web_user() and not can_view_attachments(self.request):
        return HttpResponseForbidden()
    if not case_id or not attachment_id:
        raise Http404
    # Optional query parameters controlling image handling; 0 means "no limit".
    img = self.request.GET.get('img', None)
    size = self.request.GET.get('size', OBJECT_ORIGINAL)
    max_width = int(self.request.GET.get('max_image_width', 0))
    max_height = int(self.request.GET.get('max_image_height', 0))
    max_filesize = int(self.request.GET.get('max_size', 0))
    # Existence check only; the case body itself is not used.
    try:
        CaseAccessors(domain).get_case(case_id)
    except CaseNotFound:
        raise Http404
    if img is not None:
        if size == "debug_all":
            # Debug mode: emit an HTML list with one entry per image size.
            url_base = reverse("api_case_attachment", kwargs={
                "domain": self.request.domain,
                "case_id": case_id,
                "attachment_id": attachment_id,
            })
            r = HttpResponse(content_type="text/html")
            r.write('<html><body>')
            r.write('<ul>')
            for fsize in IMAGE_SIZE_ORDERING:
                meta, stream = fetch_case_image(
                    domain,
                    case_id,
                    attachment_id,
                    filesize_limit=max_filesize,
                    width_limit=max_width,
                    height_limit=max_height,
                    fixed_size=fsize
                )
                r.write('<li>')
                r.write('Size: %s<br>' % fsize)
                r.write("Limit: max_size: %s" % max_filesize)
                if max_width > 0:
                    r.write(", max_width: %s" % max_width)
                if max_height > 0:
                    r.write(", max_height: %s" % max_height)
                r.write("<br>")
                if meta is not None:
                    r.write('Resolution: %d x %d<br>' % (meta['width'], meta['height']))
                    r.write('Filesize: %d<br>' % meta['content_length'])
                    # Embed the attachment itself via this same API endpoint.
                    url_params = urllib.urlencode({
                        "img": '1',
                        "size": fsize,
                        "max_size": max_filesize,
                        "max_image_width": max_width,
                        "max_image_height": max_height
                    })
                    r.write('<img src="%(attach_url)s?%(params)s">' % {
                        "attach_url": url_base,
                        "params": url_params
                    })
                else:
                    r.write('Not available')
                r.write('</li>')
            r.write('</ul></body></html>')
            return r
        else:
            attachment_meta, attachment_stream = fetch_case_image(
                domain,
                case_id,
                attachment_id,
                filesize_limit=max_filesize,
                width_limit=max_width,
                height_limit=max_height,
                fixed_size=size
            )
    else:
        # Non-image path: serve the cached raw attachment.
        cached_attachment = get_cached_case_attachment(domain, case_id, attachment_id)
        attachment_meta, attachment_stream = cached_attachment.get()
    if attachment_meta is not None:
        mime_type = attachment_meta['content_type']
    else:
        mime_type = "plain/text"
    return StreamingHttpResponse(streaming_content=FileWrapper(attachment_stream),
                                 content_type=mime_type)
0
Example 95
def get_data(self, start_date, end_date, metrics, dimensions=None, sort=None, filters=None, start_index=0, max_results=0):
    """
    Pulls data in from an account and returns a processed data structure for
    easy post processing. This method requires the following inputs:

    ** Required Arguments **

    ``start_date``
      A ``datetime`` object for the lower bound of your query

    ``end_date``
      A ``datetime`` object for the upper bound of your query

    ``metrics``
      A list of metrics, for example: ['pageviews', 'uniquePageviews']
      See: http://code.google.com/apis/analytics/docs/gdata/gdataReferenceDimensionsMetrics.html
      See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#dimensionsAndMetrics

    ** Optional Arguments **

    ``dimensions``
      A list of dimensions, for example: ['country','browser']
      See: http://code.google.com/apis/analytics/docs/gdata/gdataReferenceDimensionsMetrics.html
      See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#dimensionsAndMetrics

    ``sort``
      A list of dimensions or metrics to sort the output by, should probably
      be one of the items you specified in ``dimensions`` or ``metrics``.
      For example: ['browser', 'pageviews']
      See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#sorting

    ``filters``
      A list of filters.  A filter expression has three parts:
        name - The name of the dimension or metric to filter on.
               For example: ga:pageviews will filter on the pageviews metric.
        operator - Defines the type of filter match to use. Operators are
                   specific to either dimensions or metrics.
        expression - States the values included or excluded from the results.
                     Expressions use regular expression syntax.
      Learn more about valid operators and expressions here:
      http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#filtering

      The ``filters`` input accepts this data as a list of lists like so. Please
      note that order matters, especially when using boolean operators (see
      below).

        [
          ['browser', '=~', 'Firefox', 'AND'], # Regular expression match on 'Firefox'
          ['browser', '=~', 'Internet (Explorer|Exploder)', 'OR'],
          ['city', '=@', 'York', 'OR'], # All cities with York as a substring
          ['state', '!=', 'California', 'AND'], # Everything but California
          ['timeOnPage', '<', '10'], # Reject results where timeonpage < 10sec
        ]

      Filters can be combined with AND boolean logic as well as with OR
      boolean logic. When using both operators, the OR operator has higher
      precendence. When you are using more than one filter, please specify
      a fourth item in your list 'AND' or 'OR' to explicitly spell out the
      filters' relationships:

      For example, this filter selects data from the United States from the
      browser Firefox.

        [
          ['country', '==', 'United States', 'OR'],
          ['browser', '=@', 'FireFox'],
        ]

      This filter selects data from either the United States or Canada.

        [
          ['country', '==', 'United States', 'AND'],
          ['country', '==', 'Canada'],
        ]

      The first filter limits results to cities starting with 'L' and ending
      with 'S'. The second limits results to browsers starting with 'Fire'
      and the cities starting with 'L':

        [
          ['city', '=~', '^L.*S$']
        ]

        [
          ['city', '=~', '^L', 'AND'],
          ['browser', '=~', '^Fire']
        ]

    ``start_index``
      The first row to return, starts at 1. This is useful for paging in combination with
      max_results, and also to get results past row 1000 (Google Data does not return
      more than 1000 results at once)

    ``max_results``
      Number of results to return.

    Returns a ``DataSet`` built from the raw XML response.
    Raises ``GoogleAnalyticsClientError`` if start_date > end_date.
    """
    # FIX: the optional list arguments previously used mutable defaults
    # (dimensions=[], sort=[], filters=[]); use None sentinels instead.
    # Passing None/[]/nothing is equivalent, so callers are unaffected.
    dimensions = dimensions or []
    sort = sort or []
    filters = filters or []
    path = '/analytics/feeds/data'
    if start_date > end_date:
        raise GoogleAnalyticsClientError('Date orders are reversed')
    data = {
        'ids': self.table_id,
        'start-date': start_date.strftime('%Y-%m-%d'),
        'end-date': end_date.strftime('%Y-%m-%d'),
    }
    if start_index > 0:
        data['start-index'] = str(start_index)
    if max_results > 0:
        data['max-results'] = str(max_results)
    # Dimensions and metrics must carry the 'ga:' namespace prefix.
    if dimensions:
        data['dimensions'] = ",".join(['ga:' + d for d in dimensions])
    data['metrics'] = ",".join(['ga:' + m for m in metrics])
    if sort:
        _sort = []
        for s in sort:
            pre = 'ga:'
            # A leading '-' requests descending order for that column.
            if s[0] == '-':
                pre = '-ga:'
                s = s[1:]
            _sort.append(pre + s)
        data['sort'] = ",".join(_sort)
    if filters:
        filter_string = self.process_filters(filters)
        data['filters'] = filter_string
    data = urllib.urlencode(data)
    response = self.connection.make_request('GET', path=path, data=data)
    raw_xml = response.read()
    processed_data = DataSet(raw_xml)
    return processed_data
0
Example 96
Project: wikiteam Source File: uploader.py
def upload(wikis, config={}):
    """Upload wiki dump archives to the Internet Archive (Python 2).

    For each wiki API URL in ``wikis``, finds matching ``*-wikidump.7z`` /
    ``*-history.xml.7z`` files in the current directory, skips (and
    optionally prunes) dumps already uploaded, scrapes site metadata from
    the wiki's api.php and main page, and uploads the dump (plus logo, if
    found) to the 'wiki-<name>' archive.org item.

    NOTE(review): many network/regex steps use bare ``except: pass`` on
    purpose - metadata scraping is best-effort and must not abort uploads.
    """
    headers = {'User-Agent': dumpgenerator.getUserAgent()}
    for wiki in wikis:
        print "#"*73
        print "# Uploading", wiki
        print "#"*73
        wiki = wiki.lower()
        # Dump filenames are prefixed with the wiki's domain-derived name.
        prefix = dumpgenerator.domain2prefix(config={'api': wiki})
        wikiname = prefix.split('-')[0]
        dumps = []
        for dirname, dirnames, filenames in os.walk('.'):
            if dirname == '.':
                for f in filenames:
                    if f.startswith('%s-' % (wikiname)) and (f.endswith('-wikidump.7z') or f.endswith('-history.xml.7z')):
                        dumps.append(f)
                break
        c = 0
        for dump in dumps:
            # Filename format: <wikiname>-<date>-...; index 1 is the date.
            wikidate = dump.split('-')[1]
            item = get_item('wiki-' + wikiname)
            if dump in uploadeddumps:
                if config['prune-directories']:
                    rmline='rm -rf %s-%s-wikidump/' % (wikiname, wikidate)
                    # With -f the deletion might have happened before and we won't know
                    if not os.system(rmline):
                        print 'DELETED %s-%s-wikidump/' % (wikiname, wikidate)
                if config['prune-wikidump'] and dump.endswith('wikidump.7z'):
                    # Simplistic quick&dirty check for the presence of this file in the item
                    stdout, stderr = subprocess.Popen(["md5sum", dump], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
                    dumphash = re.sub(' +.+\n?', '', stdout)
                    if dumphash in map(lambda x: x['md5'], item.files):
                        log(wiki, dump, 'verified')
                        rmline='rm -rf %s' % dump
                        if not os.system(rmline):
                            print 'DELETED ' + dump
                        print '%s was uploaded before, skipping...' % (dump)
                        continue
                    else:
                        print 'ERROR: The online item misses ' + dump
                        log(wiki, dump, 'missing')
                        # We'll exit this if and go upload the dump
                else:
                    print '%s was uploaded before, skipping...' % (dump)
                    continue
            time.sleep(0.1)
            wikidate_text = wikidate[0:4]+'-'+wikidate[4:6]+'-'+wikidate[6:8]
            print wiki, wikiname, wikidate, dump
            # Does the item exist already?
            ismissingitem = not item.exists
            # Logo path
            logourl = ''
            if ismissingitem or config['update']:
                #get metadata from api.php
                #first sitename and base url
                params = {'action': 'query', 'meta': 'siteinfo', 'format': 'xml'}
                data = urllib.urlencode(params)
                req = urllib2.Request(url=wiki, data=data, headers=headers)
                xml = ''
                try:
                    f = urllib2.urlopen(req)
                    xml = f.read()
                    f.close()
                except:
                    pass
                sitename = ''
                baseurl = ''
                lang = ''
                try:
                    sitename = re.findall(ur"sitename=\"([^\"]+)\"", xml)[0]
                except:
                    pass
                try:
                    baseurl = re.findall(ur"base=\"([^\"]+)\"", xml)[0]
                except:
                    pass
                try:
                    lang = re.findall(ur"lang=\"([^\"]+)\"", xml)[0]
                except:
                    pass
                # Fall back to values derived from the URL when the API
                # gave nothing usable.
                if not sitename:
                    sitename = wikiname
                if not baseurl:
                    baseurl = re.sub(ur"(?im)/api\.php", ur"", wiki)
                if lang:
                    lang = convertlang.has_key(lang.lower()) and convertlang[lang.lower()] or lang.lower()
                #now copyright info from API
                params = {'action': 'query', 'siprop': 'general|rightsinfo', 'format': 'xml'}
                data = urllib.urlencode(params)
                req = urllib2.Request(url=wiki, data=data, headers=headers)
                xml = ''
                try:
                    f = urllib2.urlopen(req)
                    xml = f.read()
                    f.close()
                except:
                    pass
                rightsinfourl = ''
                rightsinfotext = ''
                try:
                    rightsinfourl = re.findall(ur"rightsinfo url=\"([^\"]+)\"", xml)[0]
                    rightsinfotext = re.findall(ur"text=\"([^\"]+)\"", xml)[0]
                except:
                    pass
                # Fetch the main page HTML (used for footer copyright + logo).
                raw = ''
                try:
                    f = urllib.urlopen(baseurl)
                    raw = f.read()
                    f.close()
                except:
                    pass
                #or copyright info from #footer in mainpage
                if baseurl and not rightsinfourl and not rightsinfotext:
                    rightsinfotext = ''
                    rightsinfourl = ''
                    try:
                        rightsinfourl = re.findall(ur"<link rel=\"copyright\" href=\"([^\"]+)\" />", raw)[0]
                    except:
                        pass
                    try:
                        rightsinfotext = re.findall(ur"<li id=\"copyright\">([^\n\r]*?)</li>", raw)[0]
                    except:
                        pass
                    if rightsinfotext and not rightsinfourl:
                        rightsinfourl = baseurl + '#footer'
                try:
                    logourl = re.findall(ur'p-logo["\'][^>]*>\s*<a [^>]*background-image:\s*(?:url\()?([^;)"]+)', raw)[0]
                except:
                    pass
                print logourl
                #retrieve some info from the wiki
                wikititle = "Wiki - %s" % (sitename) # Wiki - ECGpedia
                wikidesc = "<a href=\"%s\">%s</a> dumped with <a href=\"https://github.com/WikiTeam/wikiteam\" rel=\"nofollow\">WikiTeam</a> tools." % (baseurl, sitename)# "<a href=\"http://en.ecgpedia.org/\" rel=\"nofollow\">ECGpedia,</a>: a free electrocardiography (ECG) tutorial and textbook to which anyone can contribute, designed for medical professionals such as cardiac care nurses and physicians. Dumped with <a href=\"https://github.com/WikiTeam/wikiteam\" rel=\"nofollow\">WikiTeam</a> tools."
                wikikeys = ['wiki', 'wikiteam', 'MediaWiki', sitename, wikiname] # ecg; ECGpedia; wiki; wikiteam; MediaWiki
                if not rightsinfourl and not rightsinfotext:
                    wikikeys.append('unknowncopyright')
                wikilicenseurl = rightsinfourl # http://creativecommons.org/licenses/by-nc-sa/3.0/
                wikirights = rightsinfotext # e.g. http://en.ecgpedia.org/wiki/Frequently_Asked_Questions : hard to fetch automatically, could be the output of API's rightsinfo if it's not a usable licenseurl or "Unknown copyright status" if nothing is found.
                wikiurl = wiki # we use api here http://en.ecgpedia.org/api.php
            else:
                # Item exists and no update requested: metadata will not be
                # (re)written, so placeholder values are sufficient.
                print 'Item already exists.'
                lang = 'foo'
                wikititle = 'foo'
                wikidesc = 'foo'
                wikikeys = 'foo'
                wikilicenseurl = 'foo'
                wikirights = 'foo'
                wikiurl = 'foo'
            if c == 0:
                # Item metadata
                md = {
                    'mediatype': 'web',
                    'collection': config['collection'],
                    'title': wikititle,
                    'description': wikidesc,
                    'language': lang,
                    'last-updated-date': wikidate_text,
                    'subject': '; '.join(wikikeys), # Keywords should be separated by ; but it doesn't matter much; the alternative is to set one per field with subject[0], subject[1], ...
                    'licenseurl': wikilicenseurl and urlparse.urljoin(wiki, wikilicenseurl),
                    'rights': wikirights,
                    'originalurl': wikiurl,
                }
            #Upload files and update metadata
            try:
                item.upload(dump, metadata=md, access_key=accesskey, secret_key=secretkey, verbose=True)
                item.modify_metadata(md) # update
                print 'You can find it in https://archive.org/details/wiki-%s' % (wikiname)
                if logourl:
                    logo = StringIO.StringIO(urllib.urlopen(urlparse.urljoin(wiki, logourl)).read())
                    logoextension = logourl.split('.')[-1] if logourl.split('.') else 'unknown'
                    logo.name = 'wiki-' + wikiname + '_logo.' + logoextension
                    item.upload(logo, access_key=accesskey, secret_key=secretkey, verbose=True)
                uploadeddumps.append(dump)
                log(wiki, dump, 'ok')
            except:
                print wiki, dump, 'error when uploading?'
            c += 1
Example 97
@staticmethod
def request(method, url, post_params=None, body=None, headers=None, raw_response=False):
    """Perform a REST request and parse the response.

    Args:
        method: An HTTP method (e.g. 'GET' or 'POST').
        url: The URL to make a request to.
        post_params: A dictionary of parameters to put in the body of the request.
            This option may not be used if the body parameter is given.
        body: The body of the request. Typically, this value will be a string.
            It may also be a file-like object in Python 2.6 and above. The body
            parameter may not be used with the post_params parameter.
        headers: A dictionary of headers to send with the request.
        raw_response: Whether to return the raw httplib.HTTPReponse object. [default False]
            It's best enabled for requests that return large amounts of data that you
            would want to .read() incrementally rather than loading into memory. Also
            use this for calls where you need to read metadata like status or headers,
            or if the body is not JSON.

    Returns:
        The JSON-decoded data from the server, unless raw_response is
        specified, in which case an httplib.HTTPReponse object is returned instead.

    Raises:
        dropbox.rest.ErrorResponse: The returned HTTP status is not 200, or the body was
            not parsed from JSON successfully.
        dropbox.rest.RESTSocketError: A socket.error was raised while contacting Dropbox.
    """
    post_params = post_params or {}
    headers = headers or {}
    headers['User-Agent'] = 'OfficialDropboxPythonSDK/' + SDK_VERSION

    if post_params:
        if body:
            raise ValueError("body parameter cannot be used with post_params parameter")
        # Form-encode the parameters; they become the request body.
        body = urllib.urlencode(post_params)
        headers["Content-type"] = "application/x-www-form-urlencoded"

    host = urlparse.urlparse(url).hostname
    conn = ProperHTTPSConnection(host, 443)

    try:
        # This code is here because httplib in pre-2.6 Pythons
        # doesn't handle file-like objects as HTTP bodies and
        # thus requires manual buffering.
        if not hasattr(body, 'read'):
            conn.request(method, url, body, headers)
        else:
            # We need to get the size of what we're about to send for the
            # Content-Length header. The body must support len(), or have a
            # .len attribute, or support fileno(); otherwise we fall back to
            # stringifying it.
            clen = None
            try:
                clen = len(body)
            except (TypeError, AttributeError):
                try:
                    clen = body.len
                except AttributeError:
                    try:
                        clen = os.fstat(body.fileno()).st_size
                    except AttributeError:
                        # Fine, let's do this the hard way: load the whole
                        # file at once using readlines if we can, otherwise
                        # just turn it into a string.
                        if hasattr(body, 'readlines'):
                            body = body.readlines()
                        conn.request(method, url, str(body), headers)
            if clen != None:  # clen == 0 is perfectly valid. Must explicitly check for None.
                clen = str(clen)
                headers["Content-Length"] = clen
                # Send headers first, then stream the body in chunks.
                conn.request(method, url, "", headers)
                BLOCKSIZE = 4096  # 4KB buffering
                data = body.read(BLOCKSIZE)
                while data:
                    conn.send(data)
                    data = body.read(BLOCKSIZE)
    except socket.error as e:
        raise RESTSocketError(host, e)
    except CertificateError as e:
        # BUGFIX: the original concatenated the exception object directly
        # ("..." + e), which raises TypeError and masks the real SSL error.
        raise RESTSocketError(host, "SSL certificate error: " + str(e))

    r = conn.getresponse()
    if r.status != 200:
        raise ErrorResponse(r)

    if raw_response:
        # Caller will .read() incrementally; do not consume or close here.
        return r
    else:
        try:
            resp = json.loads(r.read())
        except ValueError:
            raise ErrorResponse(r)
        finally:
            conn.close()

    return resp
0
Example 98
Project: faf Source File: reports.py
@reports.route("/<int:report_id>/associate_bz", methods=("GET", "POST"))
def associate_bug(report_id):
    """Associate a Bugzilla bug with a report (maintainers only).

    GET renders the association form plus pre-filled "file a new bug" URLs
    for every known bugtracker; POST validates the form, downloads the bug
    into storage if it is not cached, and links it to the report.
    """
    result = (db.session.query(Report, OpSysComponent)
              .join(OpSysComponent)
              .filter(Report.id == report_id)
              .first())
    if result is None:
        abort(404)

    report, component = result

    # Only the component maintainer may associate bugs.
    is_maintainer = is_component_maintainer(db, g.user, component)
    if not is_maintainer:
        flash("You are not the maintainer of this component.", "danger")
        return redirect(url_for("reports.item", report_id=report_id))

    form = AssociateBzForm(request.form)
    if request.method == "POST" and form.validate():
        bug_id = form.bug_id.data

        # Guard against duplicate associations.
        reportbug = (db.session.query(ReportBz)
                     .filter(
                         (ReportBz.report_id == report.id) &
                         (ReportBz.bzbug_id == bug_id))
                     .first())
        if reportbug:
            flash("Bug already associated.", "danger")
        else:
            bug = get_bz_bug(db, bug_id)
            if not bug:
                # Not cached locally; fetch it from the selected tracker.
                tracker = bugtrackers[form.bugtracker.data]
                try:
                    bug = tracker.download_bug_to_storage_no_retry(db, bug_id)
                except Exception as e:
                    flash("Failed to fetch bug. {0}".format(str(e)), "danger")
                    raise

            if bug:
                new = ReportBz()
                new.report = report
                new.bzbug = bug
                db.session.add(new)
                db.session.flush()
                db.session.commit()
                flash("Bug successfully associated.", "success")
                return redirect(url_for("reports.item", report_id=report_id))
            else:
                flash("Failed to fetch bug.", "danger")

    # Build "file a new bug" links, one per (release, bugtracker) pair.
    bthash_url = url_for("reports.bthash_forward",
                         bthash=report.hashes[0].hash,
                         _external=True)
    new_bug_params = {
        "component": component.name,
        "short_desc": "[abrt] [faf] {0}: {1}(): {2} killed by {3}"
                      .format(component.name,
                              report.crash_function,
                              ",".join(exe.path for exe in report.executables),
                              report.errname
                              ),
        "comment": "This bug has been created based on an anonymous crash "
                   "report requested by the package maintainer.\n\n"
                   "Report URL: {0}"
                   .format(bthash_url),
        "bug_file_loc": bthash_url
    }

    new_bug_urls = []
    for rosr in report.opsysreleases:
        osr = rosr.opsysrelease
        for bugtracker in bugtrackers.keys():
            try:
                params = new_bug_params.copy()
                if osr.opsys.name.startswith("Red Hat"):
                    params.update(product="{0} {1}".format(osr.opsys.name,
                                                           osr.version[0]),
                                  version=osr.version)
                else:
                    params.update(product=osr.opsys.name, version=osr.version)
                new_bug_urls.append(
                    ("{0} {1} in {2}".format(osr.opsys.name, osr.version,
                                             bugtracker),
                     "{0}?{1}".format(
                         bugtrackers[bugtracker].new_bug_url,
                         urllib.urlencode(params))
                     )
                )
            # BUGFIX: was a bare `except:` which also swallows SystemExit and
            # KeyboardInterrupt. Keep the deliberate best-effort skip (a
            # tracker without new_bug_url just doesn't get a link), but only
            # for ordinary exceptions.
            except Exception:
                pass

    return render_template("reports/associate_bug.html",
                           form=form,
                           report=report,
                           new_bug_urls=new_bug_urls)
0
Example 99
Project: Django-Socialauth Source File: views.py
def begin(request, redirect_to=None, on_failure=None, user_url=None,
          template_name='openid_consumer/signin.html'):
    """Start an OpenID authentication round-trip.

    Resolves the user's OpenID URL, attaches any configured SReg / PAPE / AX
    extensions, and redirects the browser to the provider. If no OpenID URL
    is supplied, renders the sign-in form instead.
    """
    on_failure = on_failure or default_on_failure
    trust_root = getattr(
        settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
    )

    # foo derbis.
    redirect_to = redirect_to or getattr(
        settings, 'OPENID_REDIRECT_TO',
        # If not explicitly set, assume current URL with complete/ appended
        get_full_url(request).split('?')[0] + 'complete/'
    )
    # In case they were lazy...
    if not (
        redirect_to.startswith('http://')
        or
        redirect_to.startswith('https://')):
        redirect_to = get_url_host(request) + redirect_to

    # Propagate a safe ?next= target through the OpenID round-trip.
    if request.GET.get('next') and is_valid_next_url(request.GET['next']):
        if '?' in redirect_to:
            join = '&'
        else:
            join = '?'
        redirect_to += join + urllib.urlencode({
            'next': request.GET['next']
        })

    if not user_url:
        user_url = request.REQUEST.get('openid_url', None)

    if not user_url:
        # No OpenID URL yet: show the sign-in form, preserving ?next=.
        request_path = request.path
        if request.GET.get('next'):
            request_path += '?' + urllib.urlencode({
                'next': request.GET['next']
            })
        return render(template_name, {
            'action': request_path,
        }, RequestContext(request))

    if xri.identifierScheme(user_url) == 'XRI' and getattr(
        settings, 'OPENID_DISALLOW_INAMES', False
    ):
        return on_failure(request, _('i-names are not supported'))

    consumer = Consumer(request.session, DjangoOpenIDStore())
    try:
        auth_request = consumer.begin(user_url)
    except DiscoveryFailure:
        return on_failure(request, _('The OpenID was invalid'))

    # Simple Registration extension: request profile fields from the provider.
    sreg = getattr(settings, 'OPENID_SREG', False)
    if sreg:
        s = SRegRequest()
        for sarg in sreg:
            if sarg.lower().lstrip() == "policy_url":
                s.policy_url = sreg[sarg]
            else:
                for v in sreg[sarg].split(','):
                    s.requestField(field_name=v.lower().lstrip(),
                                   required=(sarg.lower().lstrip() ==
                                             "required"))
        auth_request.addExtension(s)

    # Provider Authentication Policy Extension (needs python-openid >= 2.1.0).
    pape = getattr(settings, 'OPENID_PAPE', False)
    if pape:
        # BUGFIX: the original condition was
        #   openid.__version__ <= '2.0.0' and openid.__version__ >= '2.1.0'
        # which can never be true, so the guard never fired; it also raised a
        # (class, message) tuple instead of an exception instance.
        # NOTE: lexicographic comparison of version strings matches the
        # original's approach; it is adequate for the 2.x range checked here.
        if openid.__version__ < '2.1.0':
            raise ImportError(
                'For pape extension you need python-openid 2.1.0 or newer')
        p = PapeRequest()
        for parg in pape:
            if parg.lower().strip() == 'policy_list':
                for v in pape[parg].split(','):
                    p.addPolicyURI(v)
            elif parg.lower().strip() == 'max_auth_age':
                p.max_auth_age = pape[parg]
        auth_request.addExtension(p)

    # Attribute Exchange: provider-specific attribute map (Google vs default).
    OPENID_AX_PROVIDER_MAP = getattr(settings, 'OPENID_AX_PROVIDER_MAP', {})
    openid_provider = ('Google' if
                       'google' in request.session.get('openid_provider', '')
                       else 'Default')
    ax = OPENID_AX_PROVIDER_MAP.get(openid_provider)
    if ax:
        axr = AXFetchRequest()
        for attr_name, attr_url in ax.items():
            # setting all as required attrs
            axr.add(AttrInfo(attr_url, required=True))
        auth_request.addExtension(axr)

    redirect_url = auth_request.redirectURL(trust_root, redirect_to)
    return HttpResponseRedirect(redirect_url)
0
Example 100
def SOCIAL_LOGIN(self, social_login_url):
    """Log in through the DISH Network federated (social) login endpoint.

    Posts the stored USERNAME/PASSWORD to *social_login_url*, follows the
    IdP discovery redirect if one is returned, and scrapes the resulting
    SAML form.

    Returns:
        (saml_response, relay_state) tuple of strings. saml_response is
        'captcha' when the provider demanded a captcha, or '' when no
        discovery URL / SAML form was found.
    """
    # Load the persisted cookie jar and seed the Adobe-analytics cookies the
    # identity provider expects to see on the login POST.
    cj = cookielib.LWPCookieJar(os.path.join(ADDON_PATH_PROFILE, 'cookies.lwp'))
    cj.load(os.path.join(ADDON_PATH_PROFILE, 'cookies.lwp'), ignore_discard=True)
    ck = cookielib.Cookie(version=0, name='s_cc', value='true', port=None, port_specified=False, domain='identity1.dishnetwork.com', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
    cj.set_cookie(ck)
    ck = cookielib.Cookie(version=0, name='s_sq', value='synacortveauth%3D%2526pid%253DFederated%252520Login%2526pidt%253D1%2526oid%253Dauthsynacor_identity1.dishnetwork.com%2526oidt%253D3%2526ot%253DSUBMIT', port=None, port_specified=False, domain='identity1.dishnetwork.com', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
    cj.set_cookie(ck)

    # Opener impersonating an iPhone browser posting the login form.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    opener.addheaders = [("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                         ("Accept-Encoding", "gzip, deflate"),
                         ("Accept-Language", "en-us"),
                         ("Content-Type", "application/x-www-form-urlencoded"),
                         ("Proxy-Connection", "keep-alive"),
                         ("Connection", "keep-alive"),
                         ("Referer", social_login_url),
                         ("Origin", "https://identity1.dishnetwork.com"),
                         ("User-Agent", UA_IPHONE)]
    login_data = urllib.urlencode({'username': USERNAME,
                                   'password': PASSWORD,
                                   'login_type': 'username,password',
                                   'source': 'authsynacor_identity1.dishnetwork.com',
                                   'source_button': 'authsynacor_identity1.dishnetwork.com',
                                   'remember_me': 'yes'
                                   })
    resp = opener.open(social_login_url, login_data)
    #final_response = resp.read()
    print "RESP.GETURL() ==="
    print resp.geturl()
    last_url = resp.geturl()
    print "RESP.INFO() ==="
    print resp.info()
    print "RESP.GETCODE() ==="
    print resp.getcode()
    #final_response = resp.read().decode('utf-8')
    #print "RESP.READ() ==="
    #print final_response

    # Transparently gunzip the body when the server compressed it.
    if resp.info().get('Content-Encoding') == 'gzip':
        buf = StringIO(resp.read())
        f = gzip.GzipFile(fileobj=buf)
        final_response = f.read()
    else:
        final_response = resp.read()
    resp.close()
    print "FINAL RESPONSE"
    print final_response

    # Flatten to one line so FIND's substring scraping works across markup.
    final_response = final_response.replace('\n', "")
    # The login page redirects via JS: location.href = "<discovery_url>"
    discovery_url = FIND(final_response, 'location.href = "', '"')
    saml_response = ''
    relay_state = ''
    if 'captcha' in final_response:
        # Provider is challenging us; signal the caller via a sentinel value.
        saml_response = 'captcha'

    if discovery_url != '':
        # Follow the discovery redirect with the same impersonation headers,
        # but Referer now points at the post-login URL.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                             ("Accept-Encoding", "gzip, deflate"),
                             ("Accept-Language", "en-us"),
                             ("Content-Type", "application/x-www-form-urlencoded"),
                             ("Proxy-Connection", "keep-alive"),
                             ("Connection", "keep-alive"),
                             ("Referer", last_url),
                             ("Origin", "https://identity1.dishnetwork.com"),
                             #("Cookie", cookies + " s_cc=true; s_sq=synacortveauth%3D%2526pid%253DFederated%252520Login%2526pidt%253D1%2526oid%253Dauthsynacor_identity1.dishnetwork.com%2526oidt%253D3%2526ot%253DSUBMIT"),
                             ("User-Agent", UA_IPHONE)]
        resp = opener.open(discovery_url)
        idp_source = resp.read()
        print resp.geturl()
        last_url = resp.geturl()
        #print idp_source
        resp.close()

        # Re-fetch the landing URL with "&history=3" appended to reach the
        # page that carries the hidden SAML form fields.
        # NOTE(review): if the first response was not gzipped, idp_source
        # below keeps the raw (possibly compressed) first body — presumably
        # the endpoint always gzips; confirm against a live session.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                             ("Accept-Encoding", "gzip, deflate"),
                             ("Accept-Language", "en-us"),
                             ("Content-Type", "application/x-www-form-urlencoded"),
                             ("Proxy-Connection", "keep-alive"),
                             ("Connection", "keep-alive"),
                             ("Referer", last_url),
                             ("Origin", "https://identity1.dishnetwork.com"),
                             #("Cookie", cookies + " s_cc=true; s_sq=synacortveauth%3D%2526pid%253DFederated%252520Login%2526pidt%253D1%2526oid%253Dauthsynacor_identity1.dishnetwork.com%2526oidt%253D3%2526ot%253DSUBMIT"),
                             ("User-Agent", UA_IPHONE)]
        resp = opener.open(last_url + "&history=3")
        if resp.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(resp.read())
            f = gzip.GzipFile(fileobj=buf)
            idp_source = f.read()
        #idp_source = resp.read()
        print resp.geturl()
        last_url = resp.geturl()
        print idp_source
        resp.close()

        # Scrape the SAML hand-off fields out of the hidden form inputs.
        saml_response = FIND(idp_source, '<input type="hidden" name="SAMLResponse" value="', '"')
        relay_state = FIND(idp_source, '<input type="hidden" name="RelayState" value="', '"')

    #Set Global header fields
    # NOTE(review): despite the comment above, these assignments create
    # function-local names only (no `global ORIGIN, REFERER` statement), so
    # any module-level ORIGIN/REFERER are NOT updated — confirm whether this
    # is a latent bug or the module values are set elsewhere.
    ORIGIN = 'https://identity1.dishnetwork.com'
    REFERER = last_url
    #cj.save(os.path.join(ADDON_PATH_PROFILE, 'cookies.lwp'),ignore_discard=True);
    SAVE_COOKIE(cj)
    return saml_response, relay_state