urllib2.urlopen.read

Here are examples of the Python API urllib2.urlopen.read taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
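
Before the project examples, a minimal sketch of the pattern most of them share: open a URL, read the whole response body as a string, and often decode it as JSON. This assumes Python 2 (where urllib2 lives; Python 3 moved it to urllib.request), and the URL below is a placeholder used only for illustration.

import json
import urllib2

url = 'http://example.com/data.json'  # placeholder URL, for illustration only
try:
    # urlopen() returns a file-like response object; read() returns the body as a str
    body = urllib2.urlopen(url, timeout=10).read()
    data = json.loads(body)
except urllib2.URLError as e:
    # network failures (and HTTPError, its subclass) surface here
    data = None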

170 Examples

Example 1

Project: openjumo Source File: etl.py
def html_to_story(doc, strip_control_chars=True):
    try:
        # Send the HTML over to Data Science Toolkit
        story = urllib2.urlopen( '/'.join([settings.DSTK_API_BASE, 'html2story']), data=doc).read()
        
        story = json.loads(story).get('story', '')
        
        if strip_control_chars:
            story = strip_control_characters(story)
        return story
    except urllib2.URLError, e:
        return ''

Example 2

Project: OSCAAR Source File: post_setup.py
def to_do_at_exit():
    if not hasattr(sys, 'real_prefix'):
        #import subprocess
        #subprocess.check_call(['python', 'registration.py'])
        import re
        import urllib2
        url = urllib2.urlopen("https://github.com/OSCAAR/OSCAAR/commits/" \
                              "master").read()
        sha = re.search('href="/OSCAAR/OSCAAR/commit/[a-z0-9]*"', 
                        str(url)).group(0).rpartition("/")[2]
        with open(os.path.join(os.path.dirname(oscaar.__file__),'__init__.py'),
                  "a") as myfile:
            myfile.write("\n__sha__ = \"%s\"" % sha)
        
        from oscaar import registration

Example 3

Project: huxley Source File: pr.py
@task
def patch(number):
    '''Fetch the branch associated with the given pull request.'''
    url = 'https://api.github.com/repos/bmun/huxley/pulls/%s' % number
    pr = json.loads(urllib2.urlopen(url).read())
    author = pr['head']['user']['login']
    repo = pr['head']['repo']['clone_url']
    branch = pr['head']['ref']

    print ui.info('Checking out %s from %s...' % (branch, author))
    local('git fetch %s %s:%s' % (repo, branch, branch))
    local('git checkout %s' % branch)

    return (author, branch)

Example 4

Project: sogouWeixin Source File: middleware.py
Function: get_proxies
    def __getproxies__(self):
        PROXY_SERVICE = r"http://172.16.80.64:50000/select?num=500&speed=0.5"
        
        data = urllib2.urlopen(PROXY_SERVICE).read()
        proxies = json.loads(data)['ips']
        
        pxy = self.__zhengli__(proxies)
        res = []
        for proxy in pxy:
            ip_port = {'ip_port': "http://" + str(proxy[u'ip']) + ":" + str(proxy[u'port'])}
            res.append(ip_port)
                
        return res

Example 5

Project: redis-completion Source File: stocks.py
Function: load_data
def load_data():
    url = 'http://media.charlesleifer.com/downloads/misc/NYSE.txt'
    contents = urllib2.urlopen(url).read()
    for row in contents.splitlines()[1:]:
        ticker, company = row.split('\t')
        engine.store_json(ticker, company, {'ticker': ticker, 'company': company}) # id, search phrase, data

Example 6

Project: luna Source File: updateservice.py
    def do_update(self, update):
        file_path = update.file_path
        with open(file_path, 'wb') as asset:
            asset.write(urllib2.urlopen(update.asset_url).read())
            asset.close()
        zip_file = zipfile.ZipFile(file_path)
        zip_file.extractall(xbmcaddon.Addon().getAddonInfo('path'), self._get_members(zip_file))

        xbmcgui.Dialog().ok(
            self.core.string('name'),
            'Luna has been updated to version %s and will now relaunch.' % update.update_version
        )

        xbmc.executebuiltin('RunPlugin(\'script.luna\')')

Example 7

Project: chronology Source File: load_test_elections_data.py
def load_test_data(args):
  donations = ZipFile(StringIO(urllib2.urlopen(DONATIONS_FILE_URL).read()))
  donations = StringIO(donations.read('%s.csv' % DONATIONS_FILE_NAME))

  events = []
  rows = csv.DictReader(donations)
  for row in rows:
    row[TIMESTAMP_FIELD] = parse(row['contb_receipt_dt'])
    events.append(row)

  kc = KronosClient(args.kronos_url)
  kc.put({'donations': events})

Example 8

Project: runbook Source File: stathat.py
    def _send_inner(self, endpoint, data, silent=False):
        try:
            return urllib2.urlopen(endpoint, urllib.urlencode(data)).read()
        except urllib2.URLError:
            # We want to surface the error on non-async requests
            if not silent:
                raise
            return None

Example 9

Project: firestarter Source File: currency.py
def get_btc_rate():
	current_time = timezone.make_aware(datetime.datetime.now(), timezone.get_default_timezone())
	if not Value.objects.filter(type='BTC'):
		Value.objects.create(type='BTC', value=json.loads(urllib2.urlopen('https://api.bitcoinaverage.com/ticker/USD').read())['24h_avg'], update=True)
		return Value.objects.filter(type='BTC')[0].value
	elif ((current_time - Value.objects.filter(type='BTC')[0].created_at).days >= 1) and Value.objects.filter(type='BTC')[0].update:
		try:
			data = json.loads(urllib2.urlopen('https://api.bitcoinaverage.com/ticker/USD').read())
			v = Value.objects.filter(type='BTC')[0]
			v.value = data['24h_avg']
			v.created_at = current_time
			v.save()
		except:
			pass
		return Value.objects.filter(type='BTC')[0].value
	else:
		return Value.objects.filter(type='BTC')[0].value

Example 10

Project: urllib3 Source File: fetch_gae_sdk.py
def get_gae_versions():
    try:
        version_info_json = urllib2.urlopen(_SDK_URL).read()
    except:
        return {}
    try:
        version_info = json.loads(version_info_json)
    except:
        return {}
    return version_info.get('items', {})

Example 11

Project: KataTrainReservation Source File: python2_guiding_test.py
Function: test_reserve_seats_via_post
    def test_reserve_seats_via_POST(self):
        form_data = {"train_id": "express_2000", "seat_count": 4}
        data = urllib.urlencode(form_data)
        
        response = urllib2.urlopen(url + "/reserve", data=data).read()
        reservation = json.loads(response)
        
        assert "express_2000" == reservation["train_id"]
        assert 4 == len(reservation["seats"])
        assert "1A" == reservation["seats"][0]
        assert "75bcd15" == reservation["booking_reference"]

Example 12

Project: CuckooSploit Source File: community.py
Function: download_archive
def download_archive():
    print("Downloading modules from {0}".format(URL))

    try:
        data = urllib2.urlopen(URL).read()
    except Exception as e:
        print("ERROR: Unable to download archive: %s" % e)
        sys.exit(-1)

    zip_data = StringIO()
    zip_data.write(data)
    archive = ZipFile(zip_data, "r")
    temp_dir = tempfile.mkdtemp()
    archive.extractall(temp_dir)
    archive.close()
    final_dir = os.path.join(temp_dir, os.listdir(temp_dir)[0])

    return temp_dir, final_dir

Example 13

Project: terminal-tools Source File: reactive.py
def get_terminal_details(user_token, access_token, subdomain):
    output = json.loads(urllib2.urlopen('https://www.terminal.com/api/v0.1/get_terminal',
                                        urllib.urlencode({
                                            'user_token': user_token,
                                            'access_token': access_token,
                                            'subdomain': subdomain,
                                        })).read())
    return output

Example 14

Project: rednotebook Source File: utils.py
def get_new_version_number():
    """
    Reads version number from website and returns None if it cannot be read
    """
    version_pattern = re.compile(r'<span id="download-version">(.+)</span>')

    try:
        project_xml = urlopen('http://rednotebook.sourceforge.net/index.html').read()
        match = version_pattern.search(project_xml)
        if not match:
            return None
        new_version = match.group(1)
        new_version = StrictVersion(new_version)
        logging.info('%s is the latest version' % new_version)
        return new_version
    except (IOError, httplib.HTTPException):
        return None

Example 15

Project: rank-es Source File: scores.py
def get_fb_score(url):
    """Returns Facebook score for the given URL.
    Please note: http://stackoverflow.com/questions/5699270/how-to-get-share-counts-using-graph-api
    
    """
    
    try:
        fb_url = 'http://graph.facebook.com/' + url
        d = urllib2.urlopen(fb_url).read()
        j = json.JSONDecoder().decode(d)
        if 'shares' in j:   
            return j['shares']
        else:
            return 0
    except:
        return 0

Example 16

Project: LEHome Source File: Sound.py
Function: play
def play(path, inqueue=False, channel='default', loop=-1):
    url = get_play_request_url(path, inqueue, channel, loop)
    if url is None:
        return
    INFO("sending audio url: " + url)
    try:
        response = urllib2.urlopen(url).read()
    except urllib2.HTTPError, e:
        INFO(e)
        WARN("audio server address is invaild")
    except urllib2.URLError, e:
        INFO(e)
        WARN("audio server unavailable.")
    else:
        INFO("audio response: " + response)

Example 17

Project: prometapi Source File: models.py
def fetch_prikljucki():
    url = get_lokacije_url('prikljucki')
    original_data = urllib2.urlopen(url).read()

    json = {
        'updated': time.time(),
        'copyright': COPYRIGHT_PROMET,
        'prikljucki': _transform_dataset(original_data),
        }
    return original_data, json

Example 18

Project: api-v1-client-python Source File: util.py
Function: call_api
def call_api(resource, data=None, base_url=BASE_URL):
    try:
        payload = None if data is None else urlencode(data)
        if py_version >= 3 and payload is not None:
            payload = payload.encode('UTF-8')
        response = urlopen(base_url + resource, payload, timeout=TIMEOUT).read()
        return handle_response(response)
            
    except HTTPError as e:
        raise APIException(handle_response(e.read()), e.code)

Example 19

Project: watchdog Source File: capitolwords.py
Function: get_json
def _get_json(url):
    try:
        response = urllib2.urlopen(url).read()
        return json.loads(response)
    except urllib2.HTTPError, e:
        raise CwodApiError('Invalid Request')
    except ValueError, e:
        raise CwodApiError('Invalid Response')

Example 20

Project: Minecraft-Overviewer Source File: genPOI.py
    def get_name_from_uuid(self):
        sname = self._name.replace('-','')
        try:
            profile = PlayerDict.uuid_cache[sname]
            return profile['name']
        except (KeyError,):
            pass

        try:
            profile = json.loads(urllib2.urlopen(UUID_LOOKUP_URL + sname).read())
            if 'name' in profile:
                PlayerDict.uuid_cache[sname] = profile
                return profile['name']
        except (ValueError, urllib2.URLError):
            logging.warning("Unable to get player name for UUID %s", self._name)

Example 21

Project: cloud-services-notifications Source File: utils.py
def download_image_to_tmp(url):
    filename = url.replace('http://', '0_')
    filename = filename.replace('/', '_')
    fullname = os.path.join(tempfile.gettempdir(), filename)

    if os.path.exists(fullname):
        return fullname
        
    f = urllib2.urlopen(url).read()

    fich = open(fullname, 'w+')
    fich.write(f)
    fich.close()
    
    return fullname

Example 22

Project: tumblr-backup Source File: tumblrbackup.py
Function: get_posts
def get_posts(blog_url, api_key, limit=20, offset=0, page=1):
    """Return JSON of a paginated set of posts."""
    api_url = 'http://api.tumblr.com/v2/blog/{0}/posts?api_key={1}&limit={2}&offset={3}'
    api_url = api_url.format(blog_url, api_key, limit, offset)

    json_obj = json.loads(urllib2.urlopen(api_url).read())
    total = json_obj['response']['total_posts']

    if 0 == total:
        return False

    total_left = max(0, total - (page * limit))
    response = {}
    response['posts'] = json_obj['response']['posts']
    response['pages_left'] = math.ceil(float(total_left) / float(limit))
    return response

Example 23

Project: autotest Source File: source.py
    def get_ahref_list(self, url, pattern):
        self.reset(url, pattern)
        self.feed(urllib2.urlopen(url).read())
        self.close()

        return self.links

Example 24

Project: walrus Source File: stocks.py
Function: load_data
def load_data():
    url = 'http://media.charlesleifer.com/blog/downloads/misc/NYSE.txt'
    contents = urllib2.urlopen(url).read()
    for row in contents.splitlines()[1:]:
        ticker, company = row.split('\t')
        autocomplete.store(
            ticker,
            company,
            {'ticker': ticker, 'company': company})

Example 25

Project: SickRage Source File: torrentparser.py
    def _get_raw_torrent(self):
        """Get raw torrent data by determining what self.torrent is"""
        # already raw?
        if self._is_raw():
            self.file_type = "raw"
            self._raw_torrent = self.torrent
            return
        # local file?
        if os.path.isfile(self.torrent):
            self.file_type = "file"
            self._raw_torrent = open(self.torrent, "rb").read()
        # url?
        elif re.search("^(http|ftp):\/\/", self.torrent, re.I):
            self.file_type = "url"
            self._raw_torrent = urlopen(self.torrent).read()

Example 26

Project: encore.ai Source File: get_all_songs.py
Function: download_songs
def download_songs(url):
  time.sleep(random.random() * 0.5)
  try:
    page = urllib2.urlopen(url).read()
    soup = BeautifulSoup(page, 'html.parser')

    # Get the artist name
    artist_name = soup.findAll('h1')[0].get_text()[:-7].lower().replace(' ', '_')

    # Store all songs for a given artist
    with open('artist_data/'+artist_name+'.txt', 'wb') as w:
      for song in soup.findAll('a', {'target': '_blank'}):
        if 'lyrics/' in song['href']:
          song_url = song['href'][1:].strip()
          w.write(song_url + '\n')
  except urllib2.HTTPError:
    print '404 not found'

Example 27

Project: learning-python Source File: test_tuling123.py
Function: send
    def send(self, info):
        url = self.apiurl + 'key=' + self.key + '&' + 'info=' + info
        re = urllib2.urlopen(url).read()
        re_dict = json.loads(re)
        text = re_dict['text']
        print '- ', text
        self.get()

Example 28

Project: crawlers Source File: crawl.py
Function: get_page
def get_page(url, htmldir):
    try:
        page_in_txt = urllib2.urlopen(url).read()
    except urllib2.URLError:
        print 'Connection time out. Retrying in 10 seconds... %s' % url
        sleep(10)
        return get_page(url, htmldir)

    idx = url.find('memCode=')
    if idx != -1:
        filename = '%s/%s.html' % (htmldir, url[idx + len('memCode='):])
    else:
        filename = '%s/index.html' % htmldir

    with open(filename, 'w') as f:
        f.write(page_in_txt)
    return page_in_txt.decode(PAGE_ENC)

Example 29

Project: poyonga Source File: client.py
Function: call_http
    def _call_http(self, cmd, **kwargs):
        domain = [self.protocol, "://", self.host, ":", str(self.port), self.prefix_path]
        url = "".join(domain) + cmd
        if kwargs:
            url = "".join([url, "?", urlencode(kwargs)])
        try:
            _data = urlopen(url).read()
        except HTTPError as msg:
            _data = msg.read()
        return _data

Example 30

Project: toproxy Source File: test.py
Function: test
    def test(self):
        base_url = '//xiaorui.cc/'
        urllib2.urlopen('https:' + base_url + 'get').read()
        urllib2.urlopen('http:' + base_url + 'get').read()
        urllib2.urlopen('https:' + base_url + 'post', '').read()
        urllib2.urlopen('http:' + base_url + 'post', '').read()

Example 31

Project: jabbapylib Source File: midnigh_comm.py
Function: main
def main():
    text = urllib2.urlopen(URL).read()
    soup = BeautifulSoup(text)
    for tag in soup.findAll('div', {'class': 'description'}):
        desc = tag.text
        result = re.search('^(Midnight Commander v.*\(stable release\))', desc)
        if result:
            latest = result.group(1)
            
    print latest

Example 32

Project: py3status Source File: whatismyip.py
Function: get_my_ip
    def _get_my_ip(self):
        """
        """
        try:
            ip = urlopen(self.url, timeout=self.timeout).read()
            ip = ip.decode('utf-8')
        except Exception:
            ip = None
        return ip

Example 33

Project: plugin.video.kmediatorrent Source File: immunicity.py
def config():
    global _config
    if not _config:
        with shelf("kmediatorrent.immunicity.pac_config", ttl=CACHE) as pac_config:
            plugin.log.info("Fetching Immunicity PAC file")
            pac_data = urllib2.urlopen(PAC_URL).read()
            pac_config["server"] = re.search(r"var proxyserver = '(.*)'", pac_data).group(1)
            pac_config["domains"] = map(lambda x: x.replace(r"\Z(?ms)", ""), map(fnmatch.translate, re.findall(r"\"(.*?)\",", pac_data)))
            _config = pac_config
    return _config

Example 34

Project: digital-signage-client Source File: utils.py
Function: shorten
@memoize
def shorten(url):
    """Minimalist URL shortener using SAPO services"""
    u = '?'.join(('http://services.sapo.pt/PunyURL/GetCompressedURLByURL',urllib.urlencode({'url':url})))
    try:
        x = xml.dom.minidom.parseString(urllib2.urlopen(u).read())
        return x.getElementsByTagName('ascii')[0].firstChild.data
    except:
        return url

Example 35

Project: RPi-chromium Source File: run_omxplayer.py
Function: check_update
def check_update():
    new_py = urllib2.urlopen("https://raw.githubusercontent.com/kusti8/RPi-chromium/master/native/run_omxplayer.py").read()
    old_py = open("/usr/bin/run_omxplayer.py", 'r').read()
    if new_py != old_py:
        open("run_omxplayer.py", 'w').write(new_py)
        subprocess.call("sudo mv run_omxplayer.py /usr/bin/run_omxplayer.py && sudo chmod +x /usr/bin/run_omxplayer.py", shell=True)
    new_man = urllib2.urlopen("https://raw.githubusercontent.com/kusti8/RPi-chromium/master/native/run_omx.json").read()
    old_man = open("/etc/chromium-browser/native-messaging-hosts/run_omx.json", 'r').read()
    if new_man != old_man:
        open("run_omx.json", "w").write(new_man)
        subprocess.call("sudo mv run_omx.json /etc/chromium-browser/native-messaging-hosts/run_omx.json", shell=True)
    subprocess.call("update-ytdl", shell=True)

Example 36

Project: gae-twibot Source File: api.py
    def initialize(self):
        # Get user info
        req = self.oauth.oauth_request(
            self.url["account"]["verify_credentials"])
        xml = urllib2.urlopen(req).read()
        self.user = twitterxml.xmlparse(xml)
        
        # Get rate limit
        limit = self.rate_limit()
        self.ratelimit_limit = int(limit["hourly-limit"])
        self.ratelimit_remaining = int(limit["remaining-hits"])
        self.ratelimit_reset = datetime.datetime.fromtimestamp(
            int(limit["reset-time-in-seconds"]))

        iplimit = self.rate_limit(ip_limit = True)
        self.ratelimit_iplimit = int(iplimit["hourly-limit"])
        self.ratelimit_ipremaining = int(iplimit["remaining-hits"])
        self.ratelimit_ipreset = datetime.datetime.fromtimestamp(
            int(iplimit["reset-time-in-seconds"]))

Example 37

Project: adhocracy Source File: microblog.py
@memoize('short_url')
def shorten_url(url):
    try:
        query = urlencode({
            'login': config.get('adhocracy.bitly.login',
                                DEFAULT_SHORTENER_USER),
            'apiKey': config.get('adhocracy.bitly.key',
                                 DEFAULT_SHORTENER_KEY),
            'longUrl': url.encode('utf-8'),
            'format': 'json',
            'version': '2.0.1'})
        request_url = SHORTENER_URL + "?" + str(query)
        data = json.loads(urlopen(request_url).read())
        if not data['statusCode'] == 'OK':
            return url
        return data['results'][url]['shortUrl']
    except:
        return url

Example 38

Project: pkgtools Source File: pypi.py
Function: retrieve
    def retrieve(self, version=None, req_func=None, timeout=None):
        '''
        Retrieve the raw data from PyPI and loads the JSON into a Python
        dictionary.
        '''
        def _request(url, timeout=None):
            return urllib2.urlopen(url, timeout=timeout).read()
        if req_func is None:
            req_func = _request
        url = self.URL.format(self.package_name + ('/{0}'.format(version)
                                                   if version else ''))
        data = req_func(url, timeout).decode('utf-8')
        json_data = json.loads(data)
        return json_data

Example 39

Project: opendr Source File: utils.py
Function: wget
def wget(url, dest_fname=None):
    import urllib2
    from os.path import split, join

    curdir = split(__file__)[0]
    if dest_fname is None:
        dest_fname = join(curdir, split(url)[1])

    try:
        contents = urllib2.urlopen(url).read()
    except:
        raise Exception('Unable to get url: %s' % (url,))
    open(dest_fname, 'w').write(contents)

Example 40

Project: pyoko Source File: schema_update.py
def get_schema_from_solr(index_name):
    url = 'http://%s:8093/internal_solr/%s/admin/file?file=%s.xml' % (settings.RIAK_SERVER,
                                                                      index_name, index_name)
    try:
        return urlopen(url).read()
    except HTTPError as e:
        if e.code == 404:
            return ""
        else:
            raise

Example 41

Project: monasca-agent Source File: rabbitmq.py
Function: get_data
    @staticmethod
    def _get_data(url):
        try:
            data = json.loads(urllib2.urlopen(url).read())
        except urllib2.URLError as e:
            raise Exception('Cannot open RabbitMQ API url: %s %s' % (url, str(e)))
        except ValueError as e:
            raise Exception('Cannot parse JSON response from API url: %s %s' % (url, str(e)))
        return data

Example 42

Project: ngcccbase Source File: blockchain.py
    def get_tx_confirmations(self, txhash):
        try:
            url = "https://blockchain.info/rawtx/%s" % txhash
            data = json.loads(urllib2.urlopen(url).read())
            if 'block_height' in data:
                block_count = self.get_block_count()
                return block_count - data['block_height'] + 1
            else:
                return 0
        except Exception as e:
            return None

Example 43

Project: Cat-Interop Source File: pavement.py
@task
def setup():
    """setup install"""

    if not os.path.exists(options.app.build):
        options.app.build.mkdir()
    if not os.path.exists(options.app.build / 'LinkPropertyLookupTable.csv'):
        if options.app.version.endswith('-dev'):  # master
            url = options.app.voc % 'master'
        else:
            url = options.app.voc % options.app.version
        with open(options.app.build / 'LinkPropertyLookupTable.csv', 'w') as fileobj:
            fileobj.write(urlopen(url).read())

Example 44

Project: pulp Source File: utils.py
Function: fetch
def fetch(url, storage_dir):
    name = file_name_from_url(url)
    path, handle = file_path_and_handle(storage_dir, name)

    body = urllib2.urlopen(url).read()

    handle.write(body)
    handle.close()

    return name

Example 45

Project: nocrack Source File: honey_client.py
def get_static_domains( *args ):
    h_string =  """
get the mapping of domains to index. Advanced level command!
./honey_client -getdomainhash <email> <token>
e.g. ./honey_client -v [email protected] 'ThisIsTheToken007+3lL='
"""
    if len(args)<2:
        return h_string
    data = {'username' : args[0],
            'token' : args[1].strip("'"),
            }
    req = create_request('getdomains', data)
    return urllib2.urlopen(req).read()

Example 46

Project: codenn Source File: bootstrap.py
Function: fetch
def fetch(url, targets):
    blob = urllib2.urlopen(url).read()
    gz = gzip.GzipFile(fileobj=StringIO(blob))
    tar = tarfile.TarFile(fileobj=gz)
    tmpdir = tempfile.mkdtemp()
    try:
        tar.extractall(tmpdir)
        for src, dest in targets:
            dest = os.path.join(LIB_DIR, dest)
            if os.path.isdir(dest):
                shutil.rmtree(dest)
            shutil.copytree(os.path.join(tmpdir, src), dest)
    finally:
        shutil.rmtree(tmpdir)

Example 47

Project: python-nytcongressapi Source File: nytcongressapi.py
Function: api_call
    @staticmethod
    def _apicall(path, params):
        # fix to allow for keyword args
        if params:
            url = "http://api.nytimes.com/svc/politics/v3/us/legislative/congress/%s.json?api-key=%s&%s" % (path, nytcongress.api_key, urllib.urlencode(params))
        else:
            url = "http://api.nytimes.com/svc/politics/v3/us/legislative/congress/%s.json?api-key=%s" % (path, nytcongress.api_key)
        if nytcongress.api_key is None:
            raise NYTCongressApiError('You did not supply an API key')        
        try:
            response = urllib2.urlopen(url).read()
            return json.loads(response)['results']
        except urllib2.HTTPError, e:
            raise NYTCongressApiError(e.read())
        except (ValueError, KeyError), e:
            raise NYTCongressApiError('Invalid Response')

Example 48

Project: keeleysam-recipes Source File: ParallelsURLProvider.py
Function: main
    def main(self):

        def compare_version(a, b):
            return cmp(LooseVersion(a), LooseVersion(b))

        valid_prods = URLS.keys()
        prod = self.env.get("product_name")
        if prod not in valid_prods:
            raise ProcessorError(
                "product_name %s is invalid; it must be one of: %s" %
                (prod, valid_prods))
        url = URLS[prod]
        try:
            manifest_str = urllib2.urlopen(url).read()
        except BaseException as e:
            raise ProcessorError(
                "Unexpected error retrieving product manifest: '%s'" %
                e)

        the_xml = xml.dom.minidom.parseString(manifest_str)
        products = the_xml.getElementsByTagName('Product')
        parallels = None
        for a_product in products:
            # Find the products that are 'Parallels Desktop'
            if a_product.getElementsByTagName(
                    'ProductName')[0].firstChild.nodeValue == u'Parallels Desktop':
                parallels = a_product
                v_major = parallels.getElementsByTagName(
                    'Major')[0].firstChild.nodeValue
                v_minor = parallels.getElementsByTagName(
                    'Minor')[0].firstChild.nodeValue
                v_sub_minor = parallels.getElementsByTagName(
                    'SubMinor')[0].firstChild.nodeValue
                v_sub_sub_minor = parallels.getElementsByTagName(
                    'SubSubMinor')[0].firstChild.nodeValue
                version = '.'.join(
                    [v_major, v_minor, v_sub_minor, v_sub_sub_minor])
                update = parallels.getElementsByTagName('Update')[0]
                try:
                    description = [x.firstChild.nodeValue for x in update.getElementsByTagName(
                        'UpdateDescription') if x.firstChild.nodeValue.startswith('en_US')][0]
                    description = '<html><body>%s</body></html>' % (
                        description.split('#', 1)[-1])
                except:
                    description = [x.firstChild.nodeValue for x in update.getElementsByTagName(
                        'UpdateDescription')][0]
                url = update.getElementsByTagName(
                    'FilePath')[0].firstChild.nodeValue

        self.env["version"] = version
        self.env["description"] = description
        self.env["url"] = url
        self.output("Found URL %s" % self.env["url"])

Example 49

Project: plone.app.event Source File: importer.py
    @button.buttonAndHandler(u'Save and Import')
    def handleSaveImport(self, action):
        data, errors = self.extractData()
        if errors:
            return False

        self.save_data(data)

        ical_file = data['ical_file']
        ical_url = data['ical_url']
        event_type = data['event_type']
        sync_strategy = data['sync_strategy']

        if ical_file or ical_url:

            if ical_file:
                # File upload is not saved in settings
                ical_resource = ical_file.data
                ical_import_from = ical_file.filename
            else:
                ical_resource = urllib2.urlopen(ical_url).read()
                ical_import_from = ical_url

            import_metadata = ical_import(
                self.context,
                ics_resource=ical_resource,
                event_type=event_type,
                sync_strategy=sync_strategy,
            )

            count = import_metadata['count']

            IStatusMessage(self.request).addStatusMessage(
                _('ical_import_imported',
                  default=u"${num} events imported from ${filename}",
                  mapping={'num': count, 'filename': ical_import_from}),
                'info')

        else:
            IStatusMessage(self.request).addStatusMessage(
                _('ical_import_no_ics',
                  default=u"Please provide either a icalendar ics file or a "
                          u"URL to a file."), 'error')

        self.request.response.redirect(self.context.absolute_url())

Example 50

Project: pwn_plug_sources Source File: plecost-0.2.2-9-beta.py
    def pluginlist_generate(self):
        '''
        Create popular plugin list
        '''
        url_count = 1
        plugin_count = 0
        plugin_cve = CVE()
        if not os.path.isfile(CVE_file):
            plugin_cve.CVE_list("wordpress")
        stats = os.stat(CVE_file)
        if int(time.time()) - int(stats[8]) > ttl_cvelist:
            print ""
            print "- CVE file is too old. Reload now?[y/n]:",
            opt = sys.stdin.readline()
            if opt.strip() == "y":
                print ""
                print "- Really?[y/n]:",
                opt = sys.stdin.readline()
                if opt.strip() == "y":
                    print ""
                    print "- Reloading CVE list... by patient"
                    plugin_cve.CVE_list("wordpress")
        else:
            print "- Maybe later."
        plugin_cve.CVE_loadlist()
        try:
            wp_file = file(PluginList,"w")
        except IOError:
            print ""
            print "[!] Error opening file: \"" + PluginList + "\""
            print ""
            sys.exit(-1)
        final_countdown = 1
        end = 0
        tmpCount = 0
        while True:
            try:
                wpage = urllib2.urlopen(WPlug_URL+"/"+str(url_count)+"/").read()
            except URLError:
                print ""
                print "[!] Web site of plugin is not accesible."
                print ""
                sys.exit(-1)
            url_count += 1
            wpsoup = BeautifulSoup(wpage)
            if str(wpsoup).find('plugin-block') == -1:
                print "Wordpress plugin list end:"
                break
            for ana in wpsoup.findAll('a'):
                plugin_url = ana["href"]
                if plugin_url.find("wordpress.org/extend/plugins/") != -1 and plugin_url.find("popular") == -1 and plugin_url.find("tags") == -1 and plugin_url.find("google.com") == -1 and plugin_url.find(".php") == -1:
                    plugin_count += 1
                    if (plugin_url.split('/')[5] != '' ):
                        name = plugin_url.split('/')[5]
                        if len(ana.findNext('li').contents) == 2:
                            version = ana.findNext('li').contents[1]
                        if name != "tac":
                            cves = plugin_cve.CVE_search(plugin_url.split('/')[5])
                        cves_l = ""
                        for l in cves:
                            cve_a = l+";"
                            cves_l = cves_l + cve_a
                        if type(version) == unicode:
                            version = unicode(version, errors='replace')
                        else:
                            pass
                        u_version = version.encode('utf-8')
                        try:
                            wp_file.write(name+","+u_version+","+cves_l+"\n")
                        except Exception:
                            pass
                if int(NumChecks) != -1 and (plugin_count - 1) == int(NumChecks):
                    end = 1
                    break
            if end == 1:
                break

            if tmpCount == 1:
                print plugin_count,
                print " plugins stored. Last plugin processed: " + name
                sys.stdout.flush()
                tmpCount = 0
            else:
                tmpCount += 1
        wp_file.close()