requests.certs.where

Here are examples of the Python API `requests.certs.where` taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

7 Examples

Example 1

Project: udemy-dl Source File: download.py
def curl_dl(link, filename):
    """Use curl as the downloader."""
    # -C - resumes a partial download; -o writes to the target filename.
    cmd = ['curl', '-C', '-', link, '-o', filename]

    # Verify TLS against requests' bundled CA file when one is available;
    # otherwise fall back to skipping certificate verification entirely.
    bundle = requests.certs.where()
    cmd.extend(['--cacert', bundle] if bundle else ['--insecure'])
    subprocess.call(cmd)

Example 2

Project: python-heatclient Source File: http.py
def get_system_ca_file():
    """Return path to system default CA file."""
    # Candidate CA bundle locations for Debian/Ubuntu, RedHat/Fedora,
    # Suse, FreeBSD/OpenBSD, MacOSX, plus the bundle shipped with requests.
    candidates = (
        '/etc/ssl/certs/ca-certificates.crt',
        '/etc/pki/tls/certs/ca-bundle.crt',
        '/etc/ssl/ca-bundle.pem',
        '/etc/ssl/cert.pem',
        '/System/Library/OpenSSL/certs/cacert.pem',
        requests.certs.where(),
    )
    for candidate in candidates:
        LOG.debug("Looking for ca file %s", candidate)
        if os.path.exists(candidate):
            LOG.debug("Using ca file %s", candidate)
            return candidate
    # No candidate existed; callers receive None implicitly.
    LOG.warning(_LW("System ca file could not be found."))

Example 3

Project: python-monascaclient Source File: http.py
Function: get_system_ca_file
def get_system_ca_file():
    """Return path to system default CA file.

    Checks well-known CA bundle locations in order and returns the first
    one that exists on disk; returns None (implicitly) if none is found.
    """
    # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
    # Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca
    ca_path = ['/etc/ssl/certs/ca-certificates.crt',
               '/etc/pki/tls/certs/ca-bundle.crt',
               '/etc/ssl/ca-bundle.pem',
               '/etc/ssl/cert.pem',
               '/System/Library/OpenSSL/certs/cacert.pem',
               requests.certs.where()]
    for ca in ca_path:
        LOG.debug("Looking for ca file %s", ca)
        if os.path.exists(ca):
            LOG.debug("Using ca file %s", ca)
            return ca
    # Logger.warn is a deprecated alias of Logger.warning; use the
    # canonical method (this also matches the sibling client implementations).
    LOG.warning("System ca file could not be found.")

Example 4

Project: python-muranoclient Source File: http.py
Function: get_system_ca_file
def get_system_ca_file():
    """Return path to system default CA file."""
    # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
    # Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca
    paths = ['/etc/ssl/certs/ca-certificates.crt',
             '/etc/pki/tls/certs/ca-bundle.crt',
             '/etc/ssl/ca-bundle.pem',
             '/etc/ssl/cert.pem',
             '/System/Library/OpenSSL/certs/cacert.pem']
    # The CA bundle shipped with the requests library is the last resort.
    paths.append(requests.certs.where())
    for path in paths:
        LOG.debug("Looking for ca file %s", path)
        if not os.path.exists(path):
            continue
        LOG.debug("Using ca file %s", path)
        return path
    LOG.warning("System ca file could not be found.")

Example 5

Project: dx-toolkit Source File: __init__.py
def _get_pool_manager(verify, cert_file, key_file):
    """Return a urllib3 pool manager configured for the given TLS options.

    Fast path: when no per-call cert/verify overrides are supplied (and no
    DX_CA_CERT environment override exists), a single module-level manager
    is lazily built under a lock and reused across calls. Any override
    forces a fresh, uncached manager for this call only.
    """
    global _pool_manager
    # Baseline pool settings shared by both the cached and uncached paths.
    default_pool_args = dict(maxsize=32,
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=_default_certs,
                             headers=_default_headers,
                             timeout=_default_timeout)
    if cert_file is None and verify is None and 'DX_CA_CERT' not in os.environ:
        # Common case: build the shared manager once, under the mutex, so
        # concurrent callers cannot each construct their own.
        with _pool_mutex:
            if _pool_manager is None:
                if 'HTTPS_PROXY' in os.environ:
                    # Route connections through the configured HTTPS proxy.
                    proxy_params = _get_proxy_info(os.environ['HTTPS_PROXY'])
                    default_pool_args.update(proxy_params)
                    _pool_manager = urllib3.ProxyManager(**default_pool_args)
                else:
                    _pool_manager = urllib3.PoolManager(**default_pool_args)
            return _pool_manager
    else:
        # This is the uncommon case, normally, we want to cache the pool
        # manager.
        # ca_certs falls back through: explicit verify -> DX_CA_CERT env ->
        # requests' bundled CA file. Note a `verify` of False also falls
        # through here, but is overridden by CERT_NONE just below.
        pool_args = dict(default_pool_args,
                         cert_file=cert_file,
                         key_file=key_file,
                         ca_certs=verify or os.environ.get('DX_CA_CERT') or requests.certs.where())
        if verify is False or os.environ.get('DX_CA_CERT') == 'NOVERIFY':
            # Explicit opt-out of certificate verification; silence the
            # insecure-request warnings urllib3 would otherwise emit.
            pool_args.update(cert_reqs=ssl.CERT_NONE, ca_certs=None)
            urllib3.disable_warnings()
        if 'HTTPS_PROXY' in os.environ:
            proxy_params = _get_proxy_info(os.environ['HTTPS_PROXY'])
            pool_args.update(proxy_params)
            return urllib3.ProxyManager(**pool_args)
        else:
            return urllib3.PoolManager(**pool_args)

Example 6

Project: IkaLog Source File: certifi.py
Function: where
    @staticmethod
    def where():
        """Locate a usable CA bundle (cacert.pem), or return None.

        Tries, in order: the bundle shipped with IkaLog, the certifi
        package, then the bundle inside the requests package.
        """
        # Prefer the bundle distributed alongside IkaLog itself.
        bundled = IkaUtils.get_path('cacert.pem')
        if os.path.exists(bundled):
            return bundled

        # Optional dependency: certifi's maintained CA bundle.
        try:
            import certifi
            candidate = certifi.where()
            if os.path.exists(candidate):
                return candidate
        except ImportError:
            pass

        # Optional dependency: the CA bundle vendored by requests.
        try:
            import requests.certs
            candidate = requests.certs.where()
            if os.path.exists(candidate):
                return candidate
        except ImportError:
            pass

        IkaUtils.dprint('ikalog.utils.Certifi: Cannot find any cacert.pem')
        return None

Example 7

Project: redditDataExtractor Source File: redditDataExtractor.py
    def __init__(self):
        """Set up the reddit session, default lists, filters, and settings."""
        self._r = praw.Reddit(user_agent='Data Extractor for reddit v1.0 by /u/VoidXC')
        # set validate_certs to a path that is always valid for us (even when frozen with cx_Freeze)
        self._r.http.validate_certs = 'RedditDataExtractor/cacert.pem'
        # domains that are specifically targeted to work for downloading external content
        self._supportedDomains = ['imgur', 'minus', 'vidble', 'gfycat']
        # This is a regex to parse URLs, courtesy of John Gruber, http://daringfireball.net/2010/07/improved_regex_for_matching_urls
        # https://gist.github.com/gruber/8891611
        # NOTE(review): the raw string below contains a literal line break
        # before "?!@" — this looks like a copy/paste artifact that splits a
        # "(?!@)" group; confirm against the upstream gist. Also, the pattern
        # already starts with "(?i)", so the re.IGNORECASE flag is redundant.
        self._urlFinder = re.compile(
            r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(
?!@)))""",
            re.IGNORECASE)
        # Telling PRAW to get all the comments for a submission is expensive API-call wise. Avoid doing it more than once
        # for a single submission
        self._commentCache = {}

        # Default download destination: the user's ~/Downloads directory.
        self.defaultPath = pathlib.Path(os.path.expanduser('~')) / 'Downloads'

        # The list of default subs to start with on a fresh run of the program
        # NOTE(review): "earthpern" looks like a typo of a subreddit name
        # (possibly "earthporn") — confirm before relying on this default.
        self.subredditLists = {'Default Subs': ListModel(
            [Subreddit("adviceanimals"), Subreddit("aww"), Subreddit("books"),
             Subreddit("earthpern"), Subreddit("funny"), Subreddit("gaming"),
             Subreddit("gifs"), Subreddit("movies"), Subreddit("music"),
             Subreddit("pics"),
             Subreddit("science"), Subreddit("technology"), Subreddit("television"),
             Subreddit("videos"), Subreddit("wtf")], Subreddit)}
        self.userLists = {'Default User List': ListModel([], User)}

        # Names of the list currently selected in the UI for each category.
        self.currentSubredditListName = 'Default Subs'
        self.currentUserListName = 'Default User List'

        # Names of the lists restored when settings are reset.
        self.defaultSubredditListName = 'Default Subs'
        self.defaultUserListName = 'Default User List'

        # Set later if the user supplies an imgur API client id.
        self.imgurAPIClientID = None

        # Restrict certain actions while program is downloading using this boolean
        self.currentlyDownloading = False

        # Filter stuff
        # Maps UI operator labels to the callables that implement them.
        self.operMap = {"Equals": operator.eq, "Does not equal": operator.ne, "Begins with": beginWith,
                        "Does not begin with": notBeginWith, "Ends with": endWith,
                        "Does not end with": notEndWith, "Greater than": operator.gt,
                        "Less than": operator.lt, "Contains": operator.contains,
                        "Does not contain": notContain, "Equals bool": equalsBool}

        # Which operator labels are valid for each property type.
        self.validOperForPropMap = {"boolean": {"Equals bool"},
                                    "number": {"Equals", "Does not equal", "Greater than", "Less than"},
                                    "string": {"Equals", "Does not equal", "Begins with", "Does not begin with",
                                               "Ends with", "Does not end with", "Greater than", "Less than",
                                               "Contains", "Does not contain"}}

        # Combinators used to join multiple filters together.
        self.connectMap = {"And": all, "Or": any, "Xor": xorLst}
        self.submissionFilts = []
        self.commentFilts = []
        self.connector = None

        # Default setting values that can be set by the user in the settings panel
        self.filterExternalContent = False
        self.filterSubmissionContent = False
        self.downloadType = DownloadType.USER_SUBREDDIT_CONSTRAINED
        self.avoidDuplicates = True
        self.getExternalContent = False
        self.getCommentExternalContent = False
        self.getSelftextExternalContent = False
        self.getSubmissionContent = True
        self.subSort = 'hot'
        self.subLimit = 10
        self.restrictDownloadsByCreationDate = True
        self.showImgurAPINotification = True
        self.avoidVideos = False
        self.getAuthorsCommentsOnly = False

        # Copy requests' CA bundle to the path assigned to validate_certs
        # above, best-effort.
        # NOTE(review): the bare except silently swallows ALL errors here
        # (including KeyboardInterrupt) — consider narrowing to OSError.
        try:
            path = pathlib.Path('RedditDataExtractor')
            if not path.exists():
                path.mkdir(parents=True)
            shutil.copyfile(requests.certs.where(), str(path / 'cacert.pem'))
        except:
            pass