json.loads

Here are examples of the Python API json.loads, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

171 Examples
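
Before the examples, a quick refresher: json.loads parses JSON text into the corresponding Python objects (JSON objects become dicts, arrays become lists) and raises ValueError on malformed input; since Python 3.5 the more specific json.JSONDecodeError is raised, which subclasses ValueError. A minimal sketch:

import json

raw = '{"cid": 12345, "title": "example", "tags": ["a", "b"]}'
try:
    obj = json.loads(raw)                # a JSON object becomes a dict
except ValueError as e:                  # json.JSONDecodeError subclasses ValueError
    raise SystemExit('not valid JSON: %s' % e)
print(obj['title'])                      # -> example
print(obj['tags'])                       # -> ['a', 'b']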

Example 1

Project: BiliDan Source File: bilidan.py
def biligrab(url, *, debug=False, verbose=False, media=None, comment=None, cookie=None, quality=None, source=None, keep_fps=False, mpvflags=[], d2aflags={}, fakeip=None):

    url_get_metadata = 'http://api.bilibili.com/view?'
    url_get_comment = 'http://comment.bilibili.com/%(cid)s.xml'
    if source == 'overseas':
        url_get_media = 'http://interface.bilibili.com/v_cdn_play?'
    else:
        url_get_media = 'http://interface.bilibili.com/playurl?'

    def parse_url(url):
        '''Parse a bilibili.com URL

        Return value: (aid, pid)
        '''
        if url.startswith('cid:'):
            try:
                return int(url[4:]), 'cid'
            except ValueError:
                raise ValueError('Invalid CID: %s' % url[4:])
        regex = re.compile('(?:http:/*[^/]+/(?:video/)?)?av(\\d+)(?:/|/index.html|/index_(\\d+).html)?(?:\\?|#|$)')
        regex_match = regex.match(url)
        if not regex_match:
            raise ValueError('Invalid URL: %s' % url)
        aid = regex_match.group(1)
        pid = regex_match.group(2) or '1'
        return aid, pid

    def fetch_video_metadata(aid, pid):
        '''Fetch video metadata

        Arguments: aid, pid

        Return value: {'cid': cid, 'title': title}
        '''
        req_args = {'type': 'json', 'appkey': codecs.decode(APPKEY,'rot13'), 'id': aid, 'page': pid}
        req_args['sign'] = bilibili_hash(req_args)
        _, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
        # A naive fix (judge if it is -404, I choose '-' :)
        if(response[8] == 45):
            req_args = {'type': 'json', 'appkey': codecs.decode(APPKEY,'rot13'), 'id': aid, 'page': 1}
            req_args['sign'] = bilibili_hash(req_args)
            _, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
        try:
            response = dict(json.loads(response.decode('utf-8', 'replace')))
        except (TypeError, ValueError):
            raise ValueError('Can not get \'cid\' from %s' % url)
        if 'error' in response:
            logging.error('Error message: %s' % response.get('error'))
        if 'cid' not in response:
            raise ValueError('Can not get \'cid\' from %s' % url)
        return response

    def get_media_urls(cid, *, feck_you_bishi_mode=False):
        '''Request the URLs of the video

        Arguments: cid

        Return value: [media_urls]
        '''
        if source in {None, 'overseas'}:
            user_agent = USER_AGENT_API if not feck_you_bishi_mode else USER_AGENT_PLAYER
            req_args = {'cid': cid}
            if quality is not None:
                req_args['quality'] = quality
            else:
                req_args['quality'] = None
            _, response = fetch_url(url_get_media+andro_mock(req_args), user_agent=user_agent, cookie=cookie, fakeip=fakeip)
            '''
            media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
            '''
            json_obj = json.loads(response.decode('utf-8'))
            if json_obj['result'] != 'suee':  # => Not Success
                raise ValueError('Server returned an error: %s (%s)' % (json_obj['result'], json_obj['code']))
            media_urls = [str(i['url']).strip() for i in json_obj['durl']]
            if not feck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
                logging.error('Detected User-Agent block. Switching to feck-you-bishi mode.')
                return get_media_urls(cid, feck_you_bishi_mode=True)
        elif source == 'html5':
            req_args = {'aid': aid, 'page': pid}
            logging.warning('HTML5 video source is experimental and may not always work.')
            _, response = fetch_url('http://www.bilibili.com/m/html5?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
            response = json.loads(response.decode('utf-8', 'replace'))
            media_urls = [dict.get(response, 'src')]
            if not media_urls[0]:
                media_urls = []
            if not feck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
                logging.error('Failed to request HTML5 video source. Retrying.')
                return get_media_urls(cid, feck_you_bishi_mode=True)
        elif source == 'flvcd':
            req_args = {'kw': url}
            if quality is not None:
                if quality == 3:
                    req_args['quality'] = 'high'
                elif quality >= 4:
                    req_args['quality'] = 'super'
            _, response = fetch_url('http://www.flvcd.com/parse.php?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
            resp_match = re.search('<input type="hidden" name="inf" value="([^"]+)"', response.decode('gbk', 'replace'))
            if resp_match:
                media_urls = resp_match.group(1).rstrip('|').split('|')
            else:
                media_urls = []
        elif source == 'bilipr':
            req_args = {'cid': cid}
            quality_arg = '1080' if quality is not None and quality >= 4 else '720'
            logging.warning('BilibiliPr video source is experimental and may not always work.')
            resp_obj, response = fetch_url('http://pr.lolly.cc/P%s?%s' % (quality_arg, urllib.parse.urlencode(req_args)), user_agent=USER_AGENT_PLAYER)
            if resp_obj.getheader('Content-Type', '').startswith('text/xml'):
                media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
            else:
                media_urls = []
        else:
            assert source in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}
        if len(media_urls) == 0 or media_urls == ['http://static.hdslb.com/error.mp4']:
            raise ValueError('Can not get valid media URLs.')
        return media_urls

    def get_video_size(media_urls):
        '''Determine the resolution of the video

        Arguments: [media_urls]

        Return value: (width, height)
        '''
        try:
            if media_urls[0].startswith('http:') or media_urls[0].startswith('https:'):
                ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '-timeout', '60000000', '-user-agent', USER_AGENT_PLAYER, '--', media_urls[0]]
            else:
                ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '--', media_urls[0]]
            log_command(ffprobe_command)
            ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE)
            try:
                ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace'))
            except KeyboardInterrupt:
                logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.')
                ffprobe_process.terminate()
                return 0, 0
            width, height, widthxheight = 0, 0, 0
            for stream in dict.get(ffprobe_output, 'streams') or []:
                if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight:
                    width, height = dict.get(stream, 'width'), dict.get(stream, 'height')
            return width, height
        except Exception as e:
            log_or_raise(e, debug=debug)
            return 0, 0

    def convert_comments(cid, video_size):
        '''Convert comments to ASS subtitle format

        Arguments: cid

        Return value: comment_out -> file
        '''
        _, resp_comment = fetch_url(url_get_comment % {'cid': cid}, cookie=cookie)
        comment_in = io.StringIO(resp_comment.decode('utf-8', 'replace'))
        comment_out = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8-sig', newline='\r\n', prefix='tmp-danmaku2ass-', suffix='.ass', delete=False)
        logging.info('Invoking Danmaku2ASS, converting to %s' % comment_out.name)
        d2a_args = dict({'stage_width': video_size[0], 'stage_height': video_size[1], 'font_face': 'SimHei', 'font_size': math.ceil(video_size[1]/21.6), 'text_opacity': 0.8, 'duration_marquee': min(max(6.75*video_size[0]/video_size[1]-4, 3.0), 8.0), 'duration_still': 5.0}, **d2aflags)
        for i, j in ((('stage_width', 'stage_height', 'reserve_blank'), int), (('font_size', 'text_opacity', 'comment_duration', 'duration_still', 'duration_marquee'), float)):
            for k in i:
                if k in d2aflags:
                    d2a_args[k] = j(d2aflags[k])
        try:
            danmaku2ass.Danmaku2ASS(input_files=[comment_in], input_format='Bilibili', output_file=comment_out, **d2a_args)
        except Exception as e:
            log_or_raise(e, debug=debug)
            logging.error('Danmaku2ASS failed, comments are disabled.')
        comment_out.flush()
        comment_out.close()  # Close the temporary file early to fix an issue related to Windows NT file sharing
        return comment_out

    def launch_player(video_metadata, media_urls, comment_out, is_playlist=False, increase_fps=True):
        '''Launch MPV media player

        Arguments: video_metadata, media_urls, comment_out

        Return value: player_exit_code -> int
        '''
        mpv_version_master = tuple(int(i) if i.isdigit() else float('inf') for i in check_env.mpv_version.split('-', 1)[0].split('.'))
        mpv_version_gte_0_10 = mpv_version_master >= (0, 10)
        mpv_version_gte_0_6 = mpv_version_gte_0_10 or mpv_version_master >= (0, 6)
        mpv_version_gte_0_4 = mpv_version_gte_0_6 or mpv_version_master >= (0, 4)
        logging.debug('Compare mpv version: %s %s 0.10' % (check_env.mpv_version, '>=' if mpv_version_gte_0_10 else '<'))
        logging.debug('Compare mpv version: %s %s 0.6' % (check_env.mpv_version, '>=' if mpv_version_gte_0_6 else '<'))
        logging.debug('Compare mpv version: %s %s 0.4' % (check_env.mpv_version, '>=' if mpv_version_gte_0_4 else '<'))
        if increase_fps:  # If hardware decoding (without -copy suffix) is used, do not increase fps
            for i in mpvflags:
                i = i.split('=', 1)
                if 'vdpau' in i or 'vaapi' in i or 'vda' in i:
                    increase_fps = False
                    break
        command_line = ['mpv', '--autofit', '950x540']
        if mpv_version_gte_0_6:
            command_line += ['--cache-file', 'TMP']
        if increase_fps and mpv_version_gte_0_6:  # Drop frames at vo side but not at decoder side to prevent A/V sync issues
            command_line += ['--framedrop', 'vo']
        command_line += ['--http-header-fields', 'User-Agent: '+USER_AGENT_PLAYER.replace(',', '\\,')]
        if mpv_version_gte_0_6:
            if mpv_version_gte_0_10:
                command_line += ['--force-media-title', video_metadata.get('title', url)]
            else:
                command_line += ['--media-title', video_metadata.get('title', url)]
        if is_playlist or len(media_urls) > 1:
            command_line += ['--merge-files']
        if mpv_version_gte_0_4:
            command_line += ['--no-video-aspect', '--sub-ass', '--sub-file', comment_out.name]
        else:
            command_line += ['--no-aspect', '--ass', '--sub', comment_out.name]
        if increase_fps:
            if mpv_version_gte_0_6:
                command_line += ['--vf', 'lavfi="fps=fps=60:round=down"']
            else:  # Versions < 0.6 have an A/V sync related issue
                command_line += ['--vf', 'lavfi="fps=fps=50:round=down"']
        command_line += mpvflags
        if is_playlist:
            command_line += ['--playlist']
        else:
            command_line += ['--']
        command_line += media_urls
        log_command(command_line)
        player_process = subprocess.Popen(command_line)
        try:
            player_process.wait()
        except KeyboardInterrupt:
            logging.info('Terminating media player...')
            try:
                player_process.terminate()
                try:
                    player_process.wait(timeout=2)
                except subprocess.TimeoutExpired:
                    logging.info('Killing media player by force...')
                    player_process.kill()
            except Exception:
                pass
            raise
        return player_process.returncode

    aid, pid = parse_url(url)

    logging.info('Loading video info...')
    if pid != 'cid':
        video_metadata = fetch_video_metadata(aid, pid)
    else:
        video_metadata = {'cid': aid, 'title': url}
    logging.info('Got video cid: %s' % video_metadata['cid'])

    logging.info('Loading video content...')
    if media is None:
        media_urls = get_media_urls(video_metadata['cid'])
    else:
        media_urls = [media]
    logging.info('Got media URLs:'+''.join(('\n      %d: %s' % (i+1, j) for i, j in enumerate(media_urls))))

    logging.info('Determining video resolution...')
    video_size = get_video_size(media_urls)
    logging.info('Video resolution: %sx%s' % video_size)
    if video_size[0] > 0 and video_size[1] > 0:
        video_size = (video_size[0]*1080/video_size[1], 1080)  # Simply fix ASS resolution to 1080p
    else:
        log_or_raise(ValueError('Can not get video size. Comments may be wrongly positioned.'), debug=debug)
        video_size = (1920, 1080)

    logging.info('Loading comments...')
    if comment is None:
        comment_out = convert_comments(video_metadata['cid'], video_size)
    else:
        comment_out = open(comment, 'r')
        comment_out.close()

    logging.info('Launching media player...')
    player_exit_code = launch_player(video_metadata, media_urls, comment_out, increase_fps=not keep_fps)

    if comment is None and player_exit_code == 0:
        os.remove(comment_out.name)

    return player_exit_code
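
The json.loads pattern worth isolating from this example: HTTP bodies arrive as bytes, so the script decodes with errors='replace' before parsing, and treats both TypeError and ValueError as a parse failure. A standalone sketch of that pattern (parse_api_response is a hypothetical helper, not part of bilidan.py):

import json

def parse_api_response(raw_bytes):
    '''Decode an HTTP response body (bytes) and parse it as a JSON object.'''
    try:
        payload = json.loads(raw_bytes.decode('utf-8', 'replace'))
    except (TypeError, ValueError):
        raise ValueError('Server did not return valid JSON')
    if 'error' in payload:               # surface API-level errors, as the example does
        raise ValueError('Server error: %s' % payload['error'])
    return payload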

Example 2

Project: dashman Source File: dashvote.py
def main(screen):

    global stdscr
    global votecount
    global window_width
    global max_yeacount_len
    global max_naycount_len
    global max_percentage_len
    global ballot_entries
    global votewin
    global masternodes
    global C_YELLOW, C_GREEN, C_RED, C_CYAN

    stdscr = screen
    stdscr.scrollok(1)

    git_describe = run_command(
        'GIT_DIR=%s GIT_WORK_TREE=%s git describe' %
        (git_dir + '/.git', git_dir)).rstrip("\n").split('-')
    try:
        GIT_VERSION = ('-').join((git_describe[i] for i in [1, 2]))
        version = 'v' + VERSION + ' (' + GIT_VERSION + ')'
    except IndexError:
        version = 'v' + VERSION

    try:
        curses.curs_set(2)
    except:
        pass
    if curses.has_colors():
        curses.start_color()
        curses.use_default_colors()
        for i in range(0, curses.COLORS):
            curses.init_pair(i + 1, i, -1)

    C_CYAN = curses.color_pair(7)
    C_YELLOW = curses.color_pair(4)
    C_GREEN = curses.color_pair(3)
    C_RED = curses.color_pair(2)

    # test dash-cli in path -- TODO make robust
    try:
        run_command('dash-cli getinfo')
    except subprocess.CalledProcessError:
        quit(
            "--> cannot find dash-cli in $PATH\n" +
            "    do: export PATH=/path/to/dash-cli-folder:$PATH\n" +
            "    and try again\n")

    loadwin = curses.newwin(40, 40, 1, 2)

    loadwin.addstr(1, 2, 'dashvote version: ' + version, C_CYAN)
    loadwin.addstr(2, 2, 'loading votes... please wait', C_GREEN)
    loadwin.refresh()

    mncount = int(run_command('dash-cli masternode count'))
    block_height = int(run_command('dash-cli getblockcount'))
    # get ballot
    ballots = json.loads(run_command('dash-cli mnbudget show'))
    ballot = {}
    for entry in ballots:
        # prune expired proposals
        if ballots[entry][u'BlockEnd'] < block_height:
            continue
        # prune completely funded proposals
        if ballots[entry][u'RemainingPaymentCount'] < 1:
            continue
        ballots[entry][u'vote'] = 'ABSTAIN'
        ballots[entry][u'votes'] = json.loads(
            run_command(
                'dash-cli mnbudget getvotes %s' %
                entry))
        ballot[entry] = ballots[entry]
    ballot_entries = sorted(ballot, key=lambda s: ballot[s]['BlockStart'])
    votecount = len(ballot_entries)
    max_proposal_len = 0
    max_yeacount_len = 0
    max_naycount_len = 0
    max_percentage_len = 0
    for entry in ballot_entries:
        yeas = ballot[entry][u'Yeas']
        nays = ballot[entry][u'Nays']
        percentage = "{0:.1f}".format(
            (float((yeas + nays)) / float(mncount)) * 100)
        ballot[entry][u'vote_turnout'] = percentage
        ballot[entry][u'vote_threshold'] = (
            yeas + nays) > mncount/10 and True or False
        ballot[entry][u'vote_passing'] = (
            yeas - nays) > mncount/10 and True or False
        max_proposal_len = max(
            max_proposal_len,
            len(entry))
        max_yeacount_len = max(max_yeacount_len, len(str(yeas)))
        max_naycount_len = max(max_naycount_len, len(str(nays)))
        max_percentage_len = max(max_percentage_len, len(str(percentage)))

    # extract mnprivkey,txid-txidx from masternode.conf
    masternodes = {}
    with open(os.path.join(dash_conf_dir, 'masternode.conf'), 'r') as f:
        lines = list(
            line
            for line in
            (l.strip() for l in f)
            if line and not line.startswith('#'))
        for line in lines:
            conf = line.split()
            masternodes[conf[3] + '-' + conf[4]] = {
                "alias": conf[0],
                "mnprivkey": conf[2],
                "fundtx": conf[3] +
                '-' +
                conf[4],
                "txid": conf[3],
                "txout": conf[4]}
    if not masternodes:
        # fallback to dash.conf entries if no masternode.conf entries
        with open(os.path.join(dash_conf_dir, 'dash.conf'), 'r') as f:
            lines = list(
                line
                for line in
                (l.strip() for l in f)
                if line and not line.startswith('#'))
            conf = {}
            for line in lines:
                n, v = line.split('=')
                conf[n.strip(' ')] = v.strip(' ')
            conf['masternodeaddr'] = re.sub(
                '[\[\]]',
                '',
                conf['masternodeaddr'])
            if all(k in conf for k in ('masternode', 'masternodeaddr', 'masternodeprivkey')):
                # get funding tx from dashninja
                import urllib2
                mninfo = urllib2.urlopen(
                    "https://dashninja.pl/api/masternodes?ips=[\"" +
                    conf['masternodeaddr'] +
                    "\"]&portcheck=1").read()
                try:
                    mndata = json.loads(mninfo)
                    d = mndata[u'data'][0]
                except:
                    quit('cannot retrieve masternode info from dashninja')
                vin = str(d[u'MasternodeOutputHash'])
                vidx = str(d[u'MasternodeOutputIndex'])
                masternodes[vin + '-' + vidx] = {
                    "alias": conf['masternodeaddr'],
                    "mnprivkey": conf['masternodeprivkey'],
                    "fundtx": vin +
                    '-' +
                    vidx,
                    "txid": vin,
                    "txout": vidx}
            else:
                quit('cannot find masternode information in dash.conf')

    # TODO open previous votes/local storage something
    for entry in ballot:
        ballot[entry][u'previously_voted'] = 0
        for hash in ballot[entry][u'votes']:
            if hash in masternodes:
                if ballot[entry][u'votes'][hash][u'Vote'] == 'YES':
                    ballot[entry][u'previously_voted'] = 1
                else:
                    ballot[entry][u'previously_voted'] = 2

    loadwin.erase()
    window_width = 35
    window_width = max(window_width, max_proposal_len +
                       max_percentage_len +
                       max_yeacount_len +
                       max_naycount_len +
                       len(str(len(masternodes))))
    votewin = curses.newwin(votecount + 9, window_width + 17, 1, 2)
    votewin.keypad(1)
    votewin.border()

    votewin.addstr(1, 2, 'dashvote version: ' + version, C_CYAN)
    votewin.addstr(
        2,
        2,
        'use arrow keys to set votes for %s masternodes' %
        len(masternodes),
        C_YELLOW)
    votewin.addstr(3, 2, 'hit enter on CONFIRM to vote - q to quit', C_YELLOW)
    votewin.addstr(4, 3, '*', C_GREEN)
    votewin.addstr(4, 4, '/', C_CYAN)
    votewin.addstr(4, 5, '*', C_RED)
    votewin.addstr(4, 7, '== previously voted proposal (yes/no)', C_YELLOW)
    _y = 5
    for entry in ballot_entries:
        _y += 1
        x = 4
        yeas = ballot[entry][u'Yeas']
        nays = ballot[entry][u'Nays']
        percentage = ballot[entry][u'vote_turnout']
        passing = ballot[entry][u'vote_passing']
        threshold = ballot[entry][u'vote_threshold']
        if ballot[entry][u'previously_voted'] > 0:
            direction = ballot[entry][u'previously_voted']
            votewin.addstr(_y, x-1, '*', direction == 1 and C_GREEN or C_RED)

        fmt_entry = "%-"+str(max_proposal_len + 2)+"s"
        votewin.addstr(
            _y,
            x,
            fmt_entry % entry,
            passing and C_GREEN or threshold and C_RED or C_YELLOW)

        for x in range(max_yeacount_len - len(str(yeas))):
            votewin.addstr(' ')

        votewin.addstr(str(yeas), C_GREEN)
        votewin.addstr('/', C_CYAN)
        votewin.addstr(str(nays), C_RED)
        votewin.addstr(' ')

        for x in range(max_naycount_len - len(str(nays))):
            votewin.addstr(' ')

        for x in range(max_percentage_len - len(str(percentage))):
            votewin.addstr(' ')

        votewin.addstr(str(percentage) + "%", C_CYAN)

        votewin.addstr(' ')
        votewin.addstr('ABSTAIN', C_YELLOW)
    votewin.addstr(
        _y + 2,
        window_width + 7,
        'confirm',
        C_YELLOW)
    votewin.move(0 + 6, window_width + 7)

    votewin.refresh()

    keys = {
        113: lambda s: quit(),
        curses.KEY_UP: lambda s: prev_vote(s),
        curses.KEY_DOWN: lambda s: next_vote(s),
        curses.KEY_RIGHT: lambda s: set_vote(ballot, s, 1),
        curses.KEY_LEFT: lambda s: set_vote(ballot, s, -1),
        107: lambda s: prev_vote(s),
        106: lambda s: next_vote(s),
        108: lambda s: set_vote(ballot, s, 1),
        104: lambda s: set_vote(ballot, s, -1),
        10: lambda s: submit_votes(stdscr, ballot, s)
    }

    sel_vote = 0
    while True:
        key = votewin.getch()
        f = keys.get(key)
        if hasattr(f, '__call__'):
            sel_vote = f(sel_vote)
            try:
                entry_vote = ballot[ballot_entries[sel_vote]][u'vote']
            except IndexError:
                # CONFIRM button
                entry_vote = ''
            if key != 10:
                update_vote_display(votewin, sel_vote, entry_vote)
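
Here json.loads is applied to the stdout of a command-line tool (dash-cli), a common way to consume CLIs that emit JSON. A minimal standard-library sketch of the same idea (run_json_command is a hypothetical helper):

import json
import subprocess

def run_json_command(args):
    # Run a CLI that prints JSON on stdout and parse the result.
    out = subprocess.check_output(args)
    return json.loads(out.decode('utf-8'))

# mirroring the example above:
# proposals = run_json_command(['dash-cli', 'mnbudget', 'show'])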

Example 3

Project: openpgp-python Source File: import_keys.py
    def handle(self, *args, **options):
        for filename in options['file']:
            paths = glob.glob(os.path.expanduser(filename))
            for path in paths:
                if path.endswith(".gz"):
                    f = gzip.GzipFile(path)
                else:
                    f = open(path)

                count_new = 0
                count_updated = 0
                for i, line in enumerate(f):
                    key = json.loads(line)

                    #try to find existing key
                    key_obj = None
                    matching_keys = models.PublicKey.objects.filter(
                        long_keyid=key.get("key_id", None))
                    for k in matching_keys:
                        if json.loads(k.json)['packet_raw'] == key['packet_raw']:
                            key_obj = k
                            count_updated += 1
                            break

                    #create new key if couldn't find an existing one
                    if key_obj is None:
                        count_new += 1
                        key_json = dict((k, v) for k, v in key.items() if k != "packets")
                        key_obj = models.PublicKey(json=json.dumps(key_json, sort_keys=True, indent=4))


                    #update the public key attributes
                    key_obj.errors = json.dumps(key['error_msg'], sort_keys=True, indent=4) if key.get("error_msg", None) is not None else None
                    key_obj.short_keyid = key['key_id'][-8:] if key.get("key_id", None) is not None else None
                    key_obj.long_keyid = key['key_id'] if key.get("key_id", None) is not None else None
                    key_obj.fingerprint = key['fingerprint'] if key.get("fingerprint", None) is not None else None
                    key_obj.created = make_aware(datetime.utcfromtimestamp(key['creation_time'])) if key.get("creation_time", None) is not None else None
                    key_obj.algo_id = key['algo_id'] if key.get("algo_id", None) is not None else None
                    key_obj.save()

                    #go through the packets and insert as needed
                    signature_target = key_obj
                    for packet in key.get("packets", []):

                        #SubKey
                        if packet['tag_id'] == 14:

                            #try to find existing subkey
                            subkey_obj = None
                            matching_subkeys = models.SubKey.objects.filter(publickey=key_obj)
                            for u in matching_subkeys:
                                if json.loads(u.json)['packet_raw'] == packet['packet_raw']:
                                    subkey_obj = u
                                    break

                            #create new SubKey if couldn't find an existing one
                            if subkey_obj is None:
                                subkey_obj = models.SubKey(json=json.dumps(packet, sort_keys=True, indent=4), publickey=key_obj)

                            #update the SubKey attributes
                            subkey_obj.errors = json.dumps(packet['error_msg'], sort_keys=True, indent=4) if packet.get("error_msg", None) is not None else None
                            subkey_obj.short_keyid = packet['key_id'][-8:] if packet.get("key_id", None) is not None else None
                            subkey_obj.long_keyid = packet['key_id'] if packet.get("key_id", None) is not None else None
                            subkey_obj.fingerprint = packet['fingerprint'] if packet.get("fingerprint", None) is not None else None
                            subkey_obj.created = make_aware(datetime.utcfromtimestamp(packet['creation_time'])) if packet.get("creation_time", None) is not None else None
                            subkey_obj.algo_id = packet['algo_id'] if packet.get("algo_id", None) is not None else None
                            subkey_obj.save()

                            signature_target = subkey_obj

                        #UserID
                        elif packet['tag_id'] == 13:

                            #try to find existing user_id
                            userid_obj = None
                            matching_userids = models.UserID.objects.filter(publickey=key_obj)
                            for u in matching_userids:
                                if json.loads(u.json)['packet_raw'] == packet['packet_raw']:
                                    userid_obj = u
                                    break

                            #create new UserID if couldn't find an existing one
                            if userid_obj is None:
                                userid_obj = models.UserID(json=json.dumps(packet, sort_keys=True, indent=4), publickey=key_obj)

                            #update the UserID attributes
                            userid_obj.errors = json.dumps(packet['error_msg'], sort_keys=True, indent=4) if packet.get("error_msg", None) is not None else None
                            userid_obj.text = packet['user_id'] if packet.get("user_id", None) is not None else None
                            userid_obj.save()

                            signature_target = userid_obj

                        #UserAttribute
                        elif packet['tag_id'] == 17:

                            #try to find existing user_attribute
                            useratt_obj = None
                            matching_useratts = models.UserAttribute.objects.filter(publickey=key_obj)
                            for u in matching_useratts:
                                if json.loads(u.json)['packet_raw'] == packet['packet_raw']:
                                    useratt_obj = u
                                    break

                            #create new UserAttribute if couldn't find an existing one
                            if useratt_obj is None:
                                useratt_obj = models.UserAttribute(json=json.dumps(packet, sort_keys=True, indent=4), publickey=key_obj)

                            #update the UserAttribute attributes
                            useratt_obj.errors = json.dumps(packet['error_msg'], sort_keys=True, indent=4) if packet.get("error_msg", None) is not None else None
                            useratt_obj.save()

                            signature_target = useratt_obj

                            #update the images for the user attribute
                            for img in packet['subpackets']:

                                #find any existing images
                                image_obj = None
                                matching_images = models.Image.objects.filter(userattribute=useratt_obj)
                                for jpg in matching_images:
                                    if jpg.image == img.get("image", None):
                                        image_obj = jpg
                                        break

                                #create new Image if couldn't find an existing one
                                if image_obj is None:
                                    image_obj = models.Image.objects.create(
                                        userattribute=useratt_obj,
                                        encoding=img.get("encoding", None),
                                        image=img.get("image", None),
                                    )

                        #Signature
                        elif packet['tag_id'] == 2:

                            #try to find existing signature
                            sig_obj = None
                            matching_sigs = models.Signature.objects.filter(publickey=key_obj)
                            for s in matching_sigs:
                                if json.loads(s.json)['packet_raw'] == packet['packet_raw']:
                                    sig_obj = s
                                    break

                            #create new Signature if couldn't find an existing one
                            if sig_obj is None:
                                sig_obj = models.Signature(json=json.dumps(packet, sort_keys=True, indent=4), publickey=key_obj)

                            #update the Signature attributes
                            sig_obj.errors = json.dumps(packet['error_msg'], sort_keys=True, indent=4) if packet.get("error_msg", None) is not None else None
                            sig_obj.subkey = signature_target if isinstance(signature_target, models.SubKey) else None
                            sig_obj.userid = signature_target if isinstance(signature_target, models.UserID) else None
                            sig_obj.userattribute = signature_target if isinstance(signature_target, models.UserAttribute) else None
                            sig_obj.signature_type = packet['signature_type_id'] if packet.get("signature_type_id", None) is not None else None
                            sig_obj.pubkey_algo_id = packet['pubkey_algo_id'] if packet.get("pubkey_algo_id", None) is not None else None
                            sig_obj.hash_algo_id = packet['hash_algo_id'] if packet.get("hash_algo_id", None) is not None else None
                            sig_obj.subpackets = json.dumps(packet['subpackets'], sort_keys=True, indent=4) if packet.get("subpackets", None) is not None else None

                            #find created time and signer key_id (version 3)
                            if packet.get("creation_time", None) is not None:
                                sig_obj.created = make_aware(datetime.utcfromtimestamp(packet['creation_time']))
                            if packet.get("key_id", None) is not None:
                                sig_obj.signer_hex = packet['key_id']

                            #find created time and signer key_id (version 4)
                            for sp in packet.get("subpackets", []):
                                if sp['type_id'] == 2 and sp.get("creation_time", None) is not None:
                                    sig_obj.created = make_aware(datetime.utcfromtimestamp(sp['creation_time']))
                                elif sp['type_id'] == 16 and sp.get("key_id", None) is not None:
                                    sig_obj.signer_hex = sp['key_id']

                            sig_obj.save()

                            #mark as self-signature
                            if sig_obj.signer_hex is not None:
                                sig_obj.is_selfsig = sig_obj.signer_hex == key_obj.long_keyid
                                if sig_obj.is_selfsig:
                                    sig_obj.signer = key_obj
                                sig_obj.save()


                    #print a status update
                    if i % 100 == 99:
                        print "Saved {} public keys ({} new, {} updated) from {}...".format(
                            i+1, count_new, count_updated, path)

                print "Done! Saved {} keys ({} new, {} updated) from {}!".format(
                    i+1, count_new, count_updated, path)
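
This example consumes newline-delimited JSON (one document per line, often called JSON Lines), calling json.loads once per line so that arbitrarily large key dumps never have to fit in memory at once. A minimal Python 3 sketch of that pattern, with the same transparent handling of .gz files (iter_json_lines is a hypothetical helper):

import gzip
import json

def iter_json_lines(path):
    # Yield one parsed object per non-empty line of a JSON Lines file.
    opener = gzip.open if path.endswith('.gz') else open
    with opener(path, 'rt') as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)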

Example 4

Project: data-import Source File: update_assets.py
Function: main
def main():

	parser = get_parser()
	args = parser.parse_args()

	if args[0].help or not (args[0].file or args[0].fields) or not args[0].ip or not args[0].token :
		print >> sys.stderr, "A simple utility to load a CSV file with asset information into the QRadar asset model based on IP address (which must exist in QRadar)"
		print >> sys.stderr, "The first column of the first line of the file must be 'ipaddress'"
		print >> sys.stderr, "The remaining columns of the file must contain field name headers that match the asset properties being loaded"
		print >> sys.stderr, "The asset with the most recent occurrence of the ip address is updated with the remaining fields on the line"
		print >> sys.stderr, "";
		print >> sys.stderr, "example:"
		print >> sys.stderr, "";
		print >> sys.stderr, "ipaddress,Technical Owner,Location,Description"
		print >> sys.stderr, "172.16.129.128,Chris Meenan,UK,Email Server"
	print >> sys.stderr, "172.16.129.129,Joe Blogs,Atlanta,Customer Database Server"
		print >> sys.stderr, "172.16.129.130,Jason Corbin,Boston,Application Server"
		print >> sys.stderr, "";
		print >> sys.stderr, parser.format_help().strip() 
		exit(0)

	# Creates instance of APIClient. It contains all of the API methods.
	api_client = RestApiClient(args)

	# retrieve all the asset fields
	print("Retrieving asset fields");
	response = api_client.call_api('asset_model/properties', 'GET',None, {},None)
    
	# Each response contains an HTTP response code.
	response_json = json.loads(response.read().decode('utf-8'))
	if response.code != 200:
		print("When retrieving assets : " + str(response.code))
		print(json.dumps(response_json, indent=2, separators=(',', ':')))
		exit(1)

	asset_field_lookup = {}
	if ( args[0].fields ):
		print("Asset fields:")
	for asset_field in response_json:
		asset_field_lookup[ asset_field['name' ] ] = asset_field['id']
		if ( args[0].fields ):
			print(asset_field['name' ])

	if( not args[0].file ):
		exit(1)

	# open file and get query
	file = open(args[0].file, 'r')

	if file == None:
		print("File not found " + args[0].file)
		exit(1)

	# This is the asset data to load, need to check all the names exist
	columnnames = file.readline().strip();
	fields = columnnames.split(',');

	asset_file_fields = {}
	field_index = 0;
	is_error = 0;
	for fname in fields:
		if (fname <> 'ipaddress') and (asset_field_lookup.get(fname,'')==''):
			print("Field = " + fname + " does not exist")
			is_error = 1
		elif( fname == 'ipaddress' ):
			asset_file_fields[ field_index ] = 0 
		else:
			asset_file_fields[ field_index ] = asset_field_lookup[ fname ]
		field_index = field_index + 1;

	# if there was an error print out the field
	if is_error == 1:
		print("Assets field: ")
		for k, v in asset_field_lookup.items():
			print(k)
		exit(1)
		
	# retrieve all the assets
	print("Retrieving assets from QRadar");
	response = api_client.call_api('asset_model/assets', 'GET',None, {},None)


	# Each response contains an HTTP response code.
	response_json = json.loads(response.read().decode('utf-8'))
	if response.code != 200:
		print("When retrieving assets : " + str(response.code))
		print(json.dumps(response_json, indent=2, separators=(',', ':')))
		exit(1)
    
	print( str(len(response_json)) + " assets retrieved");
	# loop over assets and add to a lookup table
	ip_assetid_lookup = {}
	ip_lastseen_lookup = {}

	for asset in response_json:
		interfaces = asset['interfaces'];
		for interface in interfaces:
			for ipaddresses in interface['ip_addresses']:

				# get the largest last seen we have from this asset
				max_last_seen = ipaddresses['last_seen_scanner']
				if ( ipaddresses['last_seen_profiler'] > max_last_seen ):
					max_last_seen = ipaddresses['last_seen_profiler']

				# look to see if we have seen this IP address before
				last_seen = ip_lastseen_lookup.get( ipaddresses['value'] ,-1);
				if (last_seen == -1) or (last_seen < max_last_seen):
					ip_lastseen_lookup[ ipaddresses['value'] ] = max_last_seen
					ip_assetid_lookup[ ipaddresses['value'] ] = asset['id']

	# now we have loaded the assets and mapped ip address to asset id 
	# we can loop over the file
	data = file.readline().strip()
	
	update_success = 0;
	current_line = 2;
	while data <> '':
		
		# split values
		data_fields = data.split(',')

		json_string = "{ \"properties\": [ "
		index = 0;
		ip_address = '';
		if( len(data_fields) != len(asset_file_fields)):
			print("Error : Missing or extra fields at line " + str(current_line) )
		else:
			ip_address_found=0
			for data_field in data_fields:
				data_field = data_field.strip()
				# this is the IP address
				if index ==0:
					ip_address = data_field
					if( ip_assetid_lookup.get(ip_address,'') == '' ):
						print("Error : IP address " + ip_address + " at line " + str(current_line) + " does not exist in QRadar Asset DB")
					else:
						ip_address_found = 1
				else:
					json_string = json_string + "{ \"type_id\":" + str(asset_file_fields[index]) + ",\"value\":\"" + data_field + "\"}"

				index = index + 1;
				if (index < len(data_fields)) and (index <> 1):
					json_string = json_string + ","

			if ip_address_found == 1:
				json_string = json_string + "]}"

				#print(" JSON = " + json_string)			
				# create JSON object
		
				response = api_client.call_api('asset_model/assets/'+str(ip_assetid_lookup[ip_address]), 'POST',{b'Accept': 'text/plain' },{},json_string)
				# Each response contains an HTTP response code.
				if response.code != 200:
					response_json = json.loads(response.read().decode('utf-8'))
					print("When updating asset : " + str(ip_assetid_lookup[ip_address]) + " " + ip_address)
					print(" JSON = " + json_string)			
					print(json.dumps(response_json, indent=2, separators=(',', ':')))
					exit(1)
				update_success = update_success + 1
    
		data = file.readline().strip()
		current_line = current_line + 1
	print( str(update_success) + " assets successfully updated")
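
One caveat in this example: the request payload is assembled by string concatenation, which breaks as soon as a CSV value contains a double quote or backslash. Building the structure as Python objects and serializing with json.dumps (the counterpart to the json.loads calls used elsewhere in the script) avoids that; a minimal sketch (build_properties_payload is hypothetical):

import json

def build_properties_payload(field_values):
    # field_values: list of (type_id, value) pairs taken from a CSV row
    properties = [{'type_id': t, 'value': v} for t, v in field_values]
    return json.dumps({'properties': properties})  # quoting/escaping handled for us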

Example 5

Project: pkgdb2 Source File: test_flask_api_acls.py
    @patch('pkgdb2.lib.utils.get_packagers')
    @patch('pkgdb2.packager_login_required')
    @patch('pkgdb2.lib.utils.get_bz_email_user')
    def test_acl_update(self, bz_mail_func, login_func, pkger_func):
        """ Test the api_acl_update function.  """
        login_func.return_value = None
        bz_mail_func.return_value = 1

        output = self.app.post('/api/package/acl')
        self.assertEqual(output.status_code, 301)

        user = FakeFasUser()
        with user_set(APP, user):
            output = self.app.post('/api/package/acl/')
            self.assertEqual(output.status_code, 500)
            data = json.loads(output.data)
            self.assertEqual(
                sorted(data),
                ['error', 'error_detail', 'output']
            )
            self.assertEqual(
                data['error'], "Invalid input submitted")

            self.assertEqual(
                data['output'], "notok")

            self.assertEqual(
                sorted(data['error_detail']),
                [
                    "acl: This field is required.",
                    "acl_status: Not a valid choice",
                    "branches: This field is required.",
                    "pkgname: This field is required.",
                    "user: This field is required.",
                ]
            )

        create_package_acl(self.session)

        data = {
            'namespace': 'rpms',
            'pkgname': 'guake',
            'branches': 'master',
            'acl': 'commit',
            'acl_status': 'Approved',
            'user': 'toshio',
        }

        # Check if it works authenticated
        user = FakeFasUser()
        pkger_func.return_value = ['pingou', 'ralph', 'toshio']

        with user_set(APP, user):
            exp = {
                "messages": [
                    "user: pingou set for toshio acl: commit of package: "
                    "guake from: Awaiting Review to: Approved on branch: "
                    "master"
                ],
                "output": "ok"
            }
            output = self.app.post('/api/package/acl/', data=data)
            json_out = json.loads(output.data)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(json_out, exp)

            # Test that auto-approved ACL gets automatically Approved
            data = {
                'namespace': 'rpms',
                'pkgname': 'guake',
                'branches': 'master',
                'acl': 'watchcommits',
                'acl_status': 'Awaiting Review',
                'user': 'toshio',
            }

            exp = {
                "messages": [
                    "user: pingou set for toshio acl: watchcommits of "
                    "package: guake from:  to: Approved on branch: "
                    "master"
                ],
                "output": "ok"
            }
            output = self.app.post('/api/package/acl/', data=data)
            json_out = json.loads(output.data)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(json_out, exp)

        # Check if it fails normally
        user.username = 'Ralph'

        data = {
            'namespace': 'rpms',
            'pkgname': 'guake',
            'branches': 'master',
            'acl': 'commit',
            'acl_status': 'Approved',
            'user': 'toshio',
        }

        with user_set(APP, user):
            exp = {
                "error": "You are not allowed to update ACLs of someone else.",
                "output": "notok"
            }
            output = self.app.post('/api/package/acl/', data=data)
            json_out = json.loads(output.data)
            self.assertEqual(output.status_code, 500)
            self.assertEqual(json_out, exp)

        data = {
            'namespace': 'rpms',
            'pkgname': 'guake',
            'branches': 'master',
            'acl': 'commit',
            'acl_status': 'Awaiting Review',
            'user': 'toshio',
        }

        user = FakeFasUser()
        with user_set(APP, user):
            # Revert it back to Awaiting Review
            exp = {
                "messages": [
                    "user: pingou set for toshio acl: commit of package: "
                    "guake from: Approved to: Awaiting Review on branch: "
                    "master"
                ],
                "output": "ok"
            }
            output = self.app.post('/api/package/acl/', data=data)
            json_out = json.loads(output.data)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(json_out, exp)

        data = {
            'namespace': 'rpms',
            'pkgname': 'guake',
            'branches': 'master',
            'acl': 'commit',
            'acl_status': 'Approved',
            'user': 'toshio',
        }

        # Check if it works for admins
        user = FakeFasUserAdmin()

        with user_set(APP, user):
            exp = {
                "messages": [
                    "user: admin set for toshio acl: commit of package: "
                    "guake from: Awaiting Review to: Approved on branch: "
                    "master"
                ],
                "output": "ok"
            }
            output = self.app.post('/api/package/acl/', data=data)
            json_out = json.loads(output.data)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(json_out, exp)

            exp = {
                "messages": [
                    "Nothing to update on branch: master for acl: commit"
                ],
                "output": "ok"
            }
            output = self.app.post('/api/package/acl/', data=data)
            json_out = json.loads(output.data)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(json_out, exp)
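
The tests above parse each Flask test-client response with json.loads(output.data) and assert on the resulting Python structures rather than on raw strings, which keeps the comparisons independent of key order and whitespace. A minimal sketch of the same technique, assuming a hypothetical Flask app named myapp:

import json
import unittest

class JSONEndpointTest(unittest.TestCase):
    def setUp(self):
        from myapp import app            # hypothetical Flask application
        self.client = app.test_client()

    def test_status(self):
        output = self.client.get('/api/status/')
        data = json.loads(output.data)   # raw response body -> Python objects
        self.assertEqual(output.status_code, 200)
        self.assertEqual(data['output'], 'ok')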

Example 6

Project: rest_gae Source File: users.py
def get_user_rest_class(**kwd):
    """Returns a UserRESTHandlerClass with the permissions set according to input"""

    class UserRESTHandlerClass(BaseRESTHandler):

        model = import_class(kwd.get('user_model', User))
        email_as_username = kwd.get('email_as_username', False)
        admin_only_user_registration = kwd.get('admin_only_user_registration', False)
        user_details_permission = kwd.get('user_details_permission', PERMISSION_OWNER_USER)
        verify_email_address = kwd.get('verify_email_address', False)
        verification_email = kwd.get('verification_email', None)
        verification_successful_url = kwd.get('verification_successful_url', None)
        verification_failed_url = kwd.get('verification_failed_url', None)
        reset_password_url = kwd.get('reset_password_url', None)
        reset_password_email = kwd.get('reset_password_email', None)
        user_policy_callback = [kwd.get('user_policy_callback', None)]
        send_email_callback = [kwd.get('send_email_callback', None)] # Wrapping in a list so the function won't be turned into a bound method
        allow_login_for_non_verified_email = kwd.get('allow_login_for_non_verified_email', True)

        # Validate arguments (we do this at this stage in order to raise exceptions immediately rather than while the app is running)
        if (model != User) and (User not in model.__bases__):
            raise ValueError('The provided user_model "%s" does not inherit from rest_gae.users.User class' % (model))
        if verify_email_address and not verification_email:
            raise ValueError('Must set "verification_email" when "verify_email_address" is True')
        if verification_email and set(verification_email.keys()) != set(['sender', 'subject', 'body_text', 'body_html']):
            raise ValueError('"verification_email" must include all of the following keys: sender, subject, body_text, body_html')
        if verify_email_address and not verification_successful_url:
            raise ValueError('Must set "verification_successful_url" when "verify_email_address" is True')
        if verify_email_address and not verification_failed_url:
            raise ValueError('Must set "verification_failed_url" when "verify_email_address" is True')
        if verify_email_address and not reset_password_url:
            raise ValueError('Must set "reset_password_url" when "verify_email_address" is True')
        if verify_email_address and not reset_password_email:
            raise ValueError('Must set "reset_password_email" when "verify_email_address" is True')
        if reset_password_email and set(reset_password_email.keys()) != set(['sender', 'subject', 'body_text', 'body_html']):
            raise ValueError('"reset_password_email" must include all of the following keys: sender, subject, body_text, body_html')


        permissions = { 'GET': PERMISSION_ANYONE, 'PUT': PERMISSION_OWNER_USER, 'DELETE': PERMISSION_OWNER_USER, 'POST': PERMISSION_ANYONE } # Used by get_response method when building the HTTP response header 'Access-Control-Allow-Methods'

        def __init__(self, request, response):
            self.initialize(request, response)

            self.send_email_callback = self.send_email_callback[0]

        def rest_method_wrapper(func):
            """Wraps GET/POST/PUT/DELETE methods and adds standard functionality"""

            def inner_f(self, model_id):
                # We make sure the auth session store is using the proper user model (we can't rely on the user initializing it from outside the library)
                self.auth.store.user_model = self.model

                method_name = func.func_name.upper()

                try:
                    # Call original method
                    if model_id:
                        model_id = model_id[1:] # Get rid of '/' at the beginning

                        if model_id == 'me':
                            # 'me' is shorthand for the currently logged-in user
                            if not self.user:
                                # User tried to retrieve information about himself without being logged-in
                                raise self.unauthorized()

                            model = self.user

                        elif (method_name == 'POST' and model_id in ['login', 'reset']) or (method_name == 'GET' and model_id == 'verify'):
                            model = model_id

                        else:
                            model = self._model_id_to_model(model_id)

                        return func(self, model)
                    else:
                        return func(self, None)

                except RESTException, exc:
                    return self.error(exc)

            return inner_f


        #
        # REST endpoint methods
        #


        @rest_method_wrapper
        def get(self, model):
            """GET endpoint - returns all users (if admin and not user id provided) or a specific user's details otherwise"""

            if not model:
                # Return all users (if admin)

                if not self.user:
                    # Must be logged-in
                    return self.unauthorized()
                if not self.user.is_admin:
                    # Must be an admin
                    return self.permission_denied()

                query = self._filter_query() # Filter the results
                query = self._order_query(query) # Order the results
                (results, cursor) = self._fetch_query(query) # Fetch them (with a limit / specific page, if provided)

                return self.success({
                    'results': results,
                    'next_results_url': self._build_next_query_url(cursor)
                    })


            elif model == 'verify':
                # It's an email verification link

                user_id = self.request.GET.get('user_id')
                signup_token = self.request.GET.get('signup_token')
                verification_type = self.request.GET.get('type')

                if not user_id or not signup_token or not verification_type:
                    return self.redirect(self.verification_failed_url)

                try:
                    user_id = int(user_id)
                except ValueError, exc:
                    return self.redirect(self.verification_failed_url)

                # it should be something more concise like
                # self.auth.get_user_by_token(user_id, signup_token)
                # unfortunately the auth interface does not (yet) allow to manipulate
                # signup tokens concisely
                try:
                    user, ts = self.user_model.get_by_auth_token(user_id, signup_token, 'signup')
                    if not user: raise Exception()
                except:
                    return self.redirect(self.verification_failed_url)

                # store user data in the session
                self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)

                if verification_type == 'v':
                    # User verified his email address after registration

                    # Remove signup token, we don't want users to come back with an old link
                    self.user_model.delete_signup_token(user.get_id(), signup_token)

                    # Mark user's email address as verified
                    if not user.is_email_verified:
                        user.is_email_verified = True
                        user.put()

                    return self.redirect(self.verification_successful_url)

                elif verification_type == 'p':
                    # User wants to reset his password

                    # Redirect to password reset URL with the token
                    return self.redirect(self.reset_password_url + '?signup_token=' + signup_token)

                else:
                    # Unknown verification type
                    return self.redirect(self.verification_failed_url)


            # Return the details of a single user (by ID)

            if self.user_details_permission != PERMISSION_ANYONE:
                # Verify permissions

                if not self.user:
                    # Must be logged-in
                    return self.unauthorized()

                if (self.user_details_permission == PERMISSION_OWNER_USER) and (self.user != model) and (not self.user.is_admin):
                    # The owning user (and admins) is only one that can view his own user details
                    return self.permission_denied()

                if (self.user_details_permission == PERMISSION_ADMIN) and (not self.user.is_admin):
                    # Must be an admin
                    return self.permission_denied()


            # Return user details
            return self.success(model)


        @rest_method_wrapper
        def post(self, model):
            """POST endpoint - registers a new user"""

            if model and model not in ['login', 'reset']:
                # Invalid usage of the endpoint
                raise RESTException('Cannot POST to a specific user ID')

            if model and model == 'reset':
                # Send a password reset email

                try:
                    # Parse POST data as JSON
                    json_data = json.loads(self.request.body)
                except ValueError, exc:
                    raise RESTException('Invalid JSON POST data')

                if 'user_name' not in json_data:
                    raise RESTException('Missing user_name argument')

                user = self.user_model.get_by_auth_id(json_data['user_name'])
                if not user:
                    raise RESTException('User not found: %s' % json_data['user_name'])

                # Send the reset password email
                self._send_verification_email(user, self.reset_password_email, True)

                return self.success({})


            elif model and model == 'login':
                # Login the user

                try:
                    # Parse POST data as JSON
                    json_data = json.loads(self.request.body)
                except ValueError as exc:
                    raise RESTException('Invalid JSON POST data')

                if 'user_name' not in json_data:
                    raise RESTException('Missing user_name argument')
                if 'password' not in json_data:
                    raise RESTException('Missing password argument')

                try:
                    user = self.auth.get_user_by_password(json_data['user_name'], json_data['password'], remember=True, save_session=True)
                except (InvalidAuthIdError, InvalidPasswordError) as e:
                    # Login failed
                    return self.permission_denied('Invalid user name / password')

                if not self.allow_login_for_non_verified_email and not user.is_email_verified:
                    # Don't allow the user to log in, since the email address hasn't been verified yet.
                    return self.permission_denied('Email address not verified')

                # Login successful
                return self.success(user)


            #
            # Register a new user
            #


            if self.admin_only_user_registration:
                if not self.user:
                    # Must be logged-in
                    return self.unauthorized()

                if not self.user.is_admin:
                    # Must be admin
                    return self.permission_denied()


            try:
                # Parse POST data as JSON
                json_data = json.loads(self.request.body)
            except ValueError as exc:
                raise RESTException('Invalid JSON POST data')


            try:
                # Any exceptions raised due to invalid/missing input will be caught

                if self.user_policy_callback is not None and self.user_policy_callback[0] is not None:
                    json_data = self.user_policy_callback[0](self.user, json_data)

                if 'email' not in json_data:
                    raise ValueError('Missing email')
                if not self.email_as_username and 'user_name' not in json_data:
                    raise ValueError('Missing user_name')
                if 'password' not in json_data:
                    raise ValueError('Missing password')

                user_name = json_data['email'] if self.email_as_username else json_data['user_name']
                password = json_data['password']

                # Sanitize the input
                json_data.pop('user_name', None)
                json_data.pop('password', None)
                json_data.pop('is_email_verified', None)

                if self.user and self.user.is_admin:
                    # Allow admins to create a new user and set his access level
                    is_admin = json_data.get('is_admin', False)
                else:
                    is_admin = False

                json_data.pop('is_admin', None)


                user_properties = { }

                # Make sure only properties defined in the user model will be written (since the parent webapp2 User model is an ExpandoModel)
                for prop_name in self.model._properties.keys():
                    if prop_name in json_data:
                        user_properties[prop_name] = json_data[prop_name]

                unique_properties = ['email']

                user_data = self.model.create_user(
                        user_name,
                        unique_properties,
                        password_raw=password,
                        is_email_verified=(not self.verify_email_address),
                        is_admin=is_admin,
                        **user_properties
                        )

                if not user_data[0]:
                    # Caused by duplicate unique values (i.e. the user is already registered or the username/email is taken by someone else)
                    existing_fields = ['user_name' if s == 'auth_id' else s for s in user_data[1]]
                    raise RESTException('Unable to register user - the following fields are already registered: %s' % (', '.join(existing_fields)))


                if self.verify_email_address:
                    # Send email verification
                    user = user_data[1]
                    self._send_verification_email(user, self.verification_email)

                # Return the newly-created user
                return self.success(user_data[1])

            except Exception as exc:
                raise RESTException('Invalid JSON POST data - %s' % exc)


        def _send_verification_email(self, user, email, reset_password=False):
            """Sends a verification email to a specific `user` with specific email details (in `email`). Creates a reset password link if `reset_password` is True."""

            # Prepare the verification URL
            user_id = user.get_id()
            token = self.user_model.create_signup_token(user_id)

            path_url = self.request.path_url
            path_url = path_url[:-len('/reset')] if path_url.endswith('/reset') else path_url
            path_url = path_url.rstrip('/')
            verification_params = { 'type': ('v' if not reset_password else 'p'), 'user_id': user_id, 'signup_token': token }
            verification_url = path_url + '/verify?' + urlencode(verification_params)

            # Prepare email body
            email['body_text'] = Template(email['body_text']).render(user=user, verification_url=verification_url)
            email['body_html'] = Template(email['body_html']).render(user=user, verification_url=verification_url)

            # Send the email
            if self.send_email_callback:
                # Use the provided function for sending the email
                self.send_email_callback(email)
            else:
                # Use GAE's email services
                message = mail.EmailMessage()
                message.sender = email['sender']
                message.to = user.email
                message.subject = email['subject']
                message.body = email['body_text']
                message.html = email['body_html']
                message.send()


        @rest_method_wrapper
        def put(self, model):
            """PUT endpoint - updates a user's details"""

            if not model:
                # Invalid usage of the endpoint
                raise RESTException('Must provide user ID for PUT endpoint')


            if not self.user:
                # Must be logged-in
                return self.unauthorized()

            if (self.user != model) and (not self.user.is_admin):
                # Only the owning user (and admins) can update the user's details
                return self.permission_denied()


            try:
                # Parse PUT data as JSON
                json_data = json.loads(self.request.body)
            except ValueError as exc:
                raise RESTException('Invalid JSON PUT data')



            # Update the user
            try:
                # Any exceptions raised due to invalid/missing input will be caught

                if self.user_policy_callback is not None:
                    self.user_policy_callback[0](self.user, json_data)
                model = self._build_model_from_data(json_data, self.model, model)
                if self.user.is_admin:
                    # Allow the admin to change sensitive properties
                    if 'is_admin' in json_data:
                        model.is_admin = json_data['is_admin']
                    if 'is_email_verified' in json_data:
                        model.is_email_verified = json_data['is_email_verified']

                if 'password' in json_data:
                    # Change password if requested
                    model.set_password(json_data['password'])

                if 'signup_token' in json_data:
                    # Remove the signup token (generated from a reset password link) so users can't come back with an old link
                    self.user_model.delete_signup_token(self.user.get_id(), json_data['signup_token'])

                model.put()

            except Exception as exc:
                raise RESTException('Invalid JSON PUT data - %s' % exc)


            # Return the updated user details
            return self.success(model)

        @rest_method_wrapper
        def delete(self, model):
            """DELETE endpoint - deletes an existing user"""

            if not model:
                # Invalid usage of the endpoint
                raise RESTException('Must provide user ID for DELETE endpoint')

            if not self.user:
                # Must be logged-in
                return self.unauthorized()

            if (self.user != model) and (not self.user.is_admin):
                # Only the owning user (and admins) can delete this account
                return self.permission_denied()


            # Delete the user
            try:
                self.user_model.remove_unique(model.email, ['email'], email=model.email)
                model.key.delete()
            except Exception as exc:
                raise RESTException('Could not delete user - %s' % exc)


            # Return the deleted user instance
            return self.success(model)



    # Return the class statically initialized with given input arguments
    return UserRESTHandlerClass
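
A pattern worth pulling out of the handler above: every POST and PUT endpoint funnels self.request.body through json.loads and converts a ValueError into a RESTException. Below is a minimal sketch of that idiom as a standalone helper; it is an illustration, not part of the original project (the exception class is passed in so the sketch stays self-contained):

import json

def parse_json_body(body, exception_cls=ValueError):
    """Parse a request body as JSON, raising exception_cls on malformed input."""
    try:
        return json.loads(body)
    except ValueError:
        raise exception_cls('Invalid JSON POST data')

In the handler above this would be called as json_data = parse_json_body(self.request.body, RESTException).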

Example 7

Project: MediaBrowser.Kodi Source File: RandomItems.py
    def updateRandom(self):
        self.logMsg("updateRandomMovies Called")
        useBackgroundData = xbmcgui.Window(10000).getProperty("BackgroundDataLoaded") == "true"
        
        addonSettings = xbmcaddon.Addon(id='plugin.video.xbmb3c')
        mb3Host = addonSettings.getSetting('ipaddress')
        mb3Port = addonSettings.getSetting('port')    
        userName = addonSettings.getSetting('username')     
        
        downloadUtils = DownloadUtils()
        userid = downloadUtils.getUserId()
        self.logMsg("updateRandomMovies UserID : " + userid)
        
        self.logMsg("Updating Random Movie List")
        
        randomUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=30&Recursive=true&SortBy=Random&Fields=Path,Genres,MediaStreams,Overview,ShortOverview,CriticRatingSummary&SortOrder=Descending&Filters=IsUnplayed,IsNotFolder&IncludeItemTypes=Movie&format=json"
                
        jsonData = downloadUtils.downloadUrl(randomUrl, suppress=True, popup=1)
        if(jsonData == ""):
            return
            
        result = json.loads(jsonData)
        self.logMsg("Random Movie Json Data : " + str(result), level=2)
        
        result = result.get("Items")
        if(result == None):
            result = []
            
        db = Database()
        WINDOW = xbmcgui.Window(10000)

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
            
            rating = item.get("CommunityRating")
            criticrating = item.get("CriticRating")
            officialrating = item.get("OfficialRating")
            criticratingsummary = ""
            if(item.get("CriticRatingSummary") != None):
                criticratingsummary = item.get("CriticRatingSummary").encode('utf-8')
            plot = item.get("Overview")
            if plot == None:
                plot = ''
            plot = plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            year = item.get("ProductionYear")
            if(item.get("RunTimeTicks") != None):
                runtime = str(int(item.get("RunTimeTicks")) / (10000000 * 60))
            else:
                runtime = "0"

            item_id = item.get("Id")
            
            if useBackgroundData != True:
                poster = downloadUtils.getArtwork(item, "Primary3")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(item, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                landscape = downloadUtils.getArtwork(item, "Thumb3")
                discart = downloadUtils.getArtwork(item, "Disc")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumb = downloadUtils.getArtwork(item, "Thumb3")
                else:
                    realthumb = medium_fanart
            else:
                poster = db.get(item_id +".Primary3")
                thumbnail = db.get(item_id +".Primary")
                logo = db.get(item_id +".Logo")
                fanart = db.get(item_id +".Backdrop")
                landscape = db.get(item_id +".Thumb3")
                discart = db.get(item_id +".Disc")
                medium_fanart = db.get(item_id +".Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumb = db.get(item_id +".Thumb3")
                else:
                    realthumb = medium_fanart  
            
            url = mb3Host + ":" + mb3Port + ',;' + item_id
            # play or show info
            selectAction = addonSettings.getSetting('selectAction')
            if(selectAction == "1"):
                playUrl = "plugin://plugin.video.xbmb3c/?id=" + item_id + '&mode=' + str(_MODE_ITEM_DETAILS)
            else:
                playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
                      
            playUrl = playUrl.replace("\\\\", "smb://")
            playUrl = playUrl.replace("\\", "/")    

            self.logMsg("RandomMovieMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Art(poster)  = " + thumbnail, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Rating  = " + str(rating), level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".CriticRating  = " + str(criticrating), level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".CriticRatingSummary  = " + criticratingsummary, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Year  = " + str(year), level=2)
            self.logMsg("RandomMovieMB3." + str(item_count) + ".Runtime  = " + str(runtime), level=2)
            
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Thumb", realthumb)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Path", playUrl)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Art(landscape)", landscape)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Art(medium_fanart)", medium_fanart)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Art(poster)", thumbnail)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".RealThumb", realthumb)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Rating", str(rating))
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Mpaa", str(officialrating))
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".CriticRating", str(criticrating))
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".CriticRatingSummary", criticratingsummary)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".ShortPlot", shortplot)
            
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Year", str(year))
            WINDOW.setProperty("RandomMovieMB3." + str(item_count) + ".Runtime", str(runtime))
            
            WINDOW.setProperty("RandomMovieMB3.Enabled", "true")
            
            item_count = item_count + 1
        
        self.logMsg("Updating Random TV Show List")
        
        randomUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=10&Recursive=true&SortBy=Random&Fields=Path,Genres,MediaStreams,Overview,ShortOverview&SortOrder=Descending&Filters=IsUnplayed,IsNotFolder&IsVirtualUnaired=false&IsMissing=False&IncludeItemTypes=Episode&format=json"
                 
        jsonData = downloadUtils.downloadUrl(randomUrl, suppress=True, popup=1)
        result = json.loads(jsonData)
        self.logMsg("Random TV Show Json Data : " + str(result), level=2)
        
        result = result.get("Items")
        if(result == None):
            result = []   

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            seriesName = "Missing Name"
            if(item.get("SeriesName") != None):
                seriesName = item.get("SeriesName").encode('utf-8')   

            eppNumber = "X"
            tempEpisodeNumber = ""
            if(item.get("IndexNumber") != None):
                eppNumber = item.get("IndexNumber")
                if eppNumber < 10:
                    tempEpisodeNumber = "0" + str(eppNumber)
                else:
                    tempEpisodeNumber = str(eppNumber)

            seasonNumber = item.get("ParentIndexNumber")
            if seasonNumber < 10:
                tempSeasonNumber = "0" + str(seasonNumber)
            else:
                tempSeasonNumber = str(seasonNumber)
            rating = str(item.get("CommunityRating"))
            plot = item.get("Overview")
            if plot == None:
                plot = ''
            plot = plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            item_id = item.get("Id")
            seriesId = item.get("SeriesId")          
              
            if useBackgroundData != True:
                seriesJsonData = downloadUtils.downloadUrl("http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items/" + seriesId + "?format=json", suppress=True, popup=1 )
                seriesResult = json.loads(seriesJsonData)      
                officialrating = seriesResult.get("OfficialRating")        
                poster = downloadUtils.getArtwork(seriesResult, "Primary3")
                small_poster = downloadUtils.getArtwork(seriesResult, "Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(seriesResult, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                banner = downloadUtils.getArtwork(item, "Banner")
                if (seriesResult.get("ImageTags") != None and seriesResult.get("ImageTags").get("Thumb") != None):
                    seriesthumbnail = downloadUtils.getArtwork(seriesResult, "Thumb3")
                else:
                    seriesthumbnail = medium_fanart
            else:
                officialrating = db.get(seriesId + ".OfficialRating")
                poster = db.get(seriesId + ".Primary3")
                small_poster = db.get(seriesId + ".Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = db.get(seriesId + ".Logo")
                fanart = db.get(seriesId + ".Backdrop")
                medium_fanart = db.get(seriesId + ".Backdrop3")
                banner = db.get(seriesId + ".Banner")
                if item.get("SeriesThumbImageTag") != None:
                   seriesthumbnail = db.get(seriesId + ".Thumb3")
                else:
                   seriesthumbnail = fanart
			
              
            url = mb3Host + ":" + mb3Port + ',;' + item_id
            selectAction = addonSettings.getSetting('selectAction')
            if(selectAction == "1"):
                playUrl = "plugin://plugin.video.xbmb3c/?id=" + item_id + '&mode=' + str(_MODE_ITEM_DETAILS)
            else:
                playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\", "smb://")
            playUrl = playUrl.replace("\\", "/")    

            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".EpisodeTitle = " + title, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".ShowTitle = " + seriesName, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".EpisodeNo = " + tempEpisodeNumber, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".SeasonNo = " + tempSeasonNumber, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Rating  = " + rating, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)  = " + fanart, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)  = " + logo, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)  = " + banner, level=2)  
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)  = " + poster, level=2)
            self.logMsg("RandomEpisodeMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".EpisodeTitle", title)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".ShowTitle", seriesName)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".EpisodeNo", tempEpisodeNumber)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".SeasonNo", tempSeasonNumber)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".SeriesThumb", seriesthumbnail)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Rating", rating)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)", fanart)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.medium_fanart)", medium_fanart)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)", logo)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)", banner)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)", poster)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("RandomEpisodeMB3." + str(item_count) + ".ShortPlot", shortplot)
            
            WINDOW.setProperty("RandomEpisodeMB3.Enabled", "true")
            
            item_count = item_count + 1
            
        # update random music
        self.logMsg("Updating Random MusicList")
    
        randomUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=30&Recursive=true&SortBy=Random&Fields=Path,Genres,MediaStreams,Overview&SortOrder=Descending&Filters=IsUnplayed,IsFolder&IsVirtualUnaired=false&IsMissing=False&IncludeItemTypes=MusicAlbum&format=json"
    
        jsonData = downloadUtils.downloadUrl(randomUrl, suppress=True, popup=1)
        result = json.loads(jsonData)
        self.logMsg("Random MusicList Json Data : " + str(result), level=2)
    
        result = result.get("Items")
        if(result == None):
            result = []

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            artist = "Missing Artist"
            if(item.get("AlbumArtist") != None):
                artist = item.get("AlbumArtist").encode('utf-8')   

            year = "0000"
            if(item.get("ProductionYear") != None):
              year = str(item.get("ProductionYear"))
            plot = "Missing Plot"
            if(item.get("Overview") != None):
              plot = item.get("Overview").encode('utf-8')

            item_id = item.get("Id")
           
            if item.get("Type") == "MusicAlbum":
               parentId = item.get("ParentLogoItemId")
            
            thumbnail = downloadUtils.getArtwork(item, "Primary")
            logo = downloadUtils.getArtwork(item, "Logo")
            fanart = downloadUtils.getArtwork(item, "Backdrop")
            banner = downloadUtils.getArtwork(item, "Banner")
            
            url = mb3Host + ":" + mb3Port + ',;' + item_id
            playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\", "smb://")
            playUrl = playUrl.replace("\\", "/")    

            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Artist = " + artist, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Year = " + year, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Art(banner)  = " + banner, level=2)  
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Art(poster)  = " + thumbnail, level=2)
            self.logMsg("RandomAlbumMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Artist", artist)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Year", year)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Rating", rating)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Art(banner)", banner)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Art(poster)", thumbnail)
            WINDOW.setProperty("RandomAlbumMB3." + str(item_count) + ".Plot", plot)
            
            WINDOW.setProperty("RandomAlbumMB3.Enabled", "true")
            
            item_count = item_count + 1
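
The download/parse/fallback sequence above appears three times (movies, episodes, albums): fetch JSON over HTTP, json.loads it, then treat a missing "Items" key as an empty list; the movie branch additionally bails out on an empty payload. A minimal sketch of that idiom on its own, assuming the same response shape as the MediaBrowser API used above:

import json

def load_items(json_text):
    # An empty download means there is nothing to update.
    if not json_text:
        return []
    result = json.loads(json_text)
    # A response without an "Items" key is treated as an empty list, not an error.
    return result.get("Items") or []

With a helper like this, each of the three loops could iterate over load_items(jsonData) directly.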

Example 8

Project: Bitcoin-Trading-Client Source File: main.py
def animate(i):
    global refreshRate
    global DatCounter



    def moving_average(x, n, type='simple'):

        x = np.asarray(x)
        if type=='simple':
            weights = np.ones(n)
        else:
            weights = np.exp(np.linspace(-1., 0., n))

        weights /= weights.sum()


        a =  np.convolve(x, weights, mode='full')[:len(x)]
        a[:n] = a[n]
        return a


    def computeMACD(x, slow=26, fast=12,location="bottom"):
        """
        compute the MACD (Moving Average Convergence/Divergence) using a fast and slow exponential moving avg'
        return value is emaslow, emafast, macd which are len(x) arrays
        """
        values = {'key': 1,'prices':x}


        url = "http://seaofbtc.com/api/indicator/macd"
        data = urllib.parse.urlencode(values)
        data = data.encode('utf-8')
        req = urllib.request.Request(url, data)
        resp = urllib.request.urlopen(req)
        respData = resp.read()
        newData = respData.decode('utf-8').replace('[','').replace(']','')

        #print(newData)

        split = newData.split('::')

        macd = split[0]
        ema9 = split[1]
        hist = split[2]

        macd = macd.split(", ")
        ema9 = ema9.split(", ")
        hist = hist.split(", ")


        try:
            macd = [float(i) for i in macd]
        except Exception as e:
            print(str(e)+"  macd")
        try:
            ema9 = [float(i) for i in ema9]
        except Exception as e:
            print(str(e)+"  ema9")
        try:
            hist = [float(i) for i in hist]
        except Exception as e:
            print(str(e)+"  hist")





        print("call!!!")
        



        if location == "top":
            try:
                a0.plot(OHLC['MPLDates'][fast:], macd[fast:], color=darkColor, lw=2)
                a0.plot(OHLC['MPLDates'][fast:], ema9[fast:], color=lightColor, lw=1)
                a0.fill_between(OHLC['MPLDates'][fast:], hist[fast:], 0, alpha=0.5, facecolor=darkColor, edgecolor=darkColor)
                datLabel = "MACD"
                a0.set_ylabel(datLabel)
            except Exception as e:
                print(str(e))
                topIndicator = "none"
                

        elif location == "bottom":
            try:
                a3.plot(OHLC['MPLDates'][fast:], macd[fast:], color=darkColor, lw=2)
                a3.plot(OHLC['MPLDates'][fast:], ema9[fast:], color=lightColor, lw=1)
                a3.fill_between(OHLC['MPLDates'][fast:], hist[fast:], 0, alpha=0.5, facecolor=darkColor, edgecolor=darkColor)
                datLabel = "MACD"
                a3.set_ylabel(datLabel)
            except Exception as e:
                print(str(e))
                bottomIndicator = "none"
            


    def rsiIndicator(priceData,location="top"):

        if location == "top":
            values = {'key': 1,'prices':priceData,'periods':topIndicator[1]}

        elif location == "bottom":
            values = {'key': 1,'prices':priceData,'periods':bottomIndicator[1]}

            
        url = "http://seaofbtc.com/api/indicator/rsi"
        data = urllib.parse.urlencode(values)
        data = data.encode('utf-8')
        req = urllib.request.Request(url, data)
        resp = urllib.request.urlopen(req)
        respData = resp.read()
        newData = respData.decode('utf-8').replace('[','').replace(']','')
        priceList = newData.split(', ')
        rsiData = [float(i) for i in priceList]

        print("call!!!")


        if location == "top":
            a0.plot_date(OHLC['MPLDates'], rsiData,lightColor, label ="RSI")
            datLabel = "RSI("+str(topIndicator[1])+")"
            a0.set_ylabel(datLabel)

        elif location == "bottom":
            a3.plot_date(OHLC['MPLDates'], rsiData,lightColor, label ="RSI")
            datLabel = "RSI("+str(bottomIndicator[1])+")"
            a3.set_ylabel(datLabel)

            



        
    print(exchange)

    if chartLoad:
        if paneCount == 1:
            if DataPace == 'tick':
                try:
                    if exchange == 'BTC-e':
                        a = plt.subplot2grid((6,4), (0,0), rowspan=5, colspan=4)
                        a2 = plt.subplot2grid((6,4), (5,0), rowspan=1, colspan=4, sharex = a)
                        
                        dataLink = 'https://btc-e.com/api/3/trades/btc_usd?limit=2000'

                        data = urllib.request.urlopen(dataLink)
                        data = data.read().decode('utf-8')
                        data = json.loads(data)
                        data = data["btc_usd"]
                        data = pd.DataFrame(data)

                        

                        data["datestamp"] = np.array(data['timestamp']).astype('datetime64[s]')
                        allDates = data["datestamp"].tolist()

                        buys = data[(data['type']=='bid')]
                        #buys["datestamp"] = np.array(buys['timestamp']).astype('datetime64[s]')
                        buyDates = (buys["datestamp"]).tolist()

                        sells = data[(data['type']=='ask')]
                        #sells["datestamp"] = np.array(sells['timestamp']).astype('datetime64[s]')
                        sellDates = (sells["datestamp"]).tolist()

                        volume = data["amount"]
                        
                        a.clear()
                        
                        a.plot_date(buyDates,buys["price"], '#00A3E0', label ="buys")
                        a.plot_date(sellDates,sells["price"], '#183A54', label = "sells")
                        a2.fill_between(allDates,0, volume, facecolor='#183A54')
                        a.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                           ncol=2, borderaxespad=0.)

                        a.xaxis.set_major_locator(mticker.MaxNLocator(5))
                        a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))
                        plt.setp(a.get_xticklabels(), visible=False)
                        
                        title = exchange+' Tick Data\nLast Price: '+str(data["price"][0])
                        a.set_title(title)
                        priceData = data["price"].apply(float).tolist()


                        
                    if exchange == 'Bitstamp':
                        a = plt.subplot2grid((6,4), (0,0), rowspan=5, colspan=4)
                        a2 = plt.subplot2grid((6,4), (5,0), rowspan=1, colspan=4, sharex = a)

                        dataLink = 'https://www.bitstamp.net/api/transactions/'
                        data = urllib.request.urlopen(dataLink)
                        data = data.read().decode('utf-8')
                        data = json.loads(data)
                        data = pd.DataFrame(data)
                        data["datestamp"] = np.array(data['date'].apply(int)).astype('datetime64[s]')
                        datestamps = data["datestamp"].tolist()
                        volume = data["amount"].apply(float).tolist()

                        a.clear()

                        a.plot_date(datestamps,data["price"], '#183A54')


                        a2.fill_between(datestamps,0, volume, facecolor='#183A54')


                        a.xaxis.set_major_locator(mticker.MaxNLocator(5))
                        a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))
                        plt.setp(a.get_xticklabels(), visible=False)
                        
                        title = exchange+' Tick Data\nLast Price: '+str(data["price"][0])
                        a.set_title(title)
                        priceData = data["price"].apply(float).tolist()


                    if exchange == 'Bitfinex':
                        a = plt.subplot2grid((6,4), (0,0), rowspan=5, colspan=4)
                        a2 = plt.subplot2grid((6,4), (5,0), rowspan=1, colspan=4, sharex = a)
                        
                        dataLink = 'https://api.bitfinex.com/v1/trades/btcusd?limit=2000'

                        data = urllib.request.urlopen(dataLink)
                        data = data.read().decode('utf-8')
                        data = json.loads(data)
                        data = pd.DataFrame(data)
                        
                        volume = data["amount"].apply(float).tolist()

                        #print(data)

                        data["datestamp"] = np.array(data['timestamp']).astype('datetime64[s]')
                        allDates = data["datestamp"].tolist()

                        buys = data[(data['type']=='buy')]
                        #buys["datestamp"] = np.array(buys['timestamp']).astype('datetime64[s]')
                        buyDates = (buys["datestamp"]).tolist()

                        sells = data[(data['type']=='sell')]
                        #sells["datestamp"] = np.array(sells['timestamp']).astype('datetime64[s]')
                        sellDates = (sells["datestamp"]).tolist()

                        a.clear()
                        
                        
                        a.plot_date(buyDates,buys["price"], lightColor, label ="buys")
                        a.plot_date(sellDates,sells["price"], darkColor, label = "sells")
                        a2.fill_between(allDates,0, volume, facecolor='#183A54')


                        a.xaxis.set_major_locator(mticker.MaxNLocator(5))
                        a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))
                        plt.setp(a.get_xticklabels(), visible=False)
                        a.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                           ncol=2, borderaxespad=0.)
                        
                        title = exchange+' Tick Data\nLast Price: '+str(data["price"][0])
                        a.set_title(title)
                        priceData = data["price"].apply(float).tolist()

                    if exchange == 'Huobi':
                        try:
                            a = plt.subplot2grid((6,4), (0,0), rowspan=6, colspan=4)

                            data = urllib.request.urlopen('http://seaofbtc.com/api/basic/price?key=1&tf=1d&exchange='+programName).read()
                            
                            data = json.loads(data.decode('utf-8'))

                            

                            dateStamp = np.array(data[0]).astype('datetime64[s]')
                            dateStamp = dateStamp.tolist()
                            print('here')

                            df = pd.DataFrame({'Datetime':dateStamp})

                            
                            
                            
                            df['Price'] = data[1]
                            
                            df['Volume'] = data[2]
                            df['Symbol'] = "BTCUSD"
                            df['MPLDate'] = df['Datetime'].apply(lambda date: mdates.date2num(date.to_pydatetime()))
                            df = df.set_index('Datetime')
                            lastPrice = df['Price'][-1]

                            a.plot_date(df['MPLDate'][-4500:],df['Price'][-4500:], lightColor, label ="price")

                            a.xaxis.set_major_locator(mticker.MaxNLocator(5))
                            a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))

                            
                            title = exchange+' Tick Data\nLast Price: '+str(lastPrice)
                            a.set_title(title)
                            priceData = df['Price'].apply(float).tolist()
                        except Exception as e:
                            print(str(e))
                  
                except:
                    DatCounter = 9000
###### BEGIN NON-TICK GRAPHING#################################################################################    

            else:

                if DatCounter > 12:
                    try:
                        if exchange == 'Huobi':
                            if topIndicator != "none":

                                a = plt.subplot2grid((6,4), (1,0), rowspan=5, colspan=4)
                                a0 = plt.subplot2grid((6,4), (0,0), sharex=a, rowspan=1, colspan=4)
                            else:
                                a = plt.subplot2grid((6,4), (0,0), rowspan=6, colspan=4)

                        else:
                            if topIndicator != "none" and bottomIndicator != "none":
                                # actual price chart. 
                                a = plt.subplot2grid((6,4), (1,0), rowspan=3, colspan=4)
                                # volume!
                                a2 = plt.subplot2grid((6,4), (4,0), sharex=a, rowspan=1, colspan=4)
                                # top indicator
                                a0 = plt.subplot2grid((6,4), (0,0), sharex=a, rowspan=1, colspan=4)
                                # bottom indicator
                                a3 = plt.subplot2grid((6,4), (5,0), sharex=a, rowspan=1, colspan=4)
                                
                            elif topIndicator != "none":
                                a = plt.subplot2grid((6,4), (1,0), rowspan=4, colspan=4)
                                a2 = plt.subplot2grid((6,4), (5,0), sharex=a, rowspan=1, colspan=4)
                                a0 = plt.subplot2grid((6,4), (0,0), sharex=a, rowspan=1, colspan=4)
                            elif bottomIndicator != "none":
                                a = plt.subplot2grid((6,4), (0,0), rowspan=4, colspan=4)
                                a2 = plt.subplot2grid((6,4), (4,0), sharex=a, rowspan=1, colspan=4)
                                #a0 = plt.subplot2grid((6,4), (0,0), sharex=a, rowspan=1, colspan=4)
                                a3 = plt.subplot2grid((6,4), (5,0), sharex=a, rowspan=1, colspan=4)

                            else:
                                a = plt.subplot2grid((6,4), (0,0), rowspan=5, colspan=4)
                                a2 = plt.subplot2grid((6,4), (5,0), sharex=a, rowspan=1, colspan=4)
                                
                                
                            
                        print('http://seaofbtc.com/api/basic/price?key=1&tf='+DataPace+'&exchange='+programName)
                        data = urllib.request.urlopen('http://seaofbtc.com/api/basic/price?key=1&tf='+DataPace+'&exchange='+programName).read()
                        data = json.loads(data.decode('utf-8'))

                        dateStamp = np.array(data[0]).astype('datetime64[s]')
                        dateStamp = dateStamp.tolist()

                        df = pd.DataFrame({'Datetime':dateStamp})
                        df['Price'] = data[1]
                        df['Volume'] = data[2]
                        df['Symbol'] = "BTCUSD"
                        df['MPLDate'] = df['Datetime'].apply(lambda date: mdates.date2num(date.to_pydatetime()))
                        df = df.set_index('Datetime')


                        OHLC =  df['Price'].resample(resampleSize, how='ohlc')
                        OHLC = OHLC.dropna() 

                        volumeData = df['Volume'].resample(resampleSize, how={'volume':'sum'})

                        OHLC['dateCopy'] = OHLC.index
                        OHLC['MPLDates'] = OHLC['dateCopy'].apply(lambda date: mdates.date2num(date.to_pydatetime()))
                        del OHLC['dateCopy']

                        volumeData['dateCopy'] = volumeData.index
                        volumeData['MPLDates'] = volumeData['dateCopy'].apply(lambda date: mdates.date2num(date.to_pydatetime()))
                        del volumeData['dateCopy']


                        priceData = OHLC['close'].apply(float).tolist()

                        

                                
                        
                        

                        a.clear()
                        if middleIndicators != "none":
                            for eachMA in middleIndicators:
                                ewma = pd.stats.moments.ewma
                                #print("type:",eachMA[0],"periods:",eachMA[1])
                                if eachMA[0] == "sma":
                                    sma = pd.rolling_mean(OHLC["close"],eachMA[1])
                                    label = str(eachMA[1])+" SMA"
                                    a.plot(OHLC['MPLDates'],sma, label=label)
                                if eachMA[0] == "ema":
                                    ewma = pd.stats.moments.ewma
                                    label = str(eachMA[1])+" EMA"
                                    a.plot(OHLC['MPLDates'],ewma(OHLC["close"], eachMA[1]), label=label)


                            #a.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                            #   ncol=2, borderaxespad=0.)

                            a.legend(loc=0)




                                    

                        if topIndicator[0] == "rsi":
                            rsiIndicator(priceData,"top")
                        elif topIndicator == "macd":
                            try:
                                computeMACD(priceData,location="top")
                            except:
                                print("failed macd")
                            


                            
                        if bottomIndicator[0] == "rsi":
                            rsiIndicator(priceData,"bottom")
                        elif bottomIndicator == "macd":
                            try:
                                computeMACD(priceData,location="bottom")
                            except:
                                print("failed macd")

                        
                            

                        
                        
                        csticks = candlestick_ohlc(a, OHLC[['MPLDates', 'open', 'high', 'low', 'close']].values, width=candleWidth, colorup=lightColor, colordown=darkColor)
                        a.set_ylabel("price")
                        if exchange != 'Huobi':
                            a2.fill_between(volumeData['MPLDates'],0, volumeData['volume'], facecolor='#183A54')#, alpha=.4)
                            a2.set_ylabel("volume")

                        
                        a.xaxis.set_major_locator(mticker.MaxNLocator(3))
                        a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))

                        plt.setp(a.get_xticklabels(), visible=False)
                        
                        if topIndicator != "none":  
                            plt.setp(a0.get_xticklabels(), visible=False)

                        if bottomIndicator != "none":  
                            plt.setp(a2.get_xticklabels(), visible=False)

                        x = (len(OHLC['close']))-1

                        if DataPace == '1d':
                            title = exchange+' 1 Day Data with '+resampleSize+' Bars\nLast Price: '+str(OHLC['close'][x])
                        if DataPace == '3d':
                            title = exchange+' 3 Day Data with '+resampleSize+' Bars\nLast Price: '+str(OHLC['close'][x])
                        if DataPace == '7d':
                            title = exchange+' 7 Day Data with '+resampleSize+' Bars\nLast Price: '+str(OHLC['close'][x])


                        if topIndicator != "none":  
                            a0.set_title(title)
                        else:
                            a.set_title(title)
                        print('NewGraph!')
                        
                        DatCounter = 0
                    except Exception as e:
                        print(str(e),"main animate non tick")
                        DatCounter = 9000
                        
                else:
                    DatCounter += 1
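
Several branches in the example above read a bytes response from urllib and hand the decoded text to json.loads. Decoding explicitly is the key step; str()-ing the bytes and stripping the b'' repr characters is the fragile alternative. Here is a minimal sketch of the clean route, using only urllib.request and json calls that appear in the example:

import json
import urllib.request

def fetch_json(url):
    # Decode the raw bytes explicitly so json.loads sees clean text
    # instead of a str()-ified bytes repr.
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read().decode('utf-8'))

The tick-data branches above (BTC-e, Bitstamp, Bitfinex) follow the same read-then-decode pattern inline.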

Example 9

Project: horizon Source File: nova_data.py
def data(TEST):
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()
    TEST.server_groups = utils.TestDataContainer()

    # Data returned by novaclient.
    # It is used if the API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3775",
         "name": 'test_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Volume name',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
         "name": '',
         "status": 'in-use',
         "size": 10,
         "display_name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
         "name": 'my_volume',
         "status": 'in-use',
         "size": 30,
         "display_name": 'My Volume',
         "display_description": '',
         "device": "/dev/hdk",
         "created_at": '2011-05-01 11:54:33',
         "volume_type": 'vol_type_2',
         "attachments": [{"id": "2", "server_id": '1',
                          "device": "/dev/hdk"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3771",
         "name": 'non_bootable_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Non Bootable Volume',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    # Flavors
    flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                               'name': 'm1.tiny',
                               'vcpus': 1,
                               'disk': 0,
                               'ram': 512,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 0})
    flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               'name': 'm1.massive',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {'Trusted': True, 'foo': 'bar'},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
                               'name': 'm1.secret',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {},
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_4 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
                               'name': 'm1.metadata',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': FlavorExtraSpecs(
                                   {'key': 'key_mock',
                                    'value': 'value_mock'}),
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "1",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "2",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):

        def get_id(is_uuid):
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"default",
                                                "description": u"default"})
        sec_group_2 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"other_group",
                                                "description": u"NotDefault."})
        sec_group_3 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"another_group",
                                                "description": u"NotDefault."})

        rule = {'id': get_id(is_uuid),
                'group': {},
                'ip_protocol': u"tcp",
                'from_port': u"80",
                'to_port': u"80",
                'parent_group_id': sec_group_1.id,
                'ip_range': {'cidr': u"0.0.0.0/32"}}

        icmp_rule = {'id': get_id(is_uuid),
                     'group': {},
                     'ip_protocol': u"icmp",
                     'from_port': u"9",
                     'to_port': u"5",
                     'parent_group_id': sec_group_1.id,
                     'ip_range': {'cidr': u"0.0.0.0/32"}}

        group_rule = {'id': 3,
                      'group': {},
                      'ip_protocol': u"tcp",
                      'from_port': u"80",
                      'to_port': u"80",
                      'parent_group_id': sec_group_1.id,
                      'source_group_id': sec_group_1.id}

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {"rules": [rule_obj, rule_obj2, rule_obj3],
                "groups": [sec_group_1, sec_group_2, sec_group_3]}

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = {'floating_ips', 'fixed_ips',
                            'security_groups', 'security_group_rules'}
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'ram': {'used': 0,
                                'quota': 10000},
                        'cores': {'used': 0,
                                  'quota': 20},
                        'floating_ips': {'used': 0,
                                         'quota': 10},
                        'security_groups': {'used': 0,
                                            'quota': 10},
                        'volumes': {'used': 0,
                                    'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {"absolute": {"maxImageMeta": 128,
                           "maxPersonality": 5,
                           "maxPersonalitySize": 10240,
                           "maxSecurityGroupRules": 20,
                           "maxSecurityGroups": 10,
                           "maxServerMeta": 128,
                           "maxTotalCores": 20,
                           "maxTotalFloatingIps": 10,
                           "maxTotalInstances": 10,
                           "maxTotalKeypairs": 100,
                           "maxTotalRAMSize": 10000,
                           "totalCoresUsed": 0,
                           "totalInstancesUsed": 0,
                           "totalKeyPairsUsed": 0,
                           "totalRAMUsed": 0,
                           "totalSecurityGroupsUsed": 0}}
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {"host": "http://nova.example.com:8774",
            "name": "server_1",
            "status": "ACTIVE",
            "tenant_id": TEST.tenants.first().id,
            "user_id": TEST.user.id,
            "server_id": "1",
            "flavor_id": flavor_1.id,
            "image_id": TEST.images.first().id,
            "key_name": keypair.name}
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2",
                 "status": "BUILD",
                 "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": u'\u4e91\u89c4\u5219',
                 "status": "ACTIVE",
                 "tenant_id": tenant3.id,
                "server_id": "3"})
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_4",
                 "status": "PAUSED",
                 "server_id": "4"})
    server_4 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3, server_4)

    # VNC Console Data
    console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
                            u'type': u'novnc'}}
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
                            u'type': u'spice'}}
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {u'console': {u'url': u'http://example.com:6080/rdp_auto.html',
                            u'type': u'rdp'}}
    TEST.servers.rdp_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {'id': 1,
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_2 = {'id': 2,
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    # this floating ip is for lbaas tests
    fip_3 = {'id': 3,
             'fixed_ip': '10.0.0.5',
             # the underlying class maps the instance id to port id
             'instance_id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2),
                              generate_fip(fip_3))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)),
                          nova.FloatingIp(generate_fip(fip_3)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {'id': str(uuid.uuid4()),
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_4 = {'id': str(uuid.uuid4()),
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {"tenant_id": TEST.tenant.id,
                  "instance_name": server_1.name,
                  "flavor_name": flavor_1.name,
                  "flavor_vcpus": flavor_1.vcpus,
                  "flavor_disk": flavor_1.disk,
                  "flavor_ram": flavor_1.ram}
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {"tenant_id": tenant3.id,
                    "instance_name": server_3.name,
                    "flavor_name": flavor_1.name,
                    "flavor_vcpus": flavor_1.vcpus,
                    "flavor_disk": flavor_1.disk,
                    "flavor_ram": flavor_1.ram}
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    cert_data = {'private_key': 'private',
                 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(availability_zones.AvailabilityZone(
        availability_zones.AvailabilityZoneManager(None),
        {
            'zoneName': 'nova',
            'zoneState': {'available': True},
            'hosts': {
                "host001": {
                    "nova-network": {
                        "active": True,
                        "available": True,
                    },
                },
            },
        },
    ))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack001", "id": 3},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
            "servers": [{"name": "test_name", "uuid": "test_uuid"}]
        },
    )

    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack002", "id": 4},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
            "servers": [{"name": "test_name_2", "uuid": "test_uuid_2"}]
        },
    )
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "instance-host", "id": 5},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-conductor",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_2 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_3 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "down",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack002",
        "disabled_reason": None,
    })

    service_4 = services.Service(services.ServiceManager(None), {
        "status": "disabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack003",
        "disabled_reason": None,
    })

    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)
    TEST.services.add(service_4)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "foo",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 1,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "bar",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 2,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })

    host2 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack002",
        "service": "nova-conductor",
        "zone": "testing",
    })

    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })

    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })

    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)

    server_group_1 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "1",
            "name": "server_group_1",
            "policies": [],
        },
    )

    server_group_2 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "2",
            "name": "server_group_2",
            "policies": ["affinity", "some_other_policy"],
        },
    )

    server_group_3 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "3",
            "name": "server_group_3",
            "policies": ["anti-affinity", "some_other_policy"],
        },
    )

    TEST.server_groups.add(server_group_1)
    TEST.server_groups.add(server_group_2)
    TEST.server_groups.add(server_group_3)
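
The servers above are built by substituting values into a JSON template string (SERVER_DATA, defined earlier in the file) and parsing the result with json.loads. A minimal sketch of the same pattern, using a hypothetical, much smaller template:

import json

# Hypothetical stand-in for the SERVER_DATA template used above.
SERVER_TEMPLATE = """{
    "server": {
        "id": "%(server_id)s",
        "name": "%(name)s",
        "status": "%(status)s"
    }
}"""

vals = {"server_id": "1", "name": "server_1", "status": "ACTIVE"}
server_dict = json.loads(SERVER_TEMPLATE % vals)["server"]
print(server_dict["name"])  # server_1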

Example 10

Project: Fiona Source File: collect.py
@click.command(short_help="Collect a sequence of features.")
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@click.option('--record-buffered/--no-record-buffered', default=False,
              help="Economical buffering of writes at record, not collection "
              "(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@options.src_crs_opt
@click.option('--with-ld-context/--without-ld-context', default=False,
              help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
              help="map a term to a URI and add it to the output's JSON LD "
                   "context.")
@click.option('--parse/--no-parse', default=True,
              help="load and dump the geojson feature (default is True)")
@click.pass_context
def collect(ctx, precision, indent, compact, record_buffered, ignore_errors,
            src_crs, with_ld_context, add_ld_context_item, parse):
    """Make a GeoJSON feature collection from a sequence of GeoJSON
    features and print it."""
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    sink = click.get_text_stream('stdout')

    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = ',' if compact else ', '

    if src_crs:
        if not parse:
            raise click.UsageError("Can't specify --src-crs with --no-parse")
        transformer = partial(transform_geom, src_crs, 'EPSG:4326',
                              antimeridian_cutting=True, precision=precision)
    else:
        transformer = lambda x: x

    first_line = next(stdin)

    # If parsing geojson
    if parse:
        # If input is RS-delimited JSON sequence.
        if first_line.startswith(u'\x1e'):
            def feature_text_gen():
                buffer = first_line.strip(u'\x1e')
                for line in stdin:
                    if line.startswith(u'\x1e'):
                        if buffer:
                            feat = json.loads(buffer)
                            feat['geometry'] = transformer(feat['geometry'])
                            yield json.dumps(feat, **dump_kwds)
                        buffer = line.strip(u'\x1e')
                    else:
                        buffer += line
                else:
                    feat = json.loads(buffer)
                    feat['geometry'] = transformer(feat['geometry'])
                    yield json.dumps(feat, **dump_kwds)
        else:
            def feature_text_gen():
                feat = json.loads(first_line)
                feat['geometry'] = transformer(feat['geometry'])
                yield json.dumps(feat, **dump_kwds)

                for line in stdin:
                    feat = json.loads(line)
                    feat['geometry'] = transformer(feat['geometry'])
                    yield json.dumps(feat, **dump_kwds)

    # If *not* parsing geojson
    else:
        # If input is RS-delimited JSON sequence.
        if first_line.startswith(u'\x1e'):
            def feature_text_gen():
                buffer = first_line.strip(u'\x1e')
                for line in stdin:
                    if line.startswith(u'\x1e'):
                        if buffer:
                            yield buffer
                        buffer = line.strip(u'\x1e')
                    else:
                        buffer += line
                else:
                    yield buffer
        else:
            def feature_text_gen():
                yield first_line
                for line in stdin:
                    yield line

    try:
        source = feature_text_gen()

        if record_buffered:
            # Buffer GeoJSON data at the feature level for smaller
            # memory footprint.
            indented = bool(indent)
            rec_indent = "\n" + " " * (2 * (indent or 0))

            collection = {
                'type': 'FeatureCollection',
                'features': []}
            if with_ld_context:
                collection['@context'] = helpers.make_ld_context(
                    add_ld_context_item)

            head, tail = json.dumps(collection, **dump_kwds).split('[]')

            sink.write(head)
            sink.write("[")

            # Try the first record.
            try:
                i, first = 0, next(source)
                if with_ld_context:
                    first = helpers.id_record(first)
                if indented:
                    sink.write(rec_indent)
                sink.write(first.replace("\n", rec_indent))
            except StopIteration:
                pass
            except Exception as exc:
                # Ignoring errors is *not* the default.
                if ignore_errors:
                    logger.error(
                        "failed to serialize file record %d (%s), "
                        "continuing",
                        i, exc)
                else:
                    # Log error and close up the GeoJSON, leaving it
                    # more or less valid no matter what happens above.
                    logger.critical(
                        "failed to serialize file record %d (%s), "
                        "quiting",
                        i, exc)
                    sink.write("]")
                    sink.write(tail)
                    if indented:
                        sink.write("\n")
                    raise

            # Because trailing commas aren't valid in JSON arrays
            # we'll write the item separator before each of the
            # remaining features.
            for i, rec in enumerate(source, 1):
                try:
                    if with_ld_context:
                        rec = helpers.id_record(rec)
                    if indented:
                        sink.write(rec_indent)
                    sink.write(item_sep)
                    sink.write(rec.replace("\n", rec_indent))
                except Exception as exc:
                    if ignore_errors:
                        logger.error(
                            "failed to serialize file record %d (%s), "
                            "continuing",
                            i, exc)
                    else:
                        logger.critical(
                            "failed to serialize file record %d (%s), "
                            "quiting",
                            i, exc)
                        sink.write("]")
                        sink.write(tail)
                        if indented:
                            sink.write("\n")
                        raise

            # Close up the GeoJSON after writing all features.
            sink.write("]")
            sink.write(tail)
            if indented:
                sink.write("\n")

        else:
            # Buffer GeoJSON data at the collection level. The default.
            collection = {
                'type': 'FeatureCollection',
                'features': []}
            if with_ld_context:
                collection['@context'] = helpers.make_ld_context(
                    add_ld_context_item)

            head, tail = json.dumps(collection, **dump_kwds).split('[]')
            sink.write(head)
            sink.write("[")
            sink.write(",".join(source))
            sink.write("]")
            sink.write(tail)
            sink.write("\n")

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
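
The '\x1e' checks above detect RS-delimited JSON text sequences (RFC 7464), in which every record is prefixed with an ASCII record separator and may span several lines. A minimal sketch of decoding such a sequence with json.loads, assuming each record between separators is complete:

import json

# Two single-line records in RFC 7464 form; real input may span lines,
# which is why collect() above buffers text until the next '\x1e'.
stream = '\x1e{"type": "Feature", "id": 1}\n\x1e{"type": "Feature", "id": 2}\n'
features = [json.loads(chunk) for chunk in stream.split('\x1e') if chunk.strip()]
print([f["id"] for f in features])  # [1, 2]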

Example 11

Project: api-samples Source File: 02_VulnInstancesSearchWorkFlow.py
Function: main
def main():
    # Constants
    TASK_CHECK_PROGRESS_WAIT_TIME = 5  # secs
    TASK_TIMEOUT = 18000  # secs

    # Create our client.
    client = client_module.RestApiClient(version='6.0')

    # Using the /qvm/saved_searches endpoint with a GET request to get 'High
    # risk' saved search
    saved_searches_endpoint_url = 'qvm/saved_searches?' \
        'filter=name="High%20risk"'
    SampleUtilities.pretty_print_request(client, saved_searches_endpoint_url,
                                         'GET')
    # URL Encoding
    # - convert a space into %20
    response = client.call_api(saved_searches_endpoint_url, 'GET')

    # Verify that the call to the API was successful.
    if (response.code != 200):
        print('Failed to retrieve saved search list.')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # The number of saved searches retrieved.
    response_body = json.loads(response.read().decode('utf-8'))
    number_of_searches_retrieved = len(response_body)

    # Display number of searches, and the names of the searches retrieved.
    print(str(number_of_searches_retrieved) +
          ' saved searches were retrieved.\n')
    print('Available QVM Saved Searches:')
    print(json.dumps(response_body, indent=2))

    # Retrieve the saved search unique identifier
    saved_search_id = str(response_body[0]['id'])
    saved_search_name = str(response_body[0]['name'])

    print('Running saved search : ' + saved_search_name)

    # Create a vulnerability instances search by using saved search id
    # Using the /qvm/saved_searches/{saved_search_id}/vuln_instances
    # endpoint with a GET request.
    # The search is asynchronous, so the response will not be the results of
    # the search.
    search_endpoint_url = ('qvm/saved_searches/' +
                           saved_search_id + '/vuln_instances')
    SampleUtilities.pretty_print_request(client, search_endpoint_url,
                                         'GET')
    response = client.call_api(search_endpoint_url, 'GET')
    response_body = json.loads(response.read().decode('utf-8'))

    # Check if the success code was returned to ensure the call to the API was
    # successful.
    if (response.code != 201):
        print(
            "Failed to create vulnerability instance search " +
            "with saved search id " + saved_search_id)
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # get the task id
    task_id = str(response_body['id'])
    # get retention_period_in_days
    retention_period_in_days = str(response_body['retention_period_in_days'])
    # task status
    status = str(response_body['status'])

    # Print response
    print('Task Id: ' + task_id)
    print('Retention period in days: ' + retention_period_in_days)
    print('Task Status: ' + status + '\n')

    # Check task status
    task_status_endpoint_url = ('qvm/saved_searches/vuln_instances/' +
                                task_id + '/status')
    SampleUtilities.pretty_print_request(client, task_status_endpoint_url,
                                         'GET')
    response = client.call_api(task_status_endpoint_url, 'GET')

    # Verify that the call to the API was successful.
    if (response.code != 200):
        print(
            'Failed to retrieve status of a vulnerability instance search ' +
            'using task id: ' + task_id)
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # This block of code calls GET
    # /qvm/saved_searches/vuln_instances/{task_id}/status
    # on the QVM API to determine if the task is complete.
    # This block of code will repeat until the status of
    # the search is 'COMPLETED' or there is an error.
    response_body = json.loads(response.read().decode('utf-8'))
    task_progress_time = 0  # secs
    error = False
    while (response_body['status'] != 'COMPLETED') and not error:
        if (response_body['status'] == 'QUEUED') or \
                (response_body['status'] == 'PROCESSING'):
            # Check task status
            SampleUtilities.pretty_print_request(client,
                                                 task_status_endpoint_url,
                                                 'GET')
            response = client.call_api(task_status_endpoint_url, 'GET')

            # Verify that the call to the API was successful.
            if (response.code != 200):
                print(
                    'Failed to retrieve status of a vulnerability instance ' +
                    'search using task id: ' + task_id)
                SampleUtilities.pretty_print_response(response)
                sys.exit(1)

            response_body = json.loads(response.read().decode('utf-8'))
            status = str(response_body['status'])
            print('Task Status: ' + status + '\n')

            # Wait before polling the task status API again
            time.sleep(TASK_CHECK_PROGRESS_WAIT_TIME)
            task_progress_time = task_progress_time + \
                TASK_CHECK_PROGRESS_WAIT_TIME

            # If the search is queued or taking too long to process then we
            # should exit
            if (task_progress_time >= TASK_TIMEOUT):
                print(
                    'Failed to run vulnerability instance search using ' +
                    'task id: %s due to timeout at %s secs.  Task status is %s'
                    % (task_id, TASK_TIMEOUT, response_body['status']))
                sys.exit(1)

        else:
            print(response_body['status'])
            error = True

    if (error):
        print(
            'Failed to retrieve current status of a vulnerability instance ' +
            'search using task id: ' + task_id)
        sys.exit(1)

    # After the search is complete, call the
    # GET /qvm/saved_searches/vuln_instances/{task_id}/results/vuln_instances
    # to obtain Vulnerability Instances returned from the search.
    vuln_instances_results_endpoint_url = (
        'qvm/saved_searches/vuln_instances/' + task_id +
        '/results/vuln_instances')
    # If this set contained a large amount of data, we might want to process it
    # a little bit at a time. We are going to use pagination to get 10 rows of
    # vulnerability instances results at a time until we get a total of 30 rows
    # back.
    # To get back only part of the data we can use a 'Range' header.
    # Note that the range header is indexed from zero, so here we are asking
    # for the first 10 items.
    for i in range(0, 3):

        print(
            '===============================================================' +
            '========================================\n')
        print(
            'Page %s.  Trying to get back 10 rows of ' % str(i + 1) +
            'vulnerability instances')
        range_header = {
            'Range': 'items=' + str(i * 10) + '-' + str((i * 10) + 9)}
        SampleUtilities.pretty_print_request(
            client, vuln_instances_results_endpoint_url, 'GET',
            headers=range_header)
        response = client.call_api(vuln_instances_results_endpoint_url,
                                   'GET', headers=range_header)

        # Verify that the call to the API was successful.
        if (response.code != 200):
            print(
                'Failed to retrieve vulnerability instances results using ' +
                'task id: ' + task_id)
            SampleUtilities.pretty_print_response(response)
            sys.exit(1)

        response_body = json.loads(response.read().decode('utf-8'))

        # Get the number of saved searches retrieved.
        number_of_vuln_instances_retrieved = len(response_body)

        # If we have vulnerability instances then get more info
        if (number_of_vuln_instances_retrieved > 0):

            # Display the number of vulnerability instances retrieved and
            # vulnerability instances JSON object
            print(str(number_of_vuln_instances_retrieved) +
                  ' vulnerability instances were retrieved.\n')
            print('Available vulnerability Instances:')
            print(json.dumps(response_body, indent=2))

            # Build a set of associated asset ids and vulnerability ids
            # (use sets because we don't care about duplicates)
            # so that we can use it to call other APIs to get more info
            asset_id_set = set()
            vuln_id_set = set()
            for vuln_instances in response_body:
                asset_id_set.add(str(vuln_instances['asset_id']))
                vuln_id_set.add(str(vuln_instances['vulnerability_id']))

            # convert list of ids into a comma separated list of string
            asset_id_str_list = ",".join(str(x) for x in asset_id_set)
            vuln_id_str_list = ",".join(str(x) for x in vuln_id_set)

            print('\nSet of asset ids: ' + asset_id_str_list)
            print('Set of vulnerability ids: ' + vuln_id_str_list + '\n')

            # get the number of assets
            num_of_assets = len(asset_id_set)
            print('Getting ' + str(num_of_assets) + ' rows of assets\n')

            # If we have assets then get more asset info
            if (num_of_assets > 0):
                # Get asset info by using a list of asset ids and
                # using the
                # /qvm/saved_searches/vuln_instances/{task_id}/results/assets
                # endpoint with a GET request.
                #     URL Encoding
                #         - convert a space into %20
                assets_results_endpoint_url = (
                    'qvm/saved_searches/vuln_instances/' +
                    task_id + '/results/assets?filter=id%20IN%20(' +
                    asset_id_str_list + ')')
                SampleUtilities.pretty_print_request(
                    client, assets_results_endpoint_url, 'GET')
                response = client.call_api(assets_results_endpoint_url,
                                           'GET')

                # Verify that the call to the API was successful.
                if (response.code != 200):
                    print(
                        'Failed to retrieve vulnerability instances results ' +
                        'using task id: ' + task_id)
                    SampleUtilities.pretty_print_response(response)
                    sys.exit(1)

                response_body = json.loads(response.read().decode('utf-8'))

                # Get the number of assets retrieved.
                number_of_assets_retrieved = len(response_body)

                # Display the number of assets retrieved and asset JSON objects
                print(str(number_of_assets_retrieved) +
                      ' assets were retrieved.\n')
                print('Available Assets:')
                print(json.dumps(response_body, indent=2))
                print('\n')

            # get the number of vulnerabilities
            num_of_vulns = len(vuln_id_set)
            print(
                'Getting ' + str(num_of_vulns) + ' rows of vulnerabilities\n')

            # If we have vulnerabilities then get more vulnerability info
            if (num_of_vulns > 0):
                # Get vulnerability info by using a list of vulnerability ids
                # and using the
                # /qvm/saved_searches/vuln_instances/{task_id}/results/vulnerabilities
                # endpoint with a GET request.
                #     URL Encoding
                #         - convert a space into %20
                vulnerabilities_results_endpoint_url = (
                    'qvm/saved_searches/vuln_instances/' +
                    task_id + '/results/vulnerabilities?filter=id%20IN%20(' +
                    vuln_id_str_list + ')')
                SampleUtilities.pretty_print_request(
                    client, vulnerabilities_results_endpoint_url, 'GET')
                response = client.call_api(
                    vulnerabilities_results_endpoint_url, 'GET')

                # Verify that the call to the API was successful.
                if (response.code != 200):
                    print(
                        'Failed to retrieve vulnerabilities results using ' +
                        'task id: ' + task_id)
                    SampleUtilities.pretty_print_response(response)
                    sys.exit(1)

                response_body = json.loads(response.read().decode('utf-8'))

                # Get the number of vulnerabilities retrieved.
                number_of_vulnerabilities_retrieved = len(response_body)

                # Display the number of vulnerabilities retrieved and asset
                # JSON objects
                print(str(number_of_vulnerabilities_retrieved) +
                      ' vulnerabilities were retrieved.\n')
                print('Available Vulnerabilities:')
                print(json.dumps(response_body, indent=2))
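
The paging loop above fetches ten rows at a time with an HTTP 'Range' header. The header is zero-indexed, so page i of size 10 covers items i*10 through i*10 + 9. A minimal sketch of just that header arithmetic, independent of the client used above:

PAGE_SIZE = 10
for i in range(3):
    start, end = i * PAGE_SIZE, i * PAGE_SIZE + PAGE_SIZE - 1
    range_header = {'Range': 'items={0}-{1}'.format(start, end)}
    print(range_header)
# {'Range': 'items=0-9'}, {'Range': 'items=10-19'}, {'Range': 'items=20-29'}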

Example 12

Project: DataPillager Source File: DataServicePillager.py
Function: main
def main():
    global count_tries
    global max_tries
    global sleep_time

    start_time = datetime.datetime.today()

    try:
        # arcgis toolbox parameters
        service_endpoint = arcpy.GetParameterAsText(0) # Service endpoint required
        output_workspace = arcpy.GetParameterAsText(1) # gdb/folder to put the results required
        max_tries = arcpy.GetParameter(2) # max number of retries allowed required
        sleep_time = arcpy.GetParameter(3) # sleep time between retries required
        strict_mode = arcpy.GetParameter(4) # JSON check True/False required
        username = arcpy.GetParameterAsText(5)
        password = arcpy.GetParameterAsText(6)
        referring_domain = arcpy.GetParameterAsText(7) # auth domain
        existing_token = arcpy.GetParameterAsText(8) # valid token value

        # to query by geometry need [xmin,ymin,xmax,ymax], spatial reference, and geometryType (eg esriGeometryEnvelope

        if service_endpoint == '':
            output_msg("Avast! Can't plunder nothing from an empty url! Time to quit.")
            sys.exit()

        if not type(strict_mode) is bool:
            strict_mode = True

        if not type(max_tries) is int:
            max_tries = int(max_tries)

        if not type(sleep_time) is int:
            sleep_time = int(sleep_time)

        if not existing_token:
            token = ''
        else:
            token = existing_token

        if output_workspace == '':
            output_workspace = os.getcwd()

        output_desc = arcpy.Describe(output_workspace)
        output_type = output_desc.dataType

        if output_type == "Folder": # To Folder
            output_folder = output_workspace
        else:
            output_folder = output_desc.path

        if username:
            # set referring domain if supplied
            # or try to infer it from url
            if referring_domain != '':
                if referring_domain[:5] == 'http:':
                    refer = 'https' + referring_domain[4:]
                else:
                    refer = referring_domain
            else:
                u = urlparse(service_endpoint)
                if u.netloc.find('arcgis.com') > -1:
                    # is an esri domain
                    refer = r"https://www.arcgis.com"
                else:
                    # generate from service url and hope it works
                    if u.scheme == 'http':
                        # must be https for token
                        refer = urlunsplit(['https', u.netloc, '', '', ''])
                    else:
                        refer = urlunsplit([u.scheme, u.netloc, '', '', ''])

            # set up authentication
            # http://stackoverflow.com/questions/1045886/https-log-in-with-urllib2
            passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
            # this creates a password manager
            passman.add_password(None, service_endpoint, username, password)
            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which service_endpoint is a super-url

            authhandler = urllib2.HTTPBasicAuthHandler(passman)
            # create the AuthHandler
            opener = urllib2.build_opener(authhandler)
            # user agent spoofing
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

            urllib2.install_opener(opener)
            # All calls to urllib2.urlopen will now use our handler
            # Make sure not to include the protocol in with the URL, or
            # HTTPPasswordMgrWithDefaultRealm will be very confused.
            # You must (of course) use it when fetching the page though.
            # authentication is now handled automatically in urllib2.urlopen

            # add proxy handling?
            # issue where a proxy may not be picked up

            # need to generate a new token
            token = gentoken(username, password, refer)
        else:
            # build a generic opener with the user agent spoofed
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            urllib2.install_opener(opener)

        if username and (token == ""):
            output_msg("Avast! The scurvy gatekeeper says 'Could not generate a token with the username and password provided'.", severity=2)

        else:
            output_msg("Start the plunder! {0}".format(service_endpoint))
            output_msg("We be stashing the booty in {0}".format(output_workspace))

            service_layers_to_get = []
            # other variables, calculated from the service
            tokenstring = ''
            if len(token) > 0:
                tokenstring = '&token=' + token
            service_call = urllib2.urlopen(service_endpoint + '?f=json' + tokenstring).read()
            if service_call and (service_call.find('error') == -1):
                service_layer_info = json.loads(service_call, strict=False)
            else:
                raise Exception("'service_call' failed to access {0}".format(service_endpoint))

            # catch root url entered
            service_list = service_layer_info.get('services')
            if service_list:
                raise ValueError("Unable to pillage a service root url at this time. Enter a FeatureServer layer url!")

            # for getting all the layers
            service_layers = service_layer_info.get('layers')
            if service_layers is not None:
                # has sub layers, get em all
                for lyr in service_layers:
                    if not lyr.get('subLayerIds'):
                        lyr_id = lyr.get('id')
                        service_layers_to_get.append(service_endpoint + '/' + str(lyr_id))
            else:
                # no sub layers
                service_layers_to_get.append(service_endpoint)
            for lyr in service_layers_to_get:
                output_msg('Found {0}'.format(lyr))

            for slyr in service_layers_to_get:
                count_tries = 0
                out_shapefile_list = [] # for file merging.
                response = None
                current_iter = 0
                max_record_count = 0
                feature_count = 0
                final_geofile = ''

                output_msg("Now pillagin' yer data from {0}".format(slyr))
                if slyr == service_endpoint: # no need to get it again
                    service_info = service_layer_info
                else:
                    service_info_call = urllib2.urlopen(slyr + '?f=json' + tokenstring).read()
                    if service_info_call:
                        service_info = json.loads(service_info_call, strict=False)
                    else:
                        raise Exception("'service_info_call' failed to access {0}".format(slyr))

                if not service_info.get('error'):
                    service_name = service_info.get('name')

                    # clean up the service name (remove invalid characters)
                    service_name_cl = service_name.encode('ascii', 'ignore') # strip any non-ascii characters that may cause an issue
                    service_name_cl = arcpy.ValidateTableName(service_name_cl, output_workspace) # remove any other problematic characters
                    ##output_msg("'{0}' will be stashed as '{1}'".format(service_name, service_name_cl))

                    # add url & write out the service info for reference
                    service_info[u'serviceURL'] = slyr
                    info_filename = service_name_cl + "_info.txt"
                    info_file = os.path.join(output_folder, info_filename)
                    with open(info_file, 'w') as i_file:
                        json.dump(service_info, i_file, sort_keys=True, indent=4, separators=(',', ': '))
                        output_msg("Yar! {0} Service info stashed in '{1}'".format(service_name, info_file))

                    if strict_mode:
                        # check JSON supported
                        supports_json = False
                        if 'supportedQueryFormats' in service_info:
                            supported_formats = service_info.get('supportedQueryFormats').split(",")
                            for data_format in supported_formats:
                                if data_format == "JSON":
                                    supports_json = True
                                    break
                        else:
                            output_msg('Unable to check supported formats. Check {0} for details'.format(info_file))
                    else:
                        # assume JSON supported
                        supports_json = True

                    if supports_json:
                        try:
                            # loop through fields in service_info, get objectID field
                            objectid_field = "OBJECTID"
                            if 'fields' in service_info:
                                field_list = service_info.get('fields')
                                for field in field_list:
                                    if field.get('type') == 'esriFieldTypeOID':
                                        objectid_field = field.get('name')
                                        break
                            else:
                                output_msg("No field list returned - forging ahead with {0}".format(objectid_field))

                            feat_OIDLIST_query = r"/query?where=" + objectid_field + r"+%3E+0&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&distance=&units=esriSRUnit_Meter&outFields=&returnGeometry=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=true&returnCountOnly=false&returnExtentOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&f=json" + tokenstring

                            # to query using geometry,&geometry=   &geometryType= esriGeometryEnvelope &inSR= and probably spatial relationship and buffering
                            feat_query = r"/query?objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&distance=&units=esriSRUnit_Meter&outFields=*&returnGeometry=true&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&f=json" + tokenstring

                            max_record_count = service_info.get('maxRecordCount') # maximum number of records returned by service at once

                            # extract using actual OID values is the safest way
                            feature_OIDs = None
                            feature_query = json.loads(urllib2.urlopen(slyr + feat_OIDLIST_query).read())
                            if feature_query and 'objectIds' in feature_query:
                                feature_OIDs = feature_query["objectIds"]
                            else:
                                raise ValueError('Unable to get OID values: {}'.format(feature_query))

                            if feature_OIDs:
                                feature_count = len(feature_OIDs)
                                sortie_count = feature_count//max_record_count + (feature_count % max_record_count > 0)
                                output_msg("{0} records, in chunks of {1}, err, that be {2} sorties. Ready lads!".format(feature_count, max_record_count, sortie_count))

                                feature_OIDs.sort()
                                # chunk them
                                for group in grouper(feature_OIDs, max_record_count):
                                    # reset count_tries
                                    count_tries = 0
                                    start_oid = group[0]
                                    end_oid = group[max_record_count-1]
                                    if end_oid is None: # reached the end of the iterables
                                        # loop through and find last oid
                                        # need this due to fillvalue of None in grouper
                                        for i in reversed(group):
                                            if i is not None:
                                                end_oid = i
                                                break

                                    # >= %3E%3D, <= %3C%3D
                                    where_clause = "&where={0}+%3E%3D+{1}+AND+{2}+%3C%3D+{3}".format(objectid_field, str(start_oid), objectid_field, str(end_oid))
                                    # response is a string of json with the attr and geom
                                    query = slyr + feat_query + where_clause
                                    response = get_data(query) # expects json object. An error will return none
                                    if not response or not response.get('features'):
                                        # break out
                                        raise ValueError("Abandon ship! Data access failed! Check what ye manag'd to plunder before failure.")
                                    else:
                                        feature_dict = response["features"] # load the features so we can check they are not empty

                                        if len(feature_dict) != 0:
                                            # convert response to json file on disk then to shapefile (is fast)
                                            out_JSON_name = service_name_cl + "_" + str(current_iter) + ".json"
                                            out_JSON_file = os.path.join(output_folder, out_JSON_name)

                                            #with open(out_JSON_file, 'w') as out_file:
                                            #    out_file.write(response.encode('utf-8')) #back from unicode

                                            with codecs.open(out_JSON_file, 'w', 'utf-8') as out_file:
                                                data = json.dumps(response, ensure_ascii=False)
                                                out_file.write(data)

                                            output_msg("Nabbed some json data fer ye: '{0}', oids {1} to {2}".format(out_JSON_name, start_oid, end_oid))

                                            if output_type == "Folder":
                                                out_file_name = service_name_cl + "_" + str(current_iter) + ".shp"
                                            else:
                                                out_file_name = service_name_cl + "_" + str(current_iter)
                                            # in-memory version
                                            ##temp_output = "in_memory\\"
                                            ##out_file_name = service_name_cl + "_" + str(current_iter)
                                            ##out_geofile = os.path.join(temp_output, out_file_name)

                                            out_geofile = os.path.join(output_workspace, out_file_name)

                                            output_msg("Converting json to {0}".format(out_geofile))
                                            arcpy.JSONToFeatures_conversion(out_JSON_file, out_geofile)
                                            out_shapefile_list.append(out_geofile)
                                            os.remove(out_JSON_file) # clean up the JSON file

                                        current_iter += max_record_count

                            else:
                                # no objectids
                                output_msg("No feature IDs found!")
                                raise ValueError("Aaar, plunderin' failed")

                            # download complete, create a final output
                            if output_type == "Folder":
                                final_geofile = os.path.join(output_workspace, service_name_cl + ".shp")
                            else:
                                final_geofile = os.path.join(output_workspace, service_name_cl)

                            output_msg("Stashin' all the booty in '{0}'".format(final_geofile))

                            #combine all the data
                            combine_data(out_shapefile_list, final_geofile)

                            end_time = datetime.datetime.today()
                            elapsed_time = end_time - start_time
                            output_msg("{0} plundered in {1}".format(final_geofile, str(elapsed_time)))

                        except ValueError, e:
                            output_msg("ERROR: " + str(e), severity=2)

                        except Exception, e:
                            line, err = trace()
                            output_msg("Script Error\n{0}\n on {1}".format(err, line), severity=2)
                            output_msg(arcpy.GetMessages())

                        finally:
                            if arcpy.Exists(final_geofile):
                                data_count = int(arcpy.GetCount_management(final_geofile)[0])
                                if data_count == feature_count: #we got it all
                                    output_msg("Scrubbing the decks...")
                                    for fc in out_shapefile_list:
                                        arcpy.Delete_management(fc)
                                else:
                                    output_msg("Splicin' the data failed - found {0} but expected {1}. Check {2} to see what went wrong.".format(data_count, feature_count, final_geofile))

                    else:
                        # no JSON output
                        output_msg("Aaaar, ye service does not support JSON output. Can't do it.")
                else:
                    # service info error
                    output_msg("Error: {0}".format(service_info.get('error')))

    except ValueError, e:
        output_msg("ERROR: " + str(e), severity=2)

    except Exception as e:
        if hasattr(e, 'errno') and e.errno == 10054:
            output_msg("ERROR: " + str(e), severity=2)
        else:
            line, err = trace()
            output_msg("Error\n{0}\n on {1}".format(err, line), severity=2)
        output_msg(arcpy.GetMessages())

    finally:
        end_time = datetime.datetime.today()
        elapsed_time = end_time - start_time
        output_msg("Plunderin' done, in " + str(elapsed_time))

Example 13

Project: BitXBay Source File: electrum_main.py
def main():
    global guiWindow
    parser = arg_parser()
    options, args = parser.parse_args()
    if options.portable and options.wallet_path is None:
        options.electrum_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'electrum_data')

    # config is an object passed to the various constructors (wallet, interface, gui)
    if is_android:
        config_options = {
            'portable': True,
            'verbose': True,
            'gui': 'android',
            'auto_cycle': True,
        }
    else:
        config_options = eval(str(options))
        for k, v in config_options.items():
            if v is None:
                config_options.pop(k)

    set_verbosity(config_options.get('verbose'))

    config = SimpleConfig(config_options)
    cmd = ''
    if len(args) == 0:
        url = None
        cmd = 'gui'
    elif len(args) == 1 and re.match('^bitcoin:', args[0]):
        url = args[0]
        cmd = 'gui'
    else:
        cmd = args[0]

    if cmd == 'gui':
        gui_name = 'stdio'  # config.get('gui', 'classic')
        if gui_name in ['lite', 'classic']:
            gui_name = 'qt'
        try:
            gui = __import__('electrum_gui.' + gui_name, fromlist=['electrum_gui'])
        except ImportError:
            traceback.print_exc(file=sys.stdout)
            sys.exit()
            #sys.exit("Error: Unknown GUI: " + gui_name )

        # network interface
        if not options.offline:
            network = Network(config)
            network.start()
        else:
            network = None

        guiWindow = gui = gui.ElectrumGui(config, network)
        gui.main(url)

        if network:
            network.stop()

        # we use daemon threads, their termination is enforced.
        # this sleep command gives them time to terminate cleanly.
        time.sleep(0.1)
        sys.exit(0)

    if cmd not in known_commands:
        cmd = 'help'

    cmd = known_commands[cmd]

    # instantiate wallet for command-line
    storage = WalletStorage(config)


    if cmd.name in ['create', 'restore']:
        if storage.file_exists:
            sys.exit("Error: Remove the existing wallet first!")
        if options.password is not None:
            password = options.password
        elif cmd.name == 'restore' and options.mpk:
            password = None
        else:
            password = prompt_password("Password (hit return if you do not wish to encrypt your wallet):")

        # if config.server is set, the user either passed the server on the command
        # line or chose it previously. If no server was passed on the command line,
        # we just pick a random one.
        if not config.get('server'):
            config.set_key('server', pick_random_server())

        #fee = options.tx_fee if options.tx_fee else raw_input("fee (default:%s):" % (str(Decimal(wallet.fee)/100000000)))
        #gap = options.gap_limit if options.gap_limit else raw_input("gap limit (default 5):")
        #if fee:
        #    wallet.set_fee(float(fee)*100000000)
        #if gap:
        #    wallet.change_gap_limit(int(gap))

        if cmd.name == 'restore':
            if options.mpk:
                wallet = Wallet.from_mpk(options.mpk, storage)
            else:
                import getpass
                seed = getpass.getpass(prompt="seed:", stream=None) if options.concealed else raw_input("seed:")
                wallet = Wallet.from_seed(str(seed),storage)
                if not wallet:
                    sys.exit("Error: Invalid seed")
                wallet.save_seed(password)

            if not options.offline:
                network = Network(config)
                network.start()
                wallet.start_threads(network)
                print_msg("Recovering wallet...")
                wallet.restore(lambda x: x)
                if wallet.is_found():
                    print_msg("Recovery successful")
                else:
                    print_msg("Warning: Found no history for this wallet")
            else:
                wallet.synchronize()
                print_msg("Warning: This wallet was restored offline. It may contain more addresses than displayed.")

        else:
            wallet = Wallet(storage)
            wallet.init_seed(None)
            wallet.save_seed(password)
            wallet.synchronize()
            print_msg("Your wallet generation seed is:\n\"%s\"" % wallet.get_mnemonic(password))
            print_msg("Please keep it in a safe place; if you lose it, you will not be able to restore your wallet.")

        print_msg("Wallet saved in '%s'" % wallet.storage.path)

        # terminate
        sys.exit(0)


    if cmd.name not in ['create', 'restore'] and cmd.requires_wallet and not storage.file_exists:
        print_msg("Error: Wallet file not found.")
        print_msg("Type 'electrum create' to create a new wallet, or provide a path to a wallet with the -w option")
        sys.exit(0)


    if cmd.requires_wallet:
        wallet = Wallet(storage)
    else:
        wallet = None


    # important warning
    if cmd.name in ['dumpprivkey', 'dumpprivkeys']:
        print_msg("WARNING: ALL your private keys are secret.")
        print_msg("Exposing a single private key can compromise your entire wallet!")
        print_msg("In particular, DO NOT use 'redeem private key' services proposed by third parties.")

    # commands needing password
    if cmd.requires_password:
        if wallet.seed == '':
            seed = ''
            password = None
        elif wallet.use_encryption:
            password = prompt_password('Password:', False)
            if not password:
                print_msg("Error: Password required")
                sys.exit(1)
            # check password
            try:
                seed = wallet.get_seed(password)
            except Exception:
                print_msg("Error: This password does not decode this wallet.")
                sys.exit(1)
        else:
            password = None
            seed = wallet.get_seed(None)
    else:
        password = None

    # add missing arguments, do type conversions
    if cmd.name == 'importprivkey':
        # See if they specified a key on the cmd line, if not prompt
        if len(args) == 1:
            args.append(prompt_password('Enter PrivateKey (will not echo):', False))

    elif cmd.name == 'signrawtransaction':
        args = [cmd, args[1], json.loads(args[2]) if len(args) > 2 else [], json.loads(args[3]) if len(args) > 3 else []]

    elif cmd.name == 'createmultisig':
        args = [cmd, int(args[1]), json.loads(args[2])]

    elif cmd.name == 'createrawtransaction':
        args = [cmd, json.loads(args[1]), json.loads(args[2])]

    elif cmd.name == 'listaddresses':
        args = [cmd, options.show_all, options.show_labels]

    elif cmd.name in ['payto', 'mktx']:
        domain = [options.from_addr] if options.from_addr else None
        args = ['mktx', args[1], Decimal(args[2]), Decimal(options.tx_fee) if options.tx_fee else None, options.change_addr, domain]

    elif cmd.name in ['paytomany', 'mksendmanytx']:
        domain = [options.from_addr] if options.from_addr else None
        outputs = []
        for i in range(1, len(args), 2):
            if len(args) < i+2:
                print_msg("Error: Mismatched arguments.")
                sys.exit(1)
            outputs.append((args[i], Decimal(args[i+1])))
        args = ['mksendmanytx', outputs, Decimal(options.tx_fee) if options.tx_fee else None, options.change_addr, domain]

    elif cmd.name == 'help':
        if len(args) < 2:
            print_help(parser)

    # check the number of arguments
    if len(args) - 1 < cmd.min_args:
        print_msg("Not enough arguments")
        print_msg("Syntax:", cmd.syntax)
        sys.exit(1)

    if cmd.max_args >= 0 and len(args) - 1 > cmd.max_args:
        print_msg("too many arguments", args)
        print_msg("Syntax:", cmd.syntax)
        sys.exit(1)

    if cmd.max_args < 0:
        if len(args) > cmd.min_args + 1:
            message = ' '.join(args[cmd.min_args:])
            print_msg("Warning: Final argument was reconstructed from several arguments:", repr(message))
            args = args[0:cmd.min_args] + [message]



    # run the command
    if cmd.name == 'deseed':
        if not wallet.seed:
            print_msg("Error: This wallet has no seed")
        else:
            ns = wallet.storage.path + '.seedless'
            print_msg("Warning: you are going to create a seedless wallet'\nIt will be saved in '%s'" % ns)
            if raw_input("Are you sure you want to continue? (y/n) ") in ['y', 'Y', 'yes']:
                wallet.storage.path = ns
                wallet.seed = ''
                wallet.storage.put('seed', '', True)
                wallet.use_encryption = False
                wallet.storage.put('use_encryption', wallet.use_encryption, True)
                for k in wallet.imported_keys.keys():
                    wallet.imported_keys[k] = ''
                wallet.storage.put('imported_keys', wallet.imported_keys, True)
                print_msg("Done.")
            else:
                print_msg("Action canceled.")

    elif cmd.name == 'getconfig':
        key = args[1]
        out = config.get(key)
        print_msg(out)

    elif cmd.name == 'setconfig':
        key, value = args[1:3]
        try:
            value = ast.literal_eval(value)
        except:
            pass
        config.set_key(key, value, True)
        print_msg(True)

    elif cmd.name == 'password':
        new_password = prompt_password('New password:')
        wallet.update_password(password, new_password)

    else:
        run_command(cmd, password, args)


    time.sleep(0.1)
    sys.exit(0)
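
Several command branches above (signrawtransaction, createmultisig, createrawtransaction) use json.loads to turn a positional command-line argument into a list or dict. A minimal sketch of that conversion with an explicit error path follows; parse_json_arg is a hypothetical helper, not part of Electrum.

import json
import sys

def parse_json_arg(raw, default=None):
    """Decode a structured command-line argument, falling back to a default."""
    if raw is None:
        return default
    try:
        return json.loads(raw)
    except ValueError:  # the argument was not valid JSON
        sys.exit("Error: argument is not valid JSON: %r" % raw)

# e.g. createmultisig 2 '["key1","key2","key3"]'
keys = parse_json_arg('["key1","key2","key3"]')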

Example 14

Project: baidu-fuse Source File: baidufuse.py
    def _add_file_to_buffer(self, path,file_info):
        foo = File()
        foo['st_ctime'] = file_info['local_ctime']
        foo['st_mtime'] = file_info['local_mtime']
        foo['st_mode'] = (stat.S_IFDIR | 0777) if file_info['isdir'] \
            else (stat.S_IFREG | 0777)
        foo['st_nlink'] = 2 if file_info['isdir'] else 1
        foo['st_size'] = file_info['size']
        self.buffer[path] = foo

    def _del_file_from_buffer(self,path):
        self.buffer.pop(path)

    def getattr(self, path, fh=None):
        #print 'getattr *',path
        # First check whether the file is already in the cache

        if path not in self.buffer:
            print path, 'cache miss'
            #print self.buffer
            #print self.traversed_folder
            jdata = json.loads(self.disk.meta([path]).content)
            try:
                if 'info' not in jdata:
                    raise FuseOSError(errno.ENOENT)
                if jdata['errno'] != 0:
                    raise FuseOSError(errno.ENOENT)
                file_info = jdata['info'][0]
                self._add_file_to_buffer(path,file_info)
                st = self.buffer[path].getDict()
                return st
            except:
                raise FuseOSError(errno.ENOENT)
        else:
            #print path, 'cache hit'
            return self.buffer[path].getDict()



    def readdir(self, path, offset):
        self.uploadLock.acquire()
        while True:
            try:
                foo = json.loads(self.disk.list_files(path).text)
                break
            except:
                print 'error'


        files = ['.', '..']
        abs_files = [] # absolute paths of the files under this folder
        for file in foo['list']:
            files.append(file['server_filename'])
            abs_files.append(file['path'])
        # Cache the info of files under this folder, querying meta info in batches

        # Update: the meta API cannot query more than 100 records at a time,
        # so split into ceil(file_num / 100.0) groups using residue classes
        if path not in self.traversed_folder or self.traversed_folder[path] == False:
            print 'caching', path
            file_num = len(abs_files)
            group = int(math.ceil(file_num / 100.0))
            for i in range(group):
                obj = [f for n,f in enumerate(abs_files) if n % group == i] # one group of paths
                while 1:
                    try:
                        ret = json.loads(self.disk.meta(obj).text)
                        break
                    except:
                        print 'error'

                for file_info in ret['info']:
                    if file_info['path'] not in self.buffer:
                        self._add_file_to_buffer(file_info['path'],file_info)
            #print self.buffer
            print 'finished caching', path
            self.traversed_folder[path] = True
        for r in files:
            yield r
        self.uploadLock.release()

    def _update_file_manual(self,path):
        while 1:
            try:
                jdata = json.loads(self.disk.meta([path]).content)
                break
            except:
                print 'error'

        if 'info' not in jdata:
            raise FuseOSError(errno.ENOENT)
        if jdata['errno'] != 0:
            raise FuseOSError(errno.ENOENT)
        file_info = jdata['info'][0]
        self._add_file_to_buffer(path,file_info)


    def rename(self, old, new):
        #logging.debug('* rename',old,os.path.basename(new))
        print '*'*10,'RENAME CALLED',old,os.path.basename(new),type(old),type(new)
        while True:
            try:
                ret = self.disk.rename([(old,os.path.basename(new))]).content
                jdata = json.loads(ret)
                break
            except:
                print 'error'

        if jdata['errno'] != 0:
            # the target name already exists; delete the original file
            print self.disk.delete([new]).content
            print self.disk.rename([(old,os.path.basename(new))])
        self._update_file_manual(new)
        self.buffer.pop(old)


    def open(self, path, flags):
        self.readLock.acquire()
        print '*'*10,'OPEN CALLED',path,flags
        #print '[cuem]',path
        """
        Permission denied

        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            raise FuseOSError(errno.EACCES)
        """
        self.fd += 1
        self.readLock.release()
        
        return self.fd

    def create(self, path, mode,fh=None):
        # Create the file
        # Chinese (non-ASCII) paths are problematic
        print '*'*10,'CREATE CALLED',path,mode,type(path)
        #if 'outputstream' not in path:
        tmp_file = tempfile.TemporaryFile('r+w+b')
        foo = self.disk.upload(os.path.dirname(path),tmp_file,os.path.basename(path)).content
        ret = json.loads(foo)
        print ret
        print 'create-not-outputstream',ret
        if ret['path'] != path:
            # 文件已存在
            print 'file already exists'
            raise FuseOSError(errno.EEXIST)
        '''
        else:
            print 'create:',path
            foo = File()
            foo['st_ctime'] = int(time.time())
            foo['st_mtime'] = int(time.time())
            foo['st_mode'] = (stat.S_IFREG | 0777)
            foo['st_nlink'] = 1
            foo['st_size'] = 0
            self.buffer[path] = foo
        '''


        '''
        dict(st_mode=(stat.S_IFREG | mode), st_nlink=1,
                                st_size=0, st_ctime=time.time(), st_mtime=time.time(),
                                st_atime=time.time())
        '''
        self.fd += 1
        return 0

    def write(self, path, data, offset, fp):
        # Called when a file is being uploaded
        # 4 KB (4096 bytes) per block; data holds the bytes of the block
        # The last block is detected by len(data) < 4096
        # File size = offset of the last block + len(data)

        # Uploading 4 KB at a time is too slow, so accumulate into a larger chunk per upload

        #print '*'*10,path,offset, len(data)

        def _block_size(stream):
            stream.seek(0,2)
            return stream.tell()

        _BLOCK_SIZE = 16 * 2 ** 20
        # Work done on the first block
        if offset == 0:
            #self.uploadLock.acquire()
            #self.readLock.acquire()
            # Initialize the list of block md5s
            self.upload_blocks[path] = {'tmp':None,
                                        'blocks':[]}
            # Create a temporary buffer file
            tmp_file = tempfile.TemporaryFile('r+w+b')
            self.upload_blocks[path]['tmp'] = tmp_file

        # Write data into the temp file; once it reaches _BLOCK_SIZE, upload the block and reset the buffer
        try:
            tmp = self.upload_blocks[path]['tmp']
        except KeyError:
            return 0
        tmp.write(data)

        if _block_size(tmp) > _BLOCK_SIZE:
            print path, 'uploading block'
            tmp.seek(0)
            try:
                foo = self.disk.upload_tmpfile(tmp,callback=ProgressBar()).content
                foofoo = json.loads(foo)
                block_md5 = foofoo['md5']
            except:
                print foo



            # Record this block's md5 in upload_blocks
            self.upload_blocks[path]['blocks'].append(block_md5)
            # Create a fresh temporary buffer file
            self.upload_blocks[path]['tmp'].close()
            tmp_file = tempfile.TemporaryFile('r+w+b')
            self.upload_blocks[path]['tmp'] = tmp_file
            print 'created temp file', tmp_file.name

        # Work done on the last block
        if len(data) < 4096:
            # Check for a file with the same name; delete it if present
            while True:
                try:
                    foo = self.disk.meta([path]).content
                    foofoo = json.loads(foo)
                    break
                except:
                    print 'error'


            if foofoo['errno'] == 0:
                logging.debug('Deleted the file which has same name.')
                self.disk.delete([path])
            # See whether a final upload is needed
            if _block_size(tmp) != 0:
                # The temp file still holds data and must be uploaded
                print path, 'uploading final block, size', _block_size(tmp)
                tmp.seek(0)
                while True:
                    try:
                        foo = self.disk.upload_tmpfile(tmp,callback=ProgressBar()).content
                        foofoo = json.loads(foo)
                        break
                    except:
                        print 'exception, retry.'

                block_md5 = foofoo['md5']
                # Record this block's md5 in upload_blocks
                self.upload_blocks[path]['blocks'].append(block_md5)

            # Call upload_superfile to merge the uploaded blocks
            print 'merging file', path, type(path)
            self.disk.upload_superfile(path,self.upload_blocks[path]['blocks'])
            # Drop this path's entry from upload_blocks
            self.upload_blocks.pop(path)
            # Refresh the local file-list cache
            self._update_file_manual(path)
            #self.readLock.release()
            #self.uploadLock.release()
        return len(data)


    def mkdir(self, path, mode):
        logger.debug("mkdir is:" + path)
        self.disk.mkdir(path)

    def rmdir(self, path):
        logger.debug("rmdir is:" + path)
        self.disk.delete([path])

    def read(self, path, size, offset, fh):
        #print '*'*10,'READ CALLED',path,size,offset
        #logger.debug("read is: " + path)
        paras = {'Range': 'bytes=%s-%s' % (offset, offset + size - 1)}
        while True:
            try:
                foo = self.disk.download(path, headers=paras).content
                return foo
            except:
                pass

    access = None
    statfs = None
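
Two patterns in the example above are worth isolating: the meta API is queried in ceil(file_num / 100.0) groups because it rejects more than 100 records per call, and every response body is parsed inside a retry loop. The sketch below shows both, with the unbounded bare except: loops replaced by a bounded retry that only catches the JSON decode error; fetch_meta is a hypothetical stand-in for self.disk.meta(...).text.

import json
import math
import time

def load_json_with_retry(fetch, attempts=5, delay=1.0):
    """Retry a flaky fetch until its body parses as JSON, at most `attempts` times."""
    last_err = None
    for _ in range(attempts):
        try:
            return json.loads(fetch())
        except ValueError as e:  # malformed or truncated response body
            last_err = e
            time.sleep(delay)
    raise last_err

def batched_meta(fetch_meta, paths, batch_limit=100):
    """Query metadata in ceil(n / batch_limit) groups, since the API caps each call."""
    group = int(math.ceil(len(paths) / float(batch_limit)))
    info = []
    for i in range(group):
        obj = [p for n, p in enumerate(paths) if n % group == i]  # one group of paths
        ret = load_json_with_retry(lambda: fetch_meta(obj))
        info.extend(ret['info'])
    return info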

Example 15

Project: perf-benchmarks Source File: network_io.py
def network_io_test(itype1, image1, region1, itype2, image2, region2, filesize=64, iteration=1, timeout=600): 
    
    ssh_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../.ssh/')

    if itype1 in ec2.ec2_instance_types:
        inst1 = ec2.EC2Inst(itype1, image1, region1, 'ubuntu', '%s/perf-bench-%s.pem'\
                            % (ssh_path, region1), 'perf-bench-%s' % region1)
    if itype1 in gce.gce_instance_types:
        inst1 = gce.GCEInst(itype1, image1, region1, os.environ['USER'], '%s/google_compute_engine' % ssh_path)

    if itype2 in ec2.ec2_instance_types:
        inst2 = ec2.EC2Inst(itype2, image2, region2, 'ubuntu', '%s/perf-bench-%s.pem'\
                            % (ssh_path, region2), 'perf-bench-%s' % region2)
    if itype2 in gce.gce_instance_types:
        inst2 = gce.GCEInst(itype2, image2, region2, os.environ['USER'], '%s/google_compute_engine' % ssh_path)

    inst1.launch()
    inst2.launch()

    try:

        print '[IP] waiting'
        for i in range(150):
            inst1.update()
            inst2.update()
            if inst1.remote_ip != None and inst2.remote_ip != None:
                print '[IP] ok'
                break
            time.sleep(2)
        
        print '[SSH] waiting'
        for i in range(120):
            try:
                telnetlib.Telnet(inst1.remote_ip, 22, 1)
                telnetlib.Telnet(inst2.remote_ip, 22, 1)
                print '[SSH] ok'
                break
            except:
                time.sleep(2)
        
        print '[UP] %s | %s | %s' % (inst1.itype, inst1.region, inst1.remote_ip)
        print '[UP] %s | %s | %s' % (inst2.itype, inst2.region, inst2.remote_ip)

        util.instances_prepare([inst1, inst2], ['iperf', 'screen'])

        ssh_cli = paramiko.SSHClient()
        ssh_cli.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_cli.connect(inst1.remote_ip, username=inst1.user, key_filename=inst1.ssh_key)
        scp_cli = scp.SCPClient(ssh_cli.get_transport())
        file_path = os.path.dirname(os.path.abspath(__file__))
        scp_cli.put('%s/netcattest.py' % file_path, '/tmp/netcattest.py')
        scp_cli.put('%s/scptest.py' % file_path, '/tmp/scptest.py')
        scp_cli.put('%s/iperftest.py' % file_path, '/tmp/iperftest.py')

        print '[START TESTS] %s %s <-----> %s %s'\
                % (inst1.itype, inst1.region, inst2.itype, inst2.region)

        for i in range(iteration):

            print '[START ITERATION %s]' % i

            print '[START] netcat'
            stdin, stdout, stderr = ssh_cli.exec_command('python2.7 /tmp/netcattest.py -i %s -u %s -k %s -s %s -t %s'
                                 % (inst2.remote_ip, inst2.user, inst2.ssh_key, filesize, timeout))
            time.sleep(10)
            for _ in range(timeout / 5 + 1):
                stdin, stdout, stderr = ssh_cli.exec_command('[ -f netcat.report ]; echo $?')
                out = stdout.read()
                if out.strip() == '0':
                    stdin, stdout, stderr = ssh_cli.exec_command('cat netcat.report')
                    out = stdout.read()
                    report = json.loads(out)
                    report.update({'inst1':inst1.itype, 'inst2':inst2.itype})
                    report.update({'region1':inst1.region, 'region2':inst2.region})
                    report.update({'cloud1':inst1.cloud, 'cloud2':inst2.cloud})
                    report_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../results/network-io/netcat')
                    if not os.path.exists(report_path):
                        cmd = 'mkdir -p %s' % report_path
                        subps.call(cmd.split())
                        #os.mkdir(report_path)
                    with open('%s/%s-%s__%s-%s' % (report_path, inst1.itype, inst1.region, inst2.itype, inst2.region), 'a+') as f:
                        f.write(json.dumps(report, indent=4, sort_keys=True))
                        f.write('\n')
                    print report['time']
                    break
                else:
                    time.sleep(5)
            print '[END] netcat'

            print '[START] scp'
            stdin, stdout, stderr = ssh_cli.exec_command('python2.7 /tmp/scptest.py -i %s -u %s -k %s -s %s -t %s'
                                 % (inst2.remote_ip, inst2.user, inst2.ssh_key, filesize, timeout))
            time.sleep(10)
            for _ in range(timeout / 5 + 1):
                stdin, stdout, stderr = ssh_cli.exec_command('[ -f scp.report ]; echo $?')
                out = stdout.read()
                if out.strip() == '0':
                    stdin, stdout, stderr = ssh_cli.exec_command('cat scp.report')
                    out = stdout.read()
                    report = json.loads(out)
                    report.update({'inst1':inst1.itype, 'inst2':inst2.itype})
                    report.update({'region1':inst1.region, 'region2':inst2.region})
                    report.update({'cloud1':inst1.cloud, 'cloud2':inst2.cloud})
                    report_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../results/network-io/scp')
                    if not os.path.exists(report_path):
                        cmd = 'mkdir -p %s' % report_path
                        subps.call(cmd.split())
                        #os.mkdir(report_path)
                    with open('%s/%s-%s__%s-%s' % (report_path, inst1.itype, inst1.region, inst2.itype, inst2.region), 'a+') as f:
                        f.write(json.dumps(report, indent=4, sort_keys=True))
                        f.write('\n')
                    print report['time']
                    break
                else:
                    time.sleep(5)
            print '[END] scp'

            print '[START] iperf'
            threads = [1, 4, 8] 
            work_time = 5
            stdin, stdout, stderr = ssh_cli.exec_command('python2.7 /tmp/iperftest.py -i %s -u %s -k %s -p %s -t %s'
                                 % (inst2.remote_ip, inst2.user, inst2.ssh_key, ' '.join(map(str, threads)), work_time))
            time.sleep(10)
            for _ in range(len(threads) * (work_time + 10) / 5 + 1):
                stdin, stdout, stderr = ssh_cli.exec_command('[ -f iperf.report ]; echo $?')
                out = stdout.read()
                if out.strip() == '0':
                    stdin, stdout, stderr = ssh_cli.exec_command('cat iperf.report')
                    out = stdout.read()
                    report = json.loads(out)
                    report.update({'inst1':inst1.itype, 'inst2':inst2.itype})
                    report.update({'region1':inst1.region, 'region2':inst2.region})
                    report.update({'cloud1':inst1.cloud, 'cloud2':inst2.cloud})
                    report_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../results/network-io/iperf')
                    if not os.path.exists(report_path):
                        cmd = 'mkdir -p %s' % report_path
                        subps.call(cmd.split())
                        #os.mkdir(report_path)
                    with open('%s/%s-%s__%s-%s' % (report_path, inst1.itype, inst1.region, inst2.itype, inst2.region), 'a+') as f:
                        f.write(json.dumps(report, indent=4, sort_keys=True))
                        f.write('\n')
                    print report['speed']
                    break
                else:
                    time.sleep(5)
            print '[END] iperf'

        ssh_cli.close()
    except Exception:

        print '[EXCEPTION] %s\n' % traceback.format_exc()

    finally:

        inst1.terminate()
        inst2.terminate()
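
Each benchmark above follows the same round trip: poll for a report file, decode it with json.loads, tag the resulting dict with run metadata via update(), and append the re-serialized record to a results file. A minimal sketch of that step in isolation; append_report and the sample values are illustrative, not part of the project.

import json

def append_report(raw_report, metadata, path):
    """Decode a benchmark report, tag it with run metadata, append it as JSON."""
    report = json.loads(raw_report)
    report.update(metadata)
    with open(path, 'a') as f:
        f.write(json.dumps(report, indent=4, sort_keys=True))
        f.write('\n')

append_report('{"time": 12.3}', {'inst1': 'm3.large', 'region1': 'us-east-1'},
              '/tmp/netcat-report.json')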

Example 16

Project: MediaBrowser.Kodi Source File: ItemInfo.py
    def onInit(self):
        self.action_exitkeys_id = [10, 13]
        
        __settings__ = xbmcaddon.Addon(id='plugin.video.xbmb3c')
        port = __settings__.getSetting('port')
        host = __settings__.getSetting('ipaddress')
        server = host + ":" + port
        self.server = server         
        db = Database()
        userid = self.downloadUtils.getUserId()
        self.userid = userid
       
        jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + self.id + "?Fields=SeriesGenres,AirTime&format=json", suppress=False, popup=1 )     
        item = json.loads(jsonData)
        self.item = item
        
        id = item.get("Id")
        WINDOW = xbmcgui.Window( 10025 )
        WINDOW.setProperty('ItemGUID', id)
        
        name = item.get("Name")
        image = self.downloadUtils.getArtwork(item, "poster")
        fanArt = self.downloadUtils.getArtwork(item, "BackdropNoIndicators")
        discart = db.get(id +".Disc")
        # calculate the percentage complete
        userData = item.get("UserData")
        cappedPercentage = 0
        
        if(userData != None):
            playBackTicks = float(userData.get("PlaybackPositionTicks"))
            if(playBackTicks != None and playBackTicks > 0):
                runTimeTicks = float(item.get("RunTimeTicks", "0"))
                if(runTimeTicks > 0):
                    percentage = int((playBackTicks / runTimeTicks) * 100.0)
                    cappedPercentage = percentage - (percentage % 10)
                    if(cappedPercentage == 0):
                        cappedPercentage = 10
                    if(cappedPercentage == 100):
                        cappedPercentage = 90
            
            try:
                watchedButton = self.getControl(3192)
            except:
                watchedButton = None
            if(watchedButton != None):
                if userData.get("Played") == True:
                    watchedButton.setSelected(True)
                else:
                    watchedButton.setSelected(False)
            
            try:
                dislikeButton = self.getControl(3193)
            except:
                dislikeButton = None            
            if(dislikeButton != None):
                if userData.get("Likes") != None and userData.get("Likes") == False:
                    dislikeButton.setSelected(True)
                else:
                    dislikeButton.setSelected(False)
                    
            try:
                likeButton = self.getControl(3194)
            except:
                likeButton = None                       
            if(likeButton != None):
                if userData.get("Likes") != None and userData.get("Likes") == True:
                    likeButton.setSelected(True)
                else:
                    likeButton.setSelected(False)
                    
            try:
                favouriteButton = self.getControl(3195)
            except:
                favouriteButton = None
            if(favouriteButton != None):
                if userData.get("IsFavorite") == True:
                    favouriteButton.setSelected(True)
                else:
                    favouriteButton.setSelected(False)
          
        
        episodeInfo = ""
        type = item.get("Type")
        WINDOW.setProperty('ItemType', type)
        if(type == "Episode" or type == "Season"):
            WINDOW.setProperty('ItemGUID', item.get("SeriesId"))
            name = item.get("SeriesName") + ": " + name
            season = str(item.get("ParentIndexNumber")).zfill(2)
            episodeNum = str(item.get("IndexNumber")).zfill(2)
            episodeInfo = "S" + season + "xE" + episodeNum
        elif type == "Movie":
            if item.get("Taglines") != None and item.get("Taglines") != [] and item.get("Taglines")[0] != None:
                episodeInfo = item.get("Taglines")[0]
        elif type == "ChannelVideoItem":
            if item.get("ExtraType") != None:
                if item.get('ExtraType') == "Trailer":
                    self.isTrailer = True
                
            
        url = server + ',;' + id
        url = urllib.quote(url)
        self.playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            
        self.peopleUrl = "XBMC.Container.Update(plugin://plugin.video.xbmb3c?mode=" + str(_MODE_CAST_LIST) + "&id=" + id + ")"
        #self.peopleUrl = "XBMC.RunPlugin(plugin://plugin.video.xbmb3c?mode=" + str(_MODE_CAST_LIST) + "&id=" + id + ")"
        
        try:
            trailerButton = self.getControl(3102)
            if(trailerButton != None):
                if not self.isTrailer and item.get("LocalTrailerCount") != None and item.get("LocalTrailerCount") > 0:
                    itemTrailerUrl = "http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + id + "/LocalTrailers?format=json"
                    jsonData = self.downloadUtils.downloadUrl(itemTrailerUrl, suppress=False, popup=1 ) 
                    trailerItem = json.loads(jsonData)
                    trailerUrl = server + ',;' + trailerItem[0].get("Id")
                    trailerUrl = urllib.quote(trailerUrl) 
                    self.trailerUrl = "plugin://plugin.video.xbmb3c/?mode=" + str(_MODE_BASICPLAY) + "&url=" + trailerUrl
                else:
                    trailerButton.setEnabled(False)
        except:
            pass
        
        try:
            couchPotatoButton = self.getControl(3103)
            if(couchPotatoButton != None):
                if self.isTrailer and item.get("ProviderIds") != None and item.get("ProviderIds").get("Imdb") != None:
                    self.couchPotatoUrl = CP_ADD_VIA_IMDB + item.get("ProviderIds").get("Imdb")
                elif self.isTrailer:
                    self.couchPotatoUrl = CP_ADD_URL + name
                elif not self.isTrailer:
                    couchPotatoButton.setEnabled(False)
        except:
            pass

        # all the media stream info
        mediaList = self.getControl(3220)
        
        mediaStreams = item.get("MediaStreams")
        if(mediaStreams != None):
            for mediaStream in mediaStreams:
                if(mediaStream.get("Type") == "Video"):
                    videocodec = mediaStream.get("Codec")
                    if(videocodec == "mpeg2video"):
                        videocodec = "mpeg2"
                    height = str(mediaStream.get("Height"))
                    width = str(mediaStream.get("Width"))
                    aspectratio = mediaStream.get("AspectRatio")
                    fr = mediaStream.get("RealFrameRate")
                    videoInfo = width + "x" + height + " " + videocodec + " " + str(round(fr, 2))
                    listItem = xbmcgui.ListItem("Video:", videoInfo)
                    mediaList.addItem(listItem)
                if(mediaStream.get("Type") == "Audio"):
                    audiocodec = mediaStream.get("Codec")
                    channels = mediaStream.get("Channels")
                    lang = mediaStream.get("Language")
                    audioInfo = audiocodec + " " + str(channels)
                    if(lang != None and len(lang) > 0 and lang != "und"):
                        audioInfo = audioInfo + " " + lang
                    listItem = xbmcgui.ListItem("Audio:", audioInfo)
                    mediaList.addItem(listItem)
                if(mediaStream.get("Type") == "Subtitle"):
                    lang = mediaStream.get("Language")
                    codec = mediaStream.get("Codec")
                    subInfo = codec
                    if(lang != None and len(lang) > 0 and lang != "und"):
                        subInfo = subInfo + " " + lang
                    listItem = xbmcgui.ListItem("Sub:", subInfo)
                    mediaList.addItem(listItem)

        
        #for x in range(0, 10):
        #    listItem = xbmcgui.ListItem("Test:", "Test 02 " + str(x))
        #    mediaList.addItem(listItem)
        
        # add overview
        overview = item.get("Overview")
        self.getControl(3223).setText(overview)
        
        # add people
        peopleList = self.getControl(3230)
        people = item.get("People")
        director=''
        writer=''
        for person in people:
            displayName = person.get("Name")
            if person.get("Role") != None and person.get("Role") != '':
               role = "as " + person.get("Role")
            else:
               role = ''
            id = person.get("Id")
            tag = person.get("PrimaryImageTag")
            
            baseName = person.get("Name")
            baseName = baseName.replace(" ", "+")
            baseName = baseName.replace("&", "_")
            baseName = baseName.replace("?", "_")
            baseName = baseName.replace("=", "_")
            
            actionUrl = "plugin://plugin.video.xbmb3c?mode=" + str(_MODE_PERSON_DETAILS) +"&name=" + baseName
            
            if(tag != None and len(tag) > 0):
                thumbPath = self.downloadUtils.imageUrl(id, "Primary", 0, 400, 400)
                listItem = xbmcgui.ListItem(label=displayName, label2=role, iconImage=thumbPath, thumbnailImage=thumbPath)
            else:
                listItem = xbmcgui.ListItem(label=displayName, label2=role)
                
            listItem.setProperty("ActionUrl", actionUrl)
            peopleList.addItem(listItem)
            if(person.get("Type") == "Director") and director =='':
                director = displayName
                if(tag != None and len(tag) > 0):
                  thumbPath = self.downloadUtils.imageUrl(id, "Primary", 0, 580, 860)
                  directorlistItem = xbmcgui.ListItem("Director:", label2=displayName, iconImage=thumbPath, thumbnailImage=thumbPath)
                else:
                  directorlistItem = xbmcgui.ListItem("Director:", label2=displayName)
                directorlistItem.setProperty("ActionUrl", actionUrl)  
            if(person.get("Type") == "Writing") and writer == '':
                writer = person.get("Name")
                if(tag != None and len(tag) > 0):
                  thumbPath = self.downloadUtils.imageUrl(id, "Primary", 0, 580, 860)
                  writerlistItem = xbmcgui.ListItem("Writer:", label2=displayName, iconImage=thumbPath, thumbnailImage=thumbPath)
                else:
                  writerlistItem = xbmcgui.ListItem("Writer:", label2=displayName)
                writerlistItem.setProperty("ActionUrl", actionUrl) 
            if(person.get("Type") == "Writer") and writer == '':
                writer = person.get("Name")    
                if(tag != None and len(tag) > 0):
                  thumbPath = self.downloadUtils.imageUrl(id, "Primary", 0, 580, 860)
                  writerlistItem = xbmcgui.ListItem("Writer:", label2=displayName, iconImage=thumbPath, thumbnailImage=thumbPath)
                else:
                  writerlistItem = xbmcgui.ListItem("Writer:", label2=displayName)
                writerlistItem.setProperty("ActionUrl", actionUrl)
        # add general info
        infoList = self.getControl(3226)
        listItem = xbmcgui.ListItem("Year:", str(item.get("ProductionYear")))
        infoList.addItem(listItem)
        listItem = xbmcgui.ListItem("Rating:", str(item.get("CommunityRating")))
        infoList.addItem(listItem)          
        listItem = xbmcgui.ListItem("MPAA:", str(item.get("OfficialRating")))
        infoList.addItem(listItem)   
        duration = int(item.get("RunTimeTicks", "0")) / (10000000 * 60)
        listItem = xbmcgui.ListItem("RunTime:", str(duration) + " Minutes")
        infoList.addItem(listItem) 
         
        genre = ""
        genres = item.get("Genres")
        if genres != None and genres != []:
            for genre_string in genres:
                if genre == "": #Just take the first genre
                    genre = genre_string
                else:
                    genre = genre + " / " + genre_string
        elif item.get("SeriesGenres") != None and item.get("SeriesGenres") != '':
            genres = item.get("SeriesGenres")
            if genres != None and genres != []:
              for genre_string in genres:
                if genre == "": #Just take the first genre
                    genre = genre_string
                else:
                    genre = genre + " / " + genre_string     

        genrelistItem = xbmcgui.ListItem("Genre:", genre)
        genrelistItem2 = xbmcgui.ListItem("Genre:", genre)
        infoList.addItem(genrelistItem) 
        
        path = item.get('Path')
        pathlistItem = xbmcgui.ListItem("Path:", path)
        pathlistItem2 = xbmcgui.ListItem("Path:", path)  
        infoList.addItem(pathlistItem)
       
        if item.get("CriticRating") != None:
            listItem = xbmcgui.ListItem("CriticRating:", str(item.get("CriticRating")))
            infoList.addItem(listItem)
               
        # Process Studio
        studio = "" 
        if item.get("SeriesStudio") != None and item.get("SeriesStudio") != '':
            studio = item.get("SeriesStudio")
        if studio == "":        
            studios = item.get("Studios")
            if(studios != None):
                for studio_string in studios:
                    if studio=="": #Just take the first one
                        temp=studio_string.get("Name")
                        studio=temp.encode('utf-8')
        
        if studio != "":
            listItem = xbmcgui.ListItem("Studio:", studio)
            infoList.addItem(listItem)
        
        if item.get("Metascore") != None:
          listItem = xbmcgui.ListItem("Metascore:", str(item.get("Metascore")))
          infoList.addItem(listItem)
          
        playCount = 0
        if(userData != None and userData.get("Played") == True):
            playCount = 1
        listItem = xbmcgui.ListItem("PlayedCount:", str(playCount))
        infoList.addItem(listItem)
        
        if item.get("ProviderIds") != None and item.get("ProviderIds").get("Imdb") != None and type == "Movie":
            listItem = xbmcgui.ListItem("ID:", item.get("ProviderIds").get("Imdb"))
            infoList.addItem(listItem)
        elif item.get("ProviderIds") != None and item.get("ProviderIds").get("Tvdb") != None and type == "Series":
            listItem = xbmcgui.ListItem("ID:", item.get("ProviderIds").get("Tvdb"))
            infoList.addItem(listItem)
        elif (type == "Episode" or type == "Season"):
            jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + item.get("SeriesId") + "?Fields=SeriesGenres,AirTime&format=json", suppress=False, popup=1 )     
            seriesitem = json.loads(jsonData)
            if seriesitem.get("ProviderIds") != None and seriesitem.get("ProviderIds").get("Tvdb") != None:
              listItem = xbmcgui.ListItem("ID:", seriesitem.get("ProviderIds").get("Tvdb"))
              infoList.addItem(listItem)
        
        # alternate list 
        try:
            alternateList = self.getControl(3291)
            if alternateList != None:
                if directorlistItem != None:
                   alternateList.addItem(directorlistItem)
                if writerlistItem != None:
                   alternateList.addItem(writerlistItem)
                alternateList.addItem(genrelistItem2)
                if item.get("ProductionLocations") !=None and item.get("ProductionLocations") != []:
                   listItem = xbmcgui.ListItem("Country:", item.get("ProductionLocations")[0])
                   alternateList.addItem(listItem)
                elif item.get("AirTime") !=None:
                   listItem = xbmcgui.ListItem("Air Time:", item.get("AirTime"))
                   alternateList.addItem(listItem)
                if(item.get("PremiereDate") != None):
                   premieredatelist = (item.get("PremiereDate")).split("T")
                   premieredate = premieredatelist[0]
                   listItem = xbmcgui.ListItem("Premiered Date:", premieredate)
                   alternateList.addItem(listItem)
                alternateList.addItem(pathlistItem2)
                
        except:
            pass     
     
        # add resume percentage text to name
        addResumePercent = __settings__.getSetting('addResumePercent') == 'true'
        if (addResumePercent and cappedPercentage != 0):
            name = name + " (" + str(cappedPercentage) + "%)"

        self.getControl(3000).setLabel(name)
        self.getControl(3003).setLabel(episodeInfo)
        self.getControl(3001).setImage(fanArt)
        
        try:
            discartImageControl = self.getControl(3091)
            artImageControl = self.getControl(3092)
            thumbImageControl = self.getControl(3093)
            if discartImageControl != None and artImageControl != None and thumbImageControl != None:
                if discart != '':
                  self.getControl(3091).setImage(discart)
                  self.getControl(3092).setVisible(False)
                  self.getControl(3093).setVisible(False)
                else:
                  self.getControl(3091).setVisible(False)
                  art = db.get(id +".Art")
                  if (artImageControl != None):
                      if art != '':
                          self.getControl(3092).setImage(art)
                          self.getControl(3093).setVisible(False)
                      else:
                          self.getControl(3092).setVisible(False)
                          if (type == "Episode"):
                              thumb = db.get(item.get("SeriesId") +".Thumb")
                          else:
                              thumb = db.get(id +".Thumb")
                          if (thumbImageControl != None):
                              if thumb != '':
                                  self.getControl(3093).setImage(thumb)
                              else:
                                  self.getControl(3093).setVisible(False)
                          
                  
        except:
            pass 
        
        if(type == "Episode"):
            # null_pointer - I have removed this in favor of letting the user chose from the setting and using the "poster" type in the above image url create
            #image = self.downloadUtils.getArtwork(seriesitem, "Primary")
            seriesimage = db.get(item.get("SeriesId") + ".Primary3")
            try:
                self.getControl(3099).setImage(seriesimage)
            except:
                pass
            
            self.getControl(3009).setImage(image)
            if(cappedPercentage != None):
                self.getControl(3010).setImage("Progress\progress_" + str(cappedPercentage) + ".png")
        else:
            self.getControl(3011).setImage(image)
            if(cappedPercentage != None):
                self.getControl(3012).setImage("Progress\progress_" + str(cappedPercentage) + ".png")
                
        # disable play button
        if(type == "Season" or type == "Series"):
            self.setFocusId(3226)
            self.getControl(3002).setEnabled(False)                 
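
json.loads returns plain dicts and lists, which is why the example above reads every field through item.get(...) with explicit None checks rather than attribute access. A minimal sketch of that defensive pattern on a nested payload; the payload here is illustrative.

import json

item = json.loads('{"Type": "Episode", "UserData": {"Played": true}}')

# Nested lookups need explicit guards, since absent keys come back as None:
user_data = item.get('UserData') or {}
played = user_data.get('Played', False)
provider_ids = item.get('ProviderIds') or {}
imdb_id = provider_ids.get('Imdb')  # None when the key chain is absent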

Example 17

Project: cognitive Source File: results_local.py
    def run(self):
        print "Run called for thread name", self.name, "End component", self.comp_id
        exp = Experiment.objects.get(pk=self.experiment)
        graph = exp.workflow.graph_data
        graph_data = {}
        print graph
        status = "success"
        err_msg = ""
        tmp = graph.split(',')

        for elem in tmp:
            node = elem.split(":")
            if len(node) > 1:
                first_node = node[0]
                second_node = node[1]
            else:
                first_node = node[0]
                second_node = ''
            if second_node in graph_data:
                depend_nodes = graph_data[second_node]
                depend_nodes.add(first_node)
            else:
                graph_data[second_node] = set()
                graph_data[second_node].add(first_node)

        topological_graph = toposort_flatten(graph_data)
        print "Graph after topological sort", topological_graph

        if self.experiment in CACHE:
            input_data = CACHE[self.experiment]
        else:
            input_data = DataFrame()

        feature_names = None
        feature_types = None
        output_data = None

        for data in topological_graph:
            component_id = int(data)
            comp = Component.objects.get(pk=component_id)
            print "Component_id", component_id, " ", comp.operation_type
            op = comp.operation_type

            if op.function_type == 'Create':
                if op.function_arg == 'Table':
                    if op.function_subtype == 'Input':
                        filename = op.function_subtype_arg
                        input_data = read_csv(filename)
                        feature_names = input_data.columns

                # TODO: [refactor] elif?
                if op.function_arg == 'Row':
                    if op.function_subtype == 'Row':
                        row_values = json.loads(op.function_subtype_arg)
                        input_data.loc[len(input_data) + 1] = row_values

                if op.function_arg == 'Model':
                    if op.function_subtype == 'Train-Test':
                        params = json.loads(op.function_subtype_arg)
                        train_data_percentage = int(params["train_data_percentage"])
                        target_column = int(params["target_column"])
                        model_type = op.function_arg_id
                        print model_type, train_data_percentage, target_column
                        target_feature = feature_names[target_column]
                        try:
                            actual_target_column = input_data.columns.get_loc(target_feature)
                            input_feature_columns = range(len(input_data.columns))
                            input_feature_columns.remove(actual_target_column)
                            input_features = input_data.columns[input_feature_columns]
                            classifier = Classifier(
                                input_data, model_type, train_data_percentage,
                                input_features, target_feature)
                            output_data = classifier.learn()
                        except ValueError as e:
                            status = "failure"
                            err_msg = " Invalid input for the model training"
                        except KeyError as e:
                            status = "failure"
                            err_msg = target_feature + " column is not available for Model Training"

            # TODO: [refactor] elif?
            if op.function_type == 'Update':
                if op.function_arg == 'Table':
                    if op.function_subtype == 'Metadata':
                        feature_types = json.loads(op.function_subtype_arg)
                        print "Feature Names", feature_names, " Feature_types ", feature_types

                if op.function_arg == 'Column':
                    if op.function_subtype == 'Add':
                        constant_value = float(op.function_subtype_arg)
                        column_id = int(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] += constant_value
                    if op.function_subtype == 'Sub':
                        constant_value = float(op.function_subtype_arg)
                        column_id = int(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] -= constant_value
                    if op.function_subtype == 'Mult':
                        constant_value = float(op.function_subtype_arg)
                        column_id = int(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] *= constant_value
                    if op.function_subtype == 'Div':
                        constant_value = float(op.function_subtype_arg)
                        column_id = int(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] /= constant_value
                    if op.function_subtype == 'Normalize':
                        column_id = int(op.function_arg_id)
                        column_name = feature_names[column_id]
                        sum_array = input_data.sum(axis=0)
                        if column_name not in sum_array:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue  # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        else:
                            normalization_value = sum_array[column_name]
                            input_data[column_name] = input_data[column_name] / normalization_value

            # TODO: [refactor] elif?
            if op.function_type == 'Filter':
                if op.function_arg == 'Table':
                    if op.function_subtype == 'Project':
                        column_id_list = json.loads(op.function_arg_id)
                        excluded_columns = range(len(feature_names))
                        for elem in column_id_list:  # Bug: Calling Projection twice will break indexing logic
                            excluded_columns.remove(elem)
                        excluded_columns = [x for x in excluded_columns if feature_names[x] in input_data]
                        print "Excluded columns ", excluded_columns
                        if excluded_columns:
                            input_data = input_data.drop(feature_names[excluded_columns], axis=1)
                    if op.function_subtype == 'RemoveDup':
                        column_id_list = json.loads(op.function_arg_id)
                        column_name_list = []
                        for elem in column_id_list:
                            column_name = feature_names[elem]
                            if column_name not in input_data:
                                #print "Column name ", column_name, " not present. Skipping"
                                #continue  # throw error in module status
                                status = "failure"
                                err_msg = column_name + " column is not available for current operation"
                            else:
                                column_name_list.append(column_name)
                        if column_name_list:
                            input_data = input_data.drop_duplicates(subset=column_name_list)
                    if op.function_subtype == 'RemoveMissing':
                        if op.function_subtype_arg == 'Replace_mean':
                            input_data = input_data.fillna(input_data.mean().round(2))
                        if op.function_subtype_arg == 'Replace_median':
                            input_data = input_data.fillna(input_data.median().round(2))
                        if op.function_subtype_arg == 'Replace_mode':
                            input_data = input_data.fillna(input_data.mode())
                        if op.function_subtype_arg == 'Drop_row':
                            input_data = input_data.dropna(axis=0)

            if component_id == self.comp_id:
                print "End component reached"
                self.result["feature_names"] = list(input_data.columns)
                if feature_types is not None:
                    self.result["feature_types"] = feature_types
                # self.result["data"] = input_data[:self.max_results].to_json()
                self.result["data"] = []
                result_length = min(len(input_data), self.max_results)

                for i in range(result_length):
                    tmp = []
                    for col in input_data.columns:
                        if json.dumps(input_data[col][i]) == 'NaN':
                            tmp.append('')
                        else:
                            tmp.append(input_data[col][i])
                    self.result["data"].append(tmp)

                self.result["graph_data"] = []

                for name in list(input_data.columns):
                    top_uniques = Counter(list(input_data[name])).most_common(4)
                    col_names = []
                    unique_count = []
                    for val in top_uniques:
                        if json.dumps(val[0]) == 'NaN':
                            continue
                        col_names.append(val[0])
                        unique_count.append(val[1])
                    tmp = [col_names, unique_count]
                    self.result["graph_data"].append(tmp)

                if output_data is not None:
                    self.result["output"] = output_data

                self.result["status"] = status
                self.result["message"] = err_msg

                self.result["missing_values"] = list(input_data.isnull().sum().values)
                mean = input_data.mean().round(2)
                median = input_data.median().round(2)
                self.result["mean"] = []
                self.result["median"] = []

                for elem in input_data.columns:
                    if elem in mean:
                        self.result["mean"].append(mean[elem])
                    else:
                        self.result["mean"].append('')
                    if elem in median:
                        self.result["median"].append(median[elem])
                    else:
                        self.result["median"].append('')

                self.result["unique_values"] = []

                for elem in input_data.columns:
                    self.result["unique_values"].append(input_data[elem].nunique())

                self.result["min"] = []
                self.result["max"] = []
                self.result["std"] = []
                self.result["25_quartile"] = []
                self.result["50_quartile"] = []
                self.result["75_quartile"] = []
                metric_val = input_data.describe()

                for elem in input_data.columns:
                    if elem in metric_val:
                        val = metric_val[elem].round(2)
                        self.result["min"].append(val["min"])
                        self.result["max"].append(val["max"])
                        self.result["std"].append(val["std"])
                        self.result["25_quartile"].append(val["25%"])
                        self.result["50_quartile"].append(val["50%"])
                        self.result["75_quartile"].append(val["75%"])
                    else:
                        self.result["min"].append('')
                        self.result["max"].append('')
                        self.result["std"].append('')
                        self.result["25_quartile"].append('')
                        self.result["50_quartile"].append('')
                        self.result["75_quartile"].append('')

                self.result["total_rows"] = input_data.shape[0]
                self.result["total_columns"] = input_data.shape[1]

                if self.cache_results is True:
                    CACHE[self.experiment] = input_data

                #print self.result
                print self.result["status"]
                print self.result["message"]
                break
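
The Project and RemoveDup branches above rely on the same pattern: a list of column indices arrives as a JSON string in op.function_arg_id and is recovered with json.loads before being mapped back to column names. A minimal, self-contained sketch of that round trip (the names and values below are hypothetical, not taken from the project):

import json

feature_names = ['age', 'height', 'weight']   # hypothetical column names
function_arg_id = '[0, 2]'                    # JSON-encoded list of column ids

column_id_list = json.loads(function_arg_id)  # -> [0, 2]
selected = [feature_names[i] for i in column_id_list]
print(selected)                               # ['age', 'weight']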

Example 18

Project: edx2bigquery Source File: analyze_content.py
def analyze_course_content(course_id, 
                           listings_file=None,
                           basedir="X-Year-2-data-sql", 
                           datedir="2013-09-21", 
                           use_dataset_latest=False,
                           do_upload=False,
                           courses=None,
                           verbose=True,
                           pin_date=None,
                           ):
    '''
    Compute course_content table, which quantifies:

    - number of chapter, sequential, vertical modules
    - number of video modules
    - number of problem, *openended, mentoring modules
    - number of discussion, annotatable, word_cloud modules

    Do this using the course "xbundle" file, produced when the course axis is computed.

    Include only modules which had nontrivial use, to rule out staff and un-shown content.
    Do the exclusion based on the count of each module appearing in the studentmodule table, using
    stats_module_usage for each course.

    Also, from the course listings file, compute the number of weeks the course was open.

    If do_upload (triggered by --force-recompute) then upload all accumulated data to the course report dataset
    as the "stats_course_content" table.  Also generate a "course_summary_stats" table, stored in the
    course_report_ORG or course_report_latest dataset.  The course_summary_stats table combines
    data from many reports, including stats_course_content, the medians report, the listings file,
    broad_stats_by_course, and time_on_task_stats_by_course.
    
    '''

    if do_upload:
        if use_dataset_latest:
            org = "latest"
        else:
            org = courses[0].split('/',1)[0]	# extract org from first course_id in courses

        crname = 'course_report_%s' % org

        gspath = gsutil.gs_path_from_course_id(crname)
        gsfnp = gspath / CCDATA
        gsutil.upload_file_to_gs(CCDATA, gsfnp)
        tableid = "stats_course_content"
        dataset = crname

        mypath = os.path.dirname(os.path.realpath(__file__))
        SCHEMA_FILE = '%s/schemas/schema_content_stats.json' % mypath

        try:
            the_schema = json.loads(open(SCHEMA_FILE).read())[tableid]
        except Exception as err:
            print "Oops!  Failed to load schema file for %s.  Error: %s" % (tableid, str(err))
            raise

        if 0:
            bqutil.load_data_to_table(dataset, tableid, gsfnp, the_schema, wait=True, verbose=False,
                                      format='csv', skiprows=1)

        table = 'course_metainfo'
        course_tables = ',\n'.join([('[%s.course_metainfo]' % bqutil.course_id2dataset(x)) for x in courses])
        sql = "select * from {course_tables}".format(course_tables=course_tables)
        print "--> Creating %s.%s using %s" % (dataset, table, sql)

        if 1:
            metainfo_dataset = bqutil.get_bq_table(dataset, table, sql=sql, 
                                          newer_than=datetime.datetime(2015, 1, 16, 3, 0),
                                          )
            # bqutil.create_bq_table(dataset, table, sql, overwrite=True)


        #-----------------------------------------------------------------------------
        # make course_summary_stats table
        #
        # This is a combination of the broad_stats_by_course table (if that exists), and course_metainfo.
        # Also use (and create if necessary) the nregistered_by_wrap table.

        # get the broad_stats_by_course data
        bsbc = bqutil.get_table_data(dataset, 'broad_stats_by_course')

        table_list = bqutil.get_list_of_table_ids(dataset)

        latest_person_course = max([ x for x in table_list if x.startswith('person_course_')])
        print "Latest person_course table in %s is %s" % (dataset, latest_person_course)
        
        sql = """
                SELECT pc.course_id as course_id, 
                    cminfo.wrap_date as wrap_date,
                    count(*) as nregistered,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) nregistered_by_wrap,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) / nregistered * 100 nregistered_by_wrap_pct,
                FROM
                    [{dataset}.{person_course}] as pc
                left join (
                 SELECT course_id,
                      TIMESTAMP(concat(wrap_year, "-", wrap_month, '-', wrap_day, ' 23:59:59')) as wrap_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as wrap_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as wrap_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as wrap_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Course Wrap'
                 )) as cminfo
                on pc.course_id = cminfo.course_id
                
                group by course_id, wrap_date
                order by course_id
        """.format(dataset=dataset, person_course=latest_person_course)

        nr_by_wrap = bqutil.get_bq_table(dataset, 'nregistered_by_wrap', sql=sql, key={'name': 'course_id'})

        # rates for registrants before and during course
        
        sql = """
                SELECT 
                    *,
                    ncertified / nregistered * 100 as pct_certified_of_reg,
                    ncertified_and_registered_before_launch / nregistered_before_launch * 100 as pct_certified_reg_before_launch,
                    ncertified_and_registered_during_course / nregistered_during_course * 100 as pct_certified_reg_during_course,
                    ncertified / nregistered_by_wrap * 100 as pct_certified_of_reg_by_wrap,
                    ncertified / nviewed * 100 as pct_certified_of_viewed,
                    ncertified / nviewed_by_wrap * 100 as pct_certified_of_viewed_by_wrap,
                    ncertified_by_ewrap / nviewed_by_ewrap * 100 as pct_certified_of_viewed_by_ewrap,
                FROM
                (
                # ------------------------
                # get aggregate data
                SELECT pc.course_id as course_id, 
                    cminfo.wrap_date as wrap_date,
                    count(*) as nregistered,
                    sum(case when pc.certified then 1 else 0 end) ncertified,
                    sum(case when (TIMESTAMP(pc.cert_created_date) < cminfo.ewrap_date) and (pc.certified and pc.viewed) then 1 else 0 end) ncertified_by_ewrap,
                    sum(case when pc.viewed then 1 else 0 end) nviewed,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) nregistered_by_wrap,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) / nregistered * 100 nregistered_by_wrap_pct,
                    sum(case when (pc.start_time < cminfo.wrap_date) and pc.viewed then 1 else 0 end) nviewed_by_wrap,
                    sum(case when (pc.start_time < cminfo.ewrap_date) and pc.viewed then 1 else 0 end) nviewed_by_ewrap,
                    sum(case when pc.start_time < cminfo.launch_date then 1 else 0 end) nregistered_before_launch,
                    sum(case when pc.start_time < cminfo.launch_date 
                              and pc.certified
                              then 1 else 0 end) ncertified_and_registered_before_launch,
                    sum(case when (pc.start_time >= cminfo.launch_date) 
                              and (pc.start_time < cminfo.wrap_date) then 1 else 0 end) nregistered_during_course,
                    sum(case when (pc.start_time >= cminfo.launch_date) 
                              and (pc.start_time < cminfo.wrap_date) 
                              and pc.certified
                              then 1 else 0 end) ncertified_and_registered_during_course,
                FROM
                    [{dataset}.{person_course}] as pc
                left join (
                
                # --------------------
                #  get course launch and wrap dates from course_metainfo

       SELECT AA.course_id as course_id, 
              AA.wrap_date as wrap_date,
              AA.launch_date as launch_date,
              BB.ewrap_date as ewrap_date,
       FROM (
               #  inner get course launch and wrap dates from course_metainfo
                SELECT A.course_id as course_id,
                  A.wrap_date as wrap_date,
                  B.launch_date as launch_date,
                from
                (
                 SELECT course_id,
                      TIMESTAMP(concat(wrap_year, "-", wrap_month, '-', wrap_day, ' 23:59:59')) as wrap_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as wrap_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as wrap_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as wrap_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Course Wrap'
                 )
                ) as A
                left outer join 
                (
                 SELECT course_id,
                      TIMESTAMP(concat(launch_year, "-", launch_month, '-', launch_day)) as launch_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as launch_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as launch_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as launch_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Course Launch'
                 )
                ) as B
                on A.course_id = B.course_id 
                # end inner course_metainfo subquery
            ) as AA
            left outer join
            (
                 SELECT course_id,
                      TIMESTAMP(concat(wrap_year, "-", wrap_month, '-', wrap_day, ' 23:59:59')) as ewrap_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as wrap_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as wrap_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as wrap_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Empirical Course Wrap'
                 )
            ) as BB
            on AA.course_id = BB.course_id

                # end course_metainfo subquery
                # --------------------
                
                ) as cminfo
                on pc.course_id = cminfo.course_id
                
                group by course_id, wrap_date
                order by course_id
                # ---- end get aggregate data
                )
                order by course_id
        """.format(dataset=dataset, person_course=latest_person_course)

        print "--> Assembling course_summary_stats from %s" % 'stats_cert_rates_by_registration'
        sys.stdout.flush()
        cert_by_reg = bqutil.get_bq_table(dataset, 'stats_cert_rates_by_registration', sql=sql, 
                                          newer_than=datetime.datetime(2015, 1, 16, 3, 0),
                                          key={'name': 'course_id'})

        # start assembling course_summary_stats

        c_sum_stats = defaultdict(OrderedDict)
        for entry in bsbc['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            cmci.update(entry)
            cnbw = nr_by_wrap['data_by_key'][course_id]
            nbw = int(cnbw['nregistered_by_wrap'])
            cmci['nbw_wrap_date'] = cnbw['wrap_date']
            cmci['nregistered_by_wrap'] = nbw
            cmci['nregistered_by_wrap_pct'] = cnbw['nregistered_by_wrap_pct']
            cmci['frac_female'] = float(entry['n_female_viewed']) / (float(entry['n_male_viewed']) + float(entry['n_female_viewed']))
            ncert = float(cmci['certified_sum'])
            if ncert:
                cmci['certified_of_nregistered_by_wrap_pct'] = nbw / ncert * 100.0
            else:
                cmci['certified_of_nregistered_by_wrap_pct'] = None
            cbr = cert_by_reg['data_by_key'][course_id]
            for field, value in cbr.items():
                cmci['cbr_%s' % field] = value

        # add medians for viewed, explored, and certified

        msbc_tables = {'msbc_viewed': "viewed_median_stats_by_course",
                       'msbc_explored': 'explored_median_stats_by_course',
                       'msbc_certified': 'certified_median_stats_by_course',
                       'msbc_verified': 'verified_median_stats_by_course',
                       }
        for prefix, mtab in msbc_tables.items():
            print "--> Merging median stats data from %s" % mtab
            sys.stdout.flush()
            bqdat = bqutil.get_table_data(dataset, mtab)
            for entry in bqdat['data']:
                course_id = entry['course_id']
                cmci = c_sum_stats[course_id]
                for field, value in entry.items():
                    cmci['%s_%s' % (prefix, field)] = value

        # add time on task data

        tot_table = "time_on_task_stats_by_course"
        prefix = "ToT"
        print "--> Merging time on task data from %s" % tot_table
        sys.stdout.flush()
        try:
            bqdat = bqutil.get_table_data(dataset, tot_table)
        except Exception as err:
            bqdat = {'data': {}}
        for entry in bqdat['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            for field, value in entry.items():
                if field=='course_id':
                    continue
                cmci['%s_%s' % (prefix, field)] = value

        # add serial time on task data

        tot_table = "time_on_task_serial_stats_by_course"
        prefix = "SToT"
        print "--> Merging serial time on task data from %s" % tot_table
        sys.stdout.flush()
        try:
            bqdat = bqutil.get_table_data(dataset, tot_table)
        except Exception as err:
            bqdat = {'data': {}}
        for entry in bqdat['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            for field, value in entry.items():
                if field=='course_id':
                    continue
                cmci['%s_%s' % (prefix, field)] = value

        # add show_answer stats

        tot_table = "show_answer_stats_by_course"
        prefix = "SAS"
        print "--> Merging show_answer stats data from %s" % tot_table
        sys.stdout.flush()
        try:
            bqdat = bqutil.get_table_data(dataset, tot_table)
        except Exception as err:
            bqdat = {'data': {}}
        for entry in bqdat['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            for field, value in entry.items():
                if field=='course_id':
                    continue
                cmci['%s_%s' % (prefix, field)] = value

        # setup list of keys, for CSV output

        css_keys = c_sum_stats.values()[0].keys()

        # retrieve course_metainfo table, pivot, add that to summary_stats

        print "--> Merging course_metainfo from %s" % table
        sys.stdout.flush()
        bqdat = bqutil.get_table_data(dataset, table)

        listings_keys = map(make_key, ["Institution", "Semester", "New or Rerun", "Andrew Recodes New/Rerun", 
                                       "Course Number", "Short Title", "Andrew's Short Titles", "Title", 
                                       "Instructors", "Registration Open", "Course Launch", "Course Wrap", "course_id",
                                       "Empirical Course Wrap", "Andrew's Order", "certifies", "MinPassGrade",
                                       '4-way Category by name', "4-way (CS, STEM, HSocSciGov, HumHistRel)"
                                       ])
        listings_keys.reverse()
        
        for lk in listings_keys:
            css_keys.insert(1, "listings_%s" % lk)

        COUNTS_TO_KEEP = ['discussion', 'problem', 'optionresponse', 'checkboxgroup', 'optioninput', 
                          'choiceresponse', 'video', 'choicegroup', 'vertical', 'choice', 'sequential', 
                          'multiplechoiceresponse', 'numericalresponse', 'chapter', 'solution', 'img', 
                          'formulaequationinput', 'responseparam', 'selfassessment', 'track', 'task', 'rubric', 
                          'stringresponse', 'combinedopenended', 'description', 'textline', 'prompt', 'category', 
                          'option', 'lti', 'annotationresponse', 
                          'annotatable', 'colgroup', 'tag_prompt', 'comment', 'annotationinput', 'image', 
                          'options', 'comment_prompt', 'conditional', 
                          'answer', 'poll_question', 'section', 'wrapper', 'map', 'area', 
                          'customtag', 'transcript', 
                          'split_test', 'word_cloud', 
                          'openended', 'openendedparam', 'answer_display', 'code', 
                          'drag_and_drop_input', 'customresponse', 'draggable', 'mentoring', 
                          'textannotation', 'imageannotation', 'videosequence', 
                          'feedbackprompt', 'assessments', 'openassessment', 'assessment', 'explanation', 'criterion']

        for entry in bqdat['data']:
            thekey = make_key(entry['key'])
            # if thekey.startswith('count_') and thekey[6:] not in COUNTS_TO_KEEP:
            #     continue
            if thekey.startswith('listings_') and thekey[9:] not in listings_keys:
                # print "dropping key=%s for course_id=%s" % (thekey, entry['course_id'])
                continue
            c_sum_stats[entry['course_id']][thekey] = entry['value']
            #if 'certifies' in thekey:
            #    print "course_id=%s, key=%s, value=%s" % (entry['course_id'], thekey, entry['value'])
            if thekey not in css_keys:
                css_keys.append(thekey)

        # compute forum_posts_per_week
        for course_id, entry in c_sum_stats.items():
            nfps = entry.get('nforum_posts_sum', 0)
            if nfps:
                fppw = int(nfps) / float(entry['nweeks'])
                entry['nforum_posts_per_week'] = fppw
                print "    course: %s, assessments_per_week=%s, forum_posts_per_week=%s" % (course_id, entry['total_assessments_per_week'], fppw)
            else:
                entry['nforum_posts_per_week'] = None
        css_keys.append('nforum_posts_per_week')

        # read in listings file and merge that in also
        if listings_file:
            if listings_file.endswith('.csv'):
                listings = csv.DictReader(open(listings_file))
            else:
                listings = [ json.loads(x) for x in open(listings_file) ]
            for entry in listings:
                course_id = entry['course_id']
                if course_id not in c_sum_stats:
                    continue
                cmci = c_sum_stats[course_id]
                for field, value in entry.items():
                    lkey = "listings_%s" % make_key(field)
                    if not (lkey in cmci) or (not cmci[lkey]):
                        cmci[lkey] = value

        print "Storing these fields: %s" % css_keys

        # get schema
        mypath = os.path.dirname(os.path.realpath(__file__))
        the_schema = json.loads(open('%s/schemas/schema_combined_course_summary_stats.json' % mypath).read())
        schema_dict = { x['name'] : x for x in the_schema }

        # write out CSV
        css_table = "course_summary_stats"
        ofn = "%s__%s.csv" % (dataset, css_table)
        ofn2 = "%s__%s.json" % (dataset, css_table)
        print "Writing data to %s and %s" % (ofn, ofn2)

        ofp = open(ofn, 'w')
        ofp2 = open(ofn2, 'w')
        dw = csv.DictWriter(ofp, fieldnames=css_keys)
        dw.writeheader()
        for cid, entry in c_sum_stats.items():
            for ek in entry.keys():  # .keys() returns a list copy in Python 2, so pop below is safe
                if ek not in schema_dict:
                    entry.pop(ek)
                # entry[ek] = str(entry[ek])	# coerce to be string
            ofp2.write(json.dumps(entry) + "\n")
            for key in css_keys:
                if key not in entry:
                    entry[key] = None
            dw.writerow(entry)
        ofp.close()
        ofp2.close()

        # upload to bigquery
        # the_schema = [ { 'type': 'STRING', 'name': x } for x in css_keys ]
        if 1:
            gsfnp = gspath / dataset / (css_table + ".json")
            gsutil.upload_file_to_gs(ofn2, gsfnp)
            # bqutil.load_data_to_table(dataset, css_table, gsfnp, the_schema, wait=True, verbose=False,
            #                           format='csv', skiprows=1)
            bqutil.load_data_to_table(dataset, css_table, gsfnp, the_schema, wait=True, verbose=False)

        return

    
    print "-"*60 + " %s" % course_id

    # get nweeks from listings
    lfn = path(listings_file)
    if not lfn.exists():
        print "[analyze_content] course listings file %s doesn't exist!" % lfn
        return

    data = None
    if listings_file.endswith('.json'):
        data_feed = map(json.loads, open(lfn))
    else:
        data_feed = csv.DictReader(open(lfn))
    for k in data_feed:
        if 'course_id' not in k:
            print "Strange course listings row, no course_id in %s" % k
            raise Exception("Missing course_id")
        if k['course_id']==course_id:
            data = k
            break

    if not data:
        print "[analyze_content] no entry for %s found in course listings file %s!" % (course_id, lfn)
        return

    def date_parse(field):
        (m, d, y) = map(int, data[field].split('/'))
        return datetime.datetime(y, m, d)

    launch = date_parse('Course Launch')
    wrap = date_parse('Course Wrap')
    ndays = (wrap - launch).days
    nweeks = ndays / 7.0

    print "Course length = %6.2f weeks (%d days)" % (nweeks, ndays)

    if pin_date:
        datedir = pin_date
    course_dir = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest and not pin_date)
    cfn = gsutil.path_from_course_id(course_id)

    xbfn = course_dir / ("xbundle_%s.xml" % cfn)
    
    if not xbfn.exists():
        print "[analyze_content] cannot find xbundle file %s for %s!" % (xbfn, course_id)

        if use_dataset_latest:
            # try looking in earlier directories for xbundle file
            import glob
            spath = course_dir / ("../*/xbundle_%s.xml" % cfn)
            files = list(glob.glob(spath))
            if files:
                xbfn = path(files[-1])
            if not xbfn.exists():
                print "   --> also cannot find any %s ; aborting!" % spath
            else:
                print "   --> Found and using instead: %s " % xbfn
        if not xbfn.exists():
            raise Exception("[analyze_content] missing xbundle file %s" % xbfn)

    # if there is an xbundle*.fixed file, use that instead of the normal one
    if os.path.exists(str(xbfn) + ".fixed"):
        xbfn = path(str(xbfn) + ".fixed")

    print "[analyze_content] For %s using %s" % (course_id, xbfn)
    
    # get module usage data
    mudata = get_stats_module_usage(course_id, basedir, datedir, use_dataset_latest)

    xml = etree.parse(open(xbfn)).getroot()
    
    counts = defaultdict(int)
    nexcluded = defaultdict(int)

    IGNORE = ['html', 'p', 'div', 'iframe', 'ol', 'li', 'ul', 'blockquote', 'h1', 'em', 'b', 'h2', 'h3', 'body', 'span', 'strong',
              'a', 'sub', 'strike', 'table', 'td', 'tr', 's', 'tbody', 'sup', 'sub', 'strike', 'i', 's', 'pre', 'policy', 'metadata',
              'grading_policy', 'br', 'center',  'wiki', 'course', 'font', 'tt', 'it', 'dl', 'startouttext', 'endouttext', 'h4', 
              'head', 'source', 'dt', 'hr', 'u', 'style', 'dd', 'script', 'th', 'p', 'P', 'TABLE', 'TD', 'small', 'text', 'title']

    problem_stats = defaultdict(int)

    def does_problem_have_random_script(problem):
        '''
        return 1 if problem has a script with "random." in it
        else return 0
        '''
        for elem in problem.findall('.//script'):
            if elem.text and ('random.' in elem.text):
                return 1
        return 0

    # walk through xbundle 
    def walk_tree(elem, policy=None):
        '''
        Walk XML tree recursively.
        elem = current element
        policy = dict of attributes for children to inherit, with fields like due, graded, showanswer
        '''
        policy = policy or {}
        if  type(elem.tag)==str and (elem.tag.lower() not in IGNORE):
            counts[elem.tag.lower()] += 1
        if elem.tag in ["sequential", "problem", "problemset", "course", "chapter"]:	# very old courses may use inheritance from course & chapter
            keys = ["due", "graded", "format", "showanswer", "start"]
            for k in keys:		# copy inheritable attributes, if they are specified
                val = elem.get(k)
                if val:
                    policy[k] = val
        if elem.tag=="problem":	# accumulate statistics about problems: how many have show_answer = [past_due, closed] ?  have random. in script?
            problem_stats['n_capa_problems'] += 1
            if policy.get('showanswer'):
                problem_stats["n_showanswer_%s" % policy.get('showanswer')] += 1
            else:
                problem_stats['n_shownanswer_finished'] += 1	# DEFAULT showanswer = finished  (make sure this remains true)
                # see https://github.com/edx/edx-platform/blob/master/common/lib/xmodule/xmodule/capa_base.py#L118
                # finished = Show the answer after the student has answered the problem correctly, the student has no attempts left, or the problem due date has passed.
            problem_stats['n_random_script'] += does_problem_have_random_script(elem)

            if policy.get('graded')=='true' or policy.get('graded')=='True':
                problem_stats['n_capa_problems_graded'] += 1
                problem_stats['n_graded_random_script'] += does_problem_have_random_script(elem)
                if policy.get('showanswer'):
                    problem_stats["n_graded_showanswer_%s" % policy.get('showanswer')] += 1
                else:
                    problem_stats['n_graded_shownanswer_finished'] += 1	# DEFAULT showanswer = finished  (make sure this remains true)
            
        for k in elem:
            midfrag = (k.tag, k.get('url_name_orig', None))
            if (midfrag in mudata) and int(mudata[midfrag]['ncount']) < 20:
                nexcluded[k.tag] += 1
                if verbose:
                    try:
                        print "    -> excluding %s (%s), ncount=%s" % (k.get('display_name', '<no_display_name>').encode('utf8'), 
                                                                       midfrag, 
                                                                       mudata.get(midfrag, {}).get('ncount'))
                    except Exception as err:
                        print "    -> excluding ", k
                continue
            walk_tree(k, policy.copy())

    walk_tree(xml)
    print "--> Count of individual element tags throughout XML: ", counts
    
    print "--> problem_stats:", json.dumps(problem_stats, indent=4)

    # combine some into "qual_axis" and others into "quant_axis"
    qual_axis = ['openassessment', 'optionresponse', 'multiplechoiceresponse', 
                 # 'discussion', 
                 'choiceresponse', 'word_cloud', 
                 'combinedopenended', 'choiceresponse', 'stringresponse', 'textannotation', 'openended', 'lti']
    quant_axis = ['formularesponse', 'numericalresponse', 'customresponse', 'symbolicresponse', 'coderesponse',
                  'imageresponse']

    nqual = 0
    nquant = 0
    for tag, count in counts.items():
        if tag in qual_axis:
            nqual += count
        if tag in quant_axis:
            nquant += count
    
    print "nqual=%d, nquant=%d" % (nqual, nquant)

    nqual_per_week = nqual / nweeks
    nquant_per_week = nquant / nweeks
    total_per_week = nqual_per_week + nquant_per_week

    print "per week: nqual=%6.2f, nquant=%6.2f total=%6.2f" % (nqual_per_week, nquant_per_week, total_per_week)

    # save this overall data in CCDATA
    lock_file(CCDATA)
    ccdfn = path(CCDATA)
    ccd = {}
    if ccdfn.exists():
        for k in csv.DictReader(open(ccdfn)):
            ccd[k['course_id']] = k
    
    ccd[course_id] = {'course_id': course_id,
                      'nweeks': nweeks,
                      'nqual_per_week': nqual_per_week,
                      'nquant_per_week': nquant_per_week,
                      'total_assessments_per_week' : total_per_week,
                      }

    # fields = ccd[ccd.keys()[0]].keys()
    fields = ['course_id', 'nquant_per_week', 'total_assessments_per_week', 'nqual_per_week', 'nweeks']
    cfp = open(ccdfn, 'w')
    dw = csv.DictWriter(cfp, fieldnames=fields)
    dw.writeheader()
    for cid, entry in ccd.items():
        dw.writerow(entry)
    cfp.close()
    lock_file(CCDATA, release=True)

    # store data in course_metainfo table, which has one (course_id, key, value) on each line
    # keys include nweeks, nqual, nquant, count_* for module types *

    cmfields = OrderedDict()
    cmfields['course_id'] = course_id
    cmfields['course_length_days'] = str(ndays)
    cmfields.update({ make_key('listings_%s' % key) : value for key, value in data.items() })	# from course listings
    cmfields.update(ccd[course_id].copy())

    # cmfields.update({ ('count_%s' % key) : str(value) for key, value in counts.items() })	# from content counts

    cmfields['filename_xbundle'] = xbfn
    cmfields['filename_listings'] = lfn

    for key in sorted(counts):	# store counts in sorted order, so that the later generated CSV file can have a predictable structure
        value = counts[key]
        cmfields['count_%s' % key] =  str(value) 	# from content counts

    for key in sorted(problem_stats):	# store problem stats
        value = problem_stats[key]
        cmfields['problem_stat_%s' % key] =  str(value)

    cmfields.update({ ('nexcluded_sub_20_%s' % key) : str(value) for key, value in nexcluded.items() })	# from content counts

    course_dir = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest)
    csvfn = course_dir / CMINFO

    # manual overriding of the automatically computed fields can be done by storing course_id,key,value data
    # in the CMINFO_OVERRIDES file

    csvfn_overrides = course_dir / CMINFO_OVERRIDES
    if csvfn_overrides.exists():
        print "--> Loading manual override information from %s" % csvfn_overrides
        for ovent in csv.DictReader(open(csvfn_overrides)):
            if not ovent['course_id']==course_id:
                print "===> ERROR! override file has entry with wrong course_id: %s" % ovent
                continue
            print "    overriding key=%s with value=%s" % (ovent['key'], ovent['value'])
            cmfields[ovent['key']] = ovent['value']

    print "--> Course metainfo writing to %s" % csvfn

    fp = open(csvfn, 'w')

    cdw = csv.DictWriter(fp, fieldnames=['course_id', 'key', 'value'])
    cdw.writeheader()

    for k, v in cmfields.items():
        cdw.writerow({'course_id': course_id, 'key': k, 'value': v})
        
    fp.close()

    # build and output course_listings_and_metainfo 

    dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)

    mypath = os.path.dirname(os.path.realpath(__file__))
    clm_table = "course_listing_and_metainfo"
    clm_schema_file = '%s/schemas/schema_%s.json' % (mypath, clm_table)
    clm_schema = json.loads(open(clm_schema_file).read())

    clm = {}
    for finfo in clm_schema:
        field = finfo['name']
        clm[field] = cmfields.get(field)
    clm_fnb = clm_table + ".json"
    clm_fn = course_dir / clm_fnb
    open(clm_fn, 'w').write(json.dumps(clm))

    gsfnp = gsutil.gs_path_from_course_id(course_id, use_dataset_latest=use_dataset_latest) / clm_fnb
    print "--> Course listing + metainfo uploading to %s then to %s.%s" % (gsfnp, dataset, clm_table)
    sys.stdout.flush()
    gsutil.upload_file_to_gs(clm_fn, gsfnp)
    bqutil.load_data_to_table(dataset, clm_table, gsfnp, clm_schema, wait=True, verbose=False)

    # output course_metainfo

    table = 'course_metainfo'
    dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)

    gsfnp = gsutil.gs_path_from_course_id(course_id, use_dataset_latest=use_dataset_latest) / CMINFO
    print "--> Course metainfo uploading to %s then to %s.%s" % (gsfnp, dataset, table)
    sys.stdout.flush()

    gsutil.upload_file_to_gs(csvfn, gsfnp)

    mypath = os.path.dirname(os.path.realpath(__file__))
    SCHEMA_FILE = '%s/schemas/schema_course_metainfo.json' % mypath
    the_schema = json.loads(open(SCHEMA_FILE).read())[table]

    bqutil.load_data_to_table(dataset, table, gsfnp, the_schema, wait=True, verbose=False, format='csv', skiprows=1)
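
analyze_course_content reads JSON in two shapes: a whole schema file parsed in one call and indexed by table id (json.loads(open(SCHEMA_FILE).read())[tableid]), and a line-delimited listings file parsed one record per line. A minimal sketch of both patterns, with inline strings standing in for the real schema and listings files:

import json

# Whole-file pattern: the parsed dict is keyed by table id.
schema_text = '{"course_metainfo": [{"name": "key", "type": "STRING"}]}'
the_schema = json.loads(schema_text)['course_metainfo']

# Line-delimited pattern: one JSON object per line.
listings_text = '{"course_id": "org/course/run"}\n{"course_id": "org/other/run"}'
listings = [json.loads(line) for line in listings_text.splitlines()]
print(listings[0]['course_id'])   # org/course/run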

Example 19

Project: pagure Source File: test_pagure_flask_internal.py
    def test_get_branches_of_commit(self):
        ''' Test the get_branches_of_commit from the internal API. '''
        tests.create_projects(self.session)
        tests.create_projects_git(self.path)

        user = tests.FakeUser()
        user.username = 'pingou'
        with tests.user_set(pagure.APP, user):
            output = self.app.get('/test/adduser')
            self.assertEqual(output.status_code, 200)
            csrf_token = output.data.split(
                b'name="csrf_token" type="hidden" value="')[1].split(b'">')[0]

        # No CSRF token
        data = {
            'repo': 'fakerepo',
            'commit_id': 'foo',
        }
        output = self.app.post('/pv/branches/commit/', data=data)
        self.assertEqual(output.status_code, 400)
        js_data = json.loads(output.data.decode('utf-8'))
        self.assertDictEqual(
            js_data,
            {u'code': u'ERROR', u'message': u'Invalid input submitted'}
        )

        # Invalid repo
        data = {
            'repo': 'fakerepo',
            'commit_id': 'foo',
            'csrf_token': csrf_token,
        }
        output = self.app.post('/pv/branches/commit/', data=data)
        self.assertEqual(output.status_code, 404)
        js_data = json.loads(output.data.decode('utf-8'))
        self.assertDictEqual(
            js_data,
            {
                u'code': u'ERROR',
                u'message': u'No repo found with the information provided'
            }
        )

        # Right repo, no commit
        data = {
            'repo': 'test',
            'csrf_token': csrf_token,
        }

        output = self.app.post('/pv/branches/commit/', data=data)
        self.assertEqual(output.status_code, 400)
        js_data = json.loads(output.data.decode('utf-8'))
        self.assertDictEqual(
            js_data,
            {u'code': u'ERROR', u'message': u'No commit id submitted'}
        )

        # Request is fine, but git repo doesn't exist
        item = pagure.lib.model.Project(
            user_id=1,  # pingou
            name='test20',
            description='test project #20',
            hook_token='aaabbbhhh',
        )
        self.session.add(item)
        self.session.commit()

        data = {
            'repo': 'test20',
            'commit_id': 'foo',
            'csrf_token': csrf_token,
        }
        output = self.app.post('/pv/branches/commit/', data=data)
        self.assertEqual(output.status_code, 404)
        js_data = json.loads(output.data.decode('utf-8'))
        self.assertDictEqual(
            js_data,
            {
                u'code': u'ERROR',
                u'message': u'No git repo found with the information provided'
            }
        )

        # Create a git repo to play with
        gitrepo = os.path.join(self.path, 'test.git')
        self.assertTrue(os.path.exists(gitrepo))
        repo = pygit2.Repository(gitrepo)

        # Create a file in that git repo
        with open(os.path.join(gitrepo, 'sources'), 'w') as stream:
            stream.write('foo\n bar')
        repo.index.add('sources')
        repo.index.write()

        # Commits the files added
        tree = repo.index.write_tree()
        author = pygit2.Signature(
            'Alice Author', 'alice@authors.tld')
        committer = pygit2.Signature(
            'Cecil Committer', 'cecil@committers.tld')
        repo.create_commit(
            'refs/heads/master',  # the name of the reference to update
            author,
            committer,
            'Add sources file for testing',
            # binary string representing the tree object ID
            tree,
            # list of binary strings representing parents of the new commit
            []
        )

        first_commit = repo.revparse_single('HEAD')

        # Edit the sources file again
        with open(os.path.join(gitrepo, 'sources'), 'w') as stream:
            stream.write('foo\n bar\nbaz\n boose')
        repo.index.add('sources')
        repo.index.write()

        # Commits the files added
        tree = repo.index.write_tree()
        author = pygit2.Signature(
            'Alice Author', 'alice@authors.tld')
        committer = pygit2.Signature(
            'Cecil Committer', 'cecil@committers.tld')
        repo.create_commit(
            'refs/heads/feature',  # the name of the reference to update
            author,
            committer,
            'Add baz and boose to the sources\n\n There are more objects to '
            'consider',
            # binary string representing the tree object ID
            tree,
            # list of binary strings representing parents of the new commit
            [first_commit.oid.hex]
        )

        # Create another file in the master branch
        with open(os.path.join(gitrepo, '.gitignore'), 'w') as stream:
            stream.write('*~')
        repo.index.add('.gitignore')
        repo.index.write()

        # Commits the files added
        tree = repo.index.write_tree()
        author = pygit2.Signature(
            'Alice Author', 'alice@authors.tld')
        committer = pygit2.Signature(
            'Cecil Committer', 'cecil@committers.tld')
        commit_hash = repo.create_commit(
            'refs/heads/feature_branch',  # the name of the reference to update
            author,
            committer,
            'Add .gitignore file for testing',
            # binary string representing the tree object ID
            tree,
            # list of binary strings representing parents of the new commit
            [first_commit.oid.hex]
        )

        # All good but the commit id
        data = {
            'repo': 'test',
            'commit_id': 'foo',
            'csrf_token': csrf_token,
        }
        output = self.app.post('/pv/branches/commit/', data=data)
        self.assertEqual(output.status_code, 404)
        js_data = json.loads(output.data.decode('utf-8'))
        self.assertDictEqual(
            js_data,
            {
                u'code': u'ERROR',
                u'message': 'This commit could not be found in this repo'
            }
        )

        # All good
        data = {
            'repo': 'test',
            'commit_id': commit_hash,
            'csrf_token': csrf_token,
        }
        output = self.app.post('/pv/branches/commit/', data=data)
        self.assertEqual(output.status_code, 200)
        js_data = json.loads(output.data.decode('utf-8'))
        self.assertDictEqual(
            js_data,
            {
                u'code': u'OK',
                u'branches': ['feature_branch'],
            }
        )
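
Every assertion block in this test follows the same decode-then-parse pattern: the Flask test client exposes the raw response bytes as output.data, which are decoded to text before json.loads builds the dict that assertDictEqual inspects. A minimal sketch with a hard-coded byte payload standing in for the HTTP response:

import json

raw = b'{"code": "ERROR", "message": "Invalid input submitted"}'  # stand-in for output.data
js_data = json.loads(raw.decode('utf-8'))
assert js_data == {'code': 'ERROR', 'message': 'Invalid input submitted'}

Since Python 3.6, json.loads also accepts bytes directly, but the explicit decode keeps the pattern portable to older interpreters.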

Example 20

Project: nupic Source File: HypersearchWorker.py
  def run(self):
    """ Run this worker.

    Parameters:
    ----------------------------------------------------------------------
    retval:     jobID of the job we ran. This is used by unit test code
                  when calling this worker using the --params command
                  line option (which tells this worker to insert the job
                  itself).
    """
    # Easier access to options
    options = self._options

    # ---------------------------------------------------------------------
    # Connect to the jobs database
    self.logger.info("Connecting to the jobs database")
    cjDAO = ClientJobsDAO.get()

    # Get our worker ID
    self._workerID = cjDAO.getConnectionID()

    if options.clearModels:
      cjDAO.modelsClearAll()

    # -------------------------------------------------------------------------
    # if params were specified on the command line, insert a new job using
    #  them.
    if options.params is not None:
      options.jobID = cjDAO.jobInsert(client='hwTest', cmdLine="echo 'test mode'",
                  params=options.params, alreadyRunning=True,
                  minimumWorkers=1, maximumWorkers=1,
                  jobType = cjDAO.JOB_TYPE_HS)
    if options.workerID is not None:
      wID = options.workerID
    else:
      wID = self._workerID
    
    buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
    logPrefix = '<BUILDID=%s, WORKER=HW, WRKID=%s, JOBID=%s> ' % \
                (buildID, wID, options.jobID)
    ExtendedLogger.setLogPrefix(logPrefix)

    # ---------------------------------------------------------------------
    # Get the search parameters
    # If asked to reset the job status, do that now
    if options.resetJobStatus:
      cjDAO.jobSetFields(options.jobID,
           fields={'workerCompletionReason': ClientJobsDAO.CMPL_REASON_SUCCESS,
                   'cancel': False,
                   #'engWorkerState': None
                   },
           useConnectionID=False,
           ignoreUnchanged=True)
    jobInfo = cjDAO.jobInfo(options.jobID)
    self.logger.info("Job info retrieved: %s" % (str(clippedObj(jobInfo))))


    # ---------------------------------------------------------------------
    # Instantiate the Hypersearch object, which will handle the logic of
    #  which models to create when we need more to evaluate.
    jobParams = json.loads(jobInfo.params)

    # Validate job params
    jsonSchemaPath = os.path.join(os.path.dirname(__file__),
                                  "jsonschema",
                                  "jobParamsSchema.json")
    validate(jobParams, schemaPath=jsonSchemaPath)


    hsVersion = jobParams.get('hsVersion', None)
    if hsVersion == 'v2':
      self._hs = HypersearchV2(searchParams=jobParams, workerID=self._workerID,
              cjDAO=cjDAO, jobID=options.jobID, logLevel=options.logLevel)
    else:
      raise RuntimeError("Invalid Hypersearch implementation (%s) specified" \
                          % (hsVersion))


    # =====================================================================
    # The main loop.
    try:
      exit = False
      numModelsTotal = 0
      print >>sys.stderr, "reporter:status:Evaluating first model..."
      while not exit:

        # ------------------------------------------------------------------
        # Choose a model to evaluate
        batchSize = 10              # How many to try at a time.
        modelIDToRun = None
        while modelIDToRun is None:

          if options.modelID is None:
            # -----------------------------------------------------------------
            # Get the latest results on all running models and send them to
            #  the Hypersearch implementation
            # This calls cjDAO.modelsGetUpdateCounters(), compares the
            # updateCounters with what we have cached, fetches the results for the
            # changed and new models, and sends those to the Hypersearch
            # implementation's self._hs.recordModelProgress() method.
            self._processUpdatedModels(cjDAO)
  
            # --------------------------------------------------------------------
            # Create a new batch of models
            (exit, newModels) = self._hs.createModels(numModels = batchSize)
            if exit:
              break

            # No more models left to create, just loop. The _hs is waiting for
            #   all remaining running models to complete, and may pick up on an
            #  orphan if it detects one.
            if len(newModels) == 0:
              continue
  
            # Try and insert one that we will run
            for (modelParams, modelParamsHash, particleHash) in newModels:
              jsonModelParams = json.dumps(modelParams)
              (modelID, ours) = cjDAO.modelInsertAndStart(options.jobID,
                                  jsonModelParams, modelParamsHash, particleHash)
  
              # Some other worker is already running it, tell the Hypersearch object
              #  so that it doesn't try and insert it again
              if not ours:
                mParamsAndHash = cjDAO.modelsGetParams([modelID])[0]
                mResult = cjDAO.modelsGetResultAndStatus([modelID])[0]
                results = mResult.results
                if results is not None:
                  results = json.loads(results)
  
                modelParams = json.loads(mParamsAndHash.params)
                particleHash = cjDAO.modelsGetFields(modelID, 
                                  ['engParticleHash'])[0]
                particleInst = "%s.%s" % (
                          modelParams['particleState']['id'],
                          modelParams['particleState']['genIdx'])
                self.logger.info("Adding model %d to our internal DB " \
                      "because modelInsertAndStart() failed to insert it: " \
                      "paramsHash=%s, particleHash=%s, particleId='%s'", modelID, 
                      mParamsAndHash.engParamsHash.encode('hex'),
                      particleHash.encode('hex'), particleInst)
                self._hs.recordModelProgress(modelID = modelID,
                      modelParams = modelParams,
                      modelParamsHash = mParamsAndHash.engParamsHash,
                      results = results,
                      completed = (mResult.status == cjDAO.STATUS_COMPLETED),
                      completionReason = mResult.completionReason,
                      matured = mResult.engMatured,
                      numRecords = mResult.numRecords)
              else:
                modelIDToRun = modelID
                break
  
          else:
            # A specific modelID was passed on the command line
            modelIDToRun = int(options.modelID)
            mParamsAndHash = cjDAO.modelsGetParams([modelIDToRun])[0]
            modelParams = json.loads(mParamsAndHash.params)
            modelParamsHash = mParamsAndHash.engParamsHash
            
            # Make us the worker
            cjDAO.modelSetFields(modelIDToRun,
                                     dict(engWorkerConnId=self._workerID))
            if False:
              # Change the hash and params of the old entry so that we can
              #  create a new model with the same params
              for attempt in range(1000):
                paramsHash = hashlib.md5("OrphanParams.%d.%d" % (modelIDToRun,
                                                                 attempt)).digest()
                particleHash = hashlib.md5("OrphanParticle.%d.%d" % (modelIDToRun,
                                                                  attempt)).digest()
                try:
                  cjDAO.modelSetFields(modelIDToRun,
                                           dict(engParamsHash=paramsHash,
                                                engParticleHash=particleHash))
                  success = True
                except:
                  success = False
                if success:
                  break
              if not success:
                raise RuntimeError("Unexpected failure to change paramsHash and "
                                   "particleHash of orphaned model")
              
              (modelIDToRun, ours) = cjDAO.modelInsertAndStart(options.jobID,
                                  mParamsAndHash.params, modelParamsHash)

            
            
            # ^^^ end while modelIDToRun ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

        # ---------------------------------------------------------------
        # We have a model, evaluate it now
        # All done?
        if exit:
          break

        # Run the model now
        self.logger.info("RUNNING MODEL GID=%d, paramsHash=%s, params=%s",
              modelIDToRun, modelParamsHash.encode('hex'), modelParams)

        # ---------------------------------------------------------------------
        # Construct model checkpoint GUID for this model:
        # jobParams['persistentJobGUID'] contains the client's (e.g., API Server)
        # persistent, globally-unique model identifier, which is what we need;
        persistentJobGUID = jobParams['persistentJobGUID']
        assert persistentJobGUID, "persistentJobGUID: %r" % (persistentJobGUID,)

        modelCheckpointGUID = jobInfo.client + "_" + persistentJobGUID + (
          '_' + str(modelIDToRun))


        self._hs.runModel(modelID=modelIDToRun, jobID = options.jobID,
                          modelParams=modelParams, modelParamsHash=modelParamsHash,
                          jobsDAO=cjDAO, modelCheckpointGUID=modelCheckpointGUID)

        # TODO: don't increment for orphaned models
        numModelsTotal += 1

        self.logger.info("COMPLETED MODEL GID=%d; EVALUATED %d MODELs",
          modelIDToRun, numModelsTotal)
        print >>sys.stderr, "reporter:status:Evaluated %d models..." % \
                                    (numModelsTotal)
        print >>sys.stderr, "reporter:counter:HypersearchWorker,numModels,1"

        if options.modelID is not None:
          exit = True
        # ^^^ end while not exit

    finally:
      # Provide Hypersearch instance an opportunity to clean up temporary files
      self._hs.close()

    self.logger.info("FINISHED. Evaluated %d models." % (numModelsTotal))
    print >>sys.stderr, "reporter:status:Finished, evaluated %d models" % (numModelsTotal)
    return options.jobID
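
The worker round-trips model parameters through the jobs database: json.dumps serializes the params dict before modelInsertAndStart stores it, and json.loads rebuilds an equal dict when modelsGetParams returns the stored string. A minimal sketch of that round trip, with a hypothetical params dict:

import json

modelParams = {'particleState': {'id': 7, 'genIdx': 0}}   # hypothetical params

stored = json.dumps(modelParams)   # what would be persisted in the jobs DB
recovered = json.loads(stored)     # what a fetching worker would get back

assert recovered == modelParams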

Example 21

Project: exoline Source File: spec.py
    def run(self, cmd, args, options):
        if args['--example']:
            s = '''
# Example client specification file
# Specification files are in YAML format (a superset of JSON
# with more readable syntax and support for comments) and
# look like this. They may contain comments that begin
# with a # sign.

# Device client model information
device:
    model: "myModel"
    vendor: "myVendor"

# list of dataports that must exist
dataports:
      # this is the absolute minimum needed to specify a
      # dataport.
    - alias: mystring
      # names are created, but not compared
    - name: Temperature
      # aliases, type, and format are created
      # and compared
      alias: temp
      format: float
      unit: °F
    - name: LED Control
      alias: led6
      format: integer
    - alias: config
      # format should be string, and parseable JSON
      format: string/json
      # initial value (if no other value is read back)
      initial: '{"text": "555-555-1234", "email": "[email protected]"}'
    - alias: person
      format: string/json
      # JSON schema specified inline (http://json-schema.org/)
      # format must be string/json to do validation
      # you may also specify a string to reference schema in an
      # external file. E.g. jsonschema: personschema.json
      jsonschema: {"title": "Person Schema",
                   "type": "object",
                   "properties": {"name": {"type": "string"}},
                   "required": ["name"]}
      initial: '{"name":"John Doe"}'
    - alias: place
      # A description of the dataport.
      description: 'This is a place I have been'
      # Dataports are not public by default,
      # but if you want to share one with the world
      public: true

    # any dataports not listed but found in the client
    # are ignored. The spec command does not delete things.

# list of script datarules that must exist
scripts:
    # by default, scripts are datarules with
    # names and aliases set to the file name
    - file: test/files/helloworld.lua
    # you can also set them explicitly
    - file: test/files/helloworld.lua
      alias: greeting
    # you can also place lua code inline
    - alias: singleLineScript
      code: debug('hello from inside lua!')
    # multiline lua scripts should start with | and
    # be indented inside the "code:" key.
    - alias: multilineScript
      code: |
        for x=1,10 do
            debug('hello from for loop ' .. x)
        end
    # simple templating for script aliases and
    # content is also supported.
    - file: test/files/convert.lua
      # if <% id %> is embedded in aliases
      # or script content, the --ids parameter must
      # be passed in. The spec command then expects
      # a script or dataport resource per id passed, substituting
      # each ID for <% id %>. In this example, if the command was:
      #
      # $ exo spec mysensor sensorspec.yaml --ids=A,B
      #
      # ...then the spec command looks for *two* script datarules
      # in mysensor, with aliases convertA.lua and convertB.lua.
      # Additionally, any instances of <% id %> in the content of
      # convert.lua are substituted with A and B before being
      # written to each script datarule.
      #
      alias: convert<% id %>.lua

# list of dispatches that must exist
dispatches:
    - alias: myDispatch
      # email | http_get | http_post | http_put | sms | xmpp
      method: email
      recipient: [email protected]
      message: hello from Exoline spec example!
      subject: hello!
      # may be an RID or alias
      subscribe: mystring

# list of simple datarules that must exist.
# scripts may go here too, but it's better
# to put them under scripts (above)
datarules:
    - alias: highTemp
      format: float
      subscribe: temp
      rule: {
        "simple": {
          "comparison": "gt",
          "constant": 80,
          "repeat": true
        }
      }
'''
            if not six.PY3:
                s = s.encode('utf-8')
            print(s)
            return

        ExoException = options['exception']
        def load_file(path, base_url=None):
            '''load a file based on a path that may be a filesystem path
            or a URL. Consider it a URL if it starts with two or more
            alphabetic characters followed by a colon'''
            def load_from_url(url):
                # URL. use requests
                r = requests.get(url)
                if r.status_code >= 300:
                    raise ExoException('Failed to read file at URL ' + url)
                return r.text, '/'.join(r.url.split('/')[:-1])

            if re.match('[a-z]{2}[a-z]*:', path):
                return load_from_url(path)
            elif base_url is not None:
                # non-url paths when spec is loaded from URLs
                # are considered relative to that URL
                return load_from_url(base_url + '/' + path)
            else:
                with open(path, 'rb') as f:
                    return f.read(), None


        def load_spec(args):
            # returns loaded spec and path for script files
            try:
                content, base_url = load_file(args['<spec-yaml>'])
                spec = yaml.safe_load(content)
                return spec, base_url
            except yaml.scanner.ScannerError as ex:
                raise ExoException('Error parsing YAML in {0}\n{1}'.format(args['<spec-yaml>'],ex))

        def check_spec(spec, args):
            msgs = []
            for typ in TYPES:
                if typ in spec and plural(typ) not in spec:
                    msgs.append('found "{0}"... did you mean "{1}"?'.format(typ, typ + 's'))
            for dp in spec.get('dataports', []):
                if 'alias' not in dp:
                    msgs.append('dataport is missing alias: {0}'.format(dp))
                    continue
                alias = dp['alias']
                if 'jsonschema' in dp:
                    schema = dp['jsonschema']
                    if isinstance(schema, six.string_types):
                        schema = json.loads(open(schema).read())
                    try:
                        jsonschema.Draft4Validator.check_schema(schema)
                    except Exception as ex:
                        msgs.append('{0} failed jsonschema validation.\n{1}'.format(alias, str(ex)))
            if len(msgs) > 0:
                raise ExoException('Found some problems in spec:\n' + '\n'.join(msgs))

        if args['--check']:
            # Validate all the jsonschema
            spec, base_url = load_spec(args)
            check_spec(spec, args)
            return

        reid = re.compile('<% *id *%>')
        def infoval(input_auth, alias):
            '''Get info and latest value for a resource'''
            return rpc._exomult(
                input_auth,
                [['info', {'alias': alias}, {'description': True, 'basic': True}],
                ['read', {'alias': alias}, {'limit': 1}]])

        def check_or_create_description(auth, info, args):
            if 'device' in spec and 'limits' in spec['device']:
                speclimits = spec['device']['limits']
                infolimits = info['description']['limits']
                limits_mismatched = False
                for limit in speclimits:
                    if limit not in infolimits:
                        raise ExoException('spec file includes invalid limit {0}'.format(limit))
                    if speclimits[limit] != infolimits[limit]:
                        limits_mismatched = True
                if limits_mismatched:
                    if create:
                        if 'client_id' not in auth:
                            raise ExoException('limits update for client requires --portal or --domain')

                        rpc.update(auth['cik'], auth['client_id'], {'limits': speclimits})
                        sys.stdout.write('updated limits for client' +
                                         ' RID {0}'.format(auth['client_id']))
                    else:
                        sys.stdout.write(
                            'limits for client {0} do not match spec:\nspec: {1}\nclient: {2}'.format(
                                auth,
                                json.dumps(speclimits, sort_keys=True),
                                json.dumps(infolimits, sort_keys=True)))


        def check_or_create_common(auth, res, info, alias, aliases):
            if info['basic']['type'] != typ:
                raise ExoException('{0} is a {1} but should be a {2}.'.format(alias, info['basic']['type'], typ))

            new_desc = info['description'].copy()
            need_update = False

            if 'public' in res:
                res_pub = res['public']
                desc = info['description']
                if desc['public'] != res_pub:
                    if create:
                        new_desc['public'] = res_pub
                        need_update = True
                    else:
                        sys.stdout.write('spec expects public for {0} to be {1}, but it is not.\n'.format(alias, res_pub))
                        print(json.dumps(res))

            if 'subscribe' in res:
                # Alias *must* be local to this client
                resSub = res['subscribe']
                # Lookup alias/name if need be
                if resSub in aliases:
                    resSub = aliases[resSub]
                desc = info['description']
                if desc['subscribe'] != resSub:
                    if create:
                        new_desc['subscribe'] = resSub
                        need_update = True
                    else:
                        sys.stdout.write('spec expects subscribe for {0} to be {1}, but they are not.\n'.format(alias, resSub))

            if 'preprocess' in res:
                def fromAliases(pair):
                    if pair[1] in aliases:
                        return [pair[0], aliases[pair[1]]]
                    else:
                        return pair
                resPrep = [fromAliases(x) for x in res['preprocess']]
                preprocess = info['description']['preprocess']
                if create:
                    new_desc['preprocess'] = resPrep
                    need_update = True
                else:
                    if preprocess is None or len(preprocess) == 0:
                        sys.stdout.write('spec expects preprocess for {0} to be {1}, but they are missing.\n'.format(alias, resPrep))
                    elif preprocess != resPrep:
                        sys.stdout.write('spec expects preprocess for {0} to be {1}, but they are {2}.\n'.format(alias, resPrep, preprocess))

            if 'retention' in res:
                resRet = {}
                if 'count' in res['retention']:
                    resRet['count'] = res['retention']['count']
                if 'duration' in res['retention']:
                    resRet['duration'] = res['retention']['duration']
                retention = info['description']['retention']
                if create:
                    new_desc['retention'] = resRet
                    need_update = True
                elif retention != resRet:
                    sys.stdout.write('spec expects retention for {0} to be {1}, but they are {2}.\n'.format(alias, resRet, retention))

            if need_update:
                rpc.update(auth, {'alias': alias}, new_desc)

        def get_format(res, default='string'):
            format = res['format'] if 'format' in res else default
            pieces = format.split('/')
            if len(pieces) > 1:
                format = pieces[0]
                format_content = pieces[1]
            else:
                format_content = None
            return format, format_content
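        # For illustration (not project code): get_format({'format': 'string/json'})
        # returns ('string', 'json'), while get_format({}) falls back to the
        # default and returns ('string', None).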

        def add_desc(key, res, desc, required=False):
            '''add key from spec resource to a 1P resource description'''
            if key in res:
                desc[key] = res[key]
            else:
                if required:
                    raise ExoException('{0} in spec is missing required property {1}.'.format(alias, key))

        def create_resource(auth, typ, desc, alias, msg=''):
            name = res['name'] if 'name' in res else alias
            print('Creating {0} with name: {1}, alias: {2}{3}'.format(
                typ, name, alias, msg))
            rid = rpc.create(auth, typ, desc, name=name)
            rpc.map(auth, rid, alias)
            info, val = infoval(auth, alias)
            aliases[alias] = rid
            return info, val

        def check_or_create_datarule(auth, res, info, val, alias, aliases):
            format, format_content = get_format(res, 'float')
            if not exists and create:
                desc = {'format': format}
                desc['retention'] = {'count': 'infinity', 'duration': 'infinity'}
                add_desc('rule', res, desc, required=True)
                info, val = create_resource(
                    auth,
                    'datarule',
                    desc,
                    alias,
                    msg=', format: {0}, rule: {1}'.format(desc['format'], desc['rule']))

            # check format
            if format != info['description']['format']:
                raise ExoException(
                    '{0} is a {1} but should be a {2}.'.format(
                    alias, info['description']['format'], format))

            # check rule
            infoRule = json.dumps(info['description']['rule'], sort_keys=True)
            specRule = json.dumps(res['rule'], sort_keys=True)
            if infoRule != specRule:
                if create:
                    info['description']['rule'] = res['rule']
                    rpc.update(auth, {'alias': alias}, info['description'])
                    sys.stdout.write('updated rule for {0}\n'.format(alias))
                else:
                    sys.stdout.write(
                        'spec expects rule for {0} to be:\n{1}\n...but it is:\n{2}\n'.format(
                        alias, specRule, infoRule))

            check_or_create_common(auth, res, info, alias, aliases)

        def check_or_create_dataport(auth, res, info, val, alias, aliases):
            format, format_content = get_format(res, 'string')
            if not exists and create:
                desc = {'format': format}
                desc['retention'] = {'count': 'infinity', 'duration': 'infinity'}
                info, val = create_resource(
                    auth,
                    'dataport',
                    desc,
                    alias,
                    msg=', format: {0}'.format(format))

            # check format
            if format != info['description']['format']:
                raise ExoException(
                    '{0} is a {1} but should be a {2}.'.format(
                    alias, info['description']['format'], format))

            # check initial value
            if 'initial' in res and len(val) == 0:
                if create:
                    initialValue = template(res['initial'])
                    print('Writing initial value {0}'.format(initialValue))
                    rpc.write(auth, {'alias': alias}, initialValue)
                    # update values being validated
                    info, val = infoval(auth, alias)
                else:
                    print('Required initial value not found in {0}. Pass --create to write initial value.'.format(alias))

            # check format content (e.g. json)
            if format_content == 'json':
                if format != 'string':
                    raise ExoException(
                        'Invalid spec for {0}. json content type only applies to string, not {1}.'.format(alias, format))
                if len(val) == 0:
                    print('Spec requires {0} be in JSON format, but it is empty.'.format(alias))
                else:
                    obj = None
                    try:
                        obj = json.loads(val[0][1])
                    except:
                        print('Spec requires {0} be in JSON format, but it does not parse as JSON. Value: {1}'.format(
                            alias,
                            val[0][1]))

                    if obj is not None and 'jsonschema' in res:
                        schema = res['jsonschema']
                        if isinstance(schema, six.string_types):
                            schema = json.loads(open(schema).read())
                        try:
                            jsonschema.validate(obj, schema)
                        except Exception as ex:
                            print("{0} failed jsonschema validation.".format(alias))
                            print(ex)

            elif format_content is not None:
                raise ExoException(
                    'Invalid spec for {0}. Unrecognized format content {1}'.format(alias, format_content))

            # check unit
            if 'unit' in res or 'description' in res:
                meta_string = info['description']['meta']
                try:
                    meta = json.loads(meta_string)
                except:
                    meta = None

                def bad_desc_msg(s):
                    desc='""'
                    if 'description' in res:
                        desc = res['description']
                    sys.stdout.write('spec expects description for {0} to be {1}{2}\n'.format(alias, desc, s))
                def bad_unit_msg(s):
                    unit=''
                    if 'unit' in res:
                        unit = res['unit']
                    sys.stdout.write('spec expects unit for {0} to be {1}{2}\n'.format(alias, unit, s))

                if create:
                    if meta is None:
                        meta = {'datasource':{'description':'','unit':''}}
                    if 'datasource' not in meta:
                        meta['datasource'] = {'description':'','unit':''}
                    if 'unit' in res:
                        meta['datasource']['unit'] = res['unit']
                    if 'description' in res:
                        meta['datasource']['description'] = res['description']

                    info['description']['meta'] = json.dumps(meta)
                    rpc.update(auth, {'alias': alias}, info['description'])

                else:
                    if meta is None:
                        sys.stdout.write('spec expects metadata but the resource has no metadata at all. Pass --create to write metadata.\n')
                    elif 'datasource' not in meta:
                        sys.stdout.write('spec expects datasource in metadata but it is not there. Pass --create to write metadata.\n')
                    elif 'unit' not in meta['datasource'] and 'unit' in res:
                        bad_unit_msg(', but no unit is specified in metadata. Pass --create to set unit.\n')
                    elif 'description' not in meta['datasource'] and 'description' in res:
                        bad_desc_msg(', but no description is specified in metadata. Pass --create to set description.\n')
                    elif 'unit' in res and meta['datasource']['unit'] != res['unit']:
                        bad_unit_msg(', but metadata specifies unit of {0}. Pass --create to update unit.\n'.format(meta['datasource']['unit']))
                    elif 'description' in res and meta['datasource']['description'] != res['description']:
                        bad_desc_msg(', but metadata specifies description of {0}. Pass --create to update description.\n'.format(meta['datasource']['description']))

            check_or_create_common(auth, res, info, alias, aliases)

        def check_or_create_dispatch(auth, res, info, alias, aliases):
            if not exists and create:
                desc = {}
                add_desc('method', res, desc, required=True)
                add_desc('recipient', res, desc, required=True)
                add_desc('subject', res, desc)
                add_desc('message', res, desc)
                desc['retention'] = {'count': 'infinity', 'duration': 'infinity'}
                info, val = create_resource(
                    auth,
                    'dispatch',
                    desc,
                    alias,
                    msg=', method: {0}, recipient: {1}'.format(desc['method'], desc['recipient']))

            # check dispatch-specific things
            def check_desc(key, res, desc):
                '''check a specific key and return whether an update is required'''
                if key in res and desc[key] != res[key]:
                    if create:
                        desc[key] = res[key]
                        return True
                    else:
                        sys.stdout.write(
                            'spec expects {0} for {1} to be {2} but it is {3}\n'.format(
                            key, alias, res[key], desc[key]))
                return False

            desc = info['description']
            need_update = False
            need_update = check_desc('method', res, desc) or need_update
            need_update = check_desc('recipient', res, desc) or need_update
            need_update = check_desc('subject', res, desc) or need_update
            need_update = check_desc('message', res, desc) or need_update
            if need_update:
                rpc.update(auth, {'alias': alias}, desc)
                sys.stdout.write('updated {0} to {1}\n'.format(alias, json.dumps(desc, sort_keys=True)))

            check_or_create_common(auth, res, info, alias, aliases)


        input_auth = options['auth']
        exoutils = options['utils']
        rpc = options['rpc']
        asrid = args['--asrid']

        if cmd == 'spec':

            if args['--generate'] is not None:
                spec_file = args['--generate']
                if args['--scripts'] is not None:
                    script_dir = args['--scripts']
                else:
                    script_dir = 'scripts'
                print('Generating spec for {0}.'.format(input_auth))
                print('spec file: {0}, scripts directory: {1}'.format(spec_file, script_dir))

                # generate spec file, download scripts
                spec = {}
                info, listing = rpc._exomult(input_auth,
                    [['info', {'alias': ''}, {'basic': True,
                                              'description': True,
                                              'aliases': True}],
                     ['listing', ['dataport', 'datarule', 'dispatch'], {}, {'alias': ''}]])
                rids = listing['dataport'] + listing['datarule'] + listing['dispatch']

                if len(rids) > 0:
                    child_info = rpc._exomult(input_auth, [['info', rid, {'basic': True, 'description': True}] for rid in rids])
                    for idx, rid in enumerate(rids):
                        myinfo = child_info[idx]
                        name = myinfo['description']['name']
                        def skip_msg(msg):
                            print('Skipping {0} (name: {1}). {2}'.format(rid, name, msg))
                        if rid not in info['aliases']:
                            skip_msg('It needs an alias.')
                            continue

                        # adds properties common to dataports and dispatches:
                        # preprocess, subscribe, retention, meta, public
                        def add_common_things(res):
                            res['name'] = myinfo['description']['name']
                            res['alias'] = info['aliases'][rid][0]
                            preprocess = myinfo['description']['preprocess']
                            if preprocess is not None and len(preprocess) > 0:
                                def toAlias(pair):
                                    if not asrid and pair[1] in info['aliases']:
                                        return [pair[0], info['aliases'][pair[1]][0]]
                                    else:
                                        return pair
                                res['preprocess'] = [toAlias(x) for x in preprocess]


                            subscribe = myinfo['description']['subscribe']
                            if subscribe is not None and subscribe != "":
                                if not asrid and subscribe in info['aliases']:
                                    res['subscribe'] = info['aliases'][subscribe][0]
                                else:
                                    res['subscribe'] = subscribe

                            retention = myinfo['description']['retention']
                            if retention is not None:
                                count = retention['count']
                                duration = retention['duration']
                                if count is not None and duration is not None:
                                    if count == 'infinity':
                                        del retention['count']
                                    if duration == 'infinity':
                                        del retention['duration']
                                    if len(retention) > 0:
                                        res['retention'] = retention

                            meta_string = myinfo['description']['meta']
                            try:
                                meta = json.loads(meta_string)
                                unit = meta['datasource']['unit']
                                if len(unit) > 0:
                                    res['unit'] = unit
                                desc = meta['datasource']['description']
                                if len(desc) > 0:
                                    res['description'] = desc
                            except:
                                # assume unit is not present in metadata
                                pass

                            public = myinfo['description']['public']
                            if public is not None and public:
                                res['public'] = public


                        typ = myinfo['basic']['type']
                        if typ == 'dataport':
                            res = {
                                'format': myinfo['description']['format']
                            }
                            add_common_things(res)
                            spec.setdefault('dataports', []).append(res)

                        elif typ == 'datarule':
                            desc = myinfo['description']
                            is_script = desc['format'] == 'string' and 'rule' in desc and 'script' in desc['rule']
                            if is_script:
                                if not os.path.exists(script_dir):
                                    os.makedirs(script_dir)
                                filename = os.path.join(script_dir, info['aliases'][rid][0])
                                spec.setdefault('scripts', []).append({'file': filename})
                                with open(filename, 'w') as f:
                                    print('Writing {0}...'.format(filename))
                                    f.write(desc['rule']['script'].encode('utf8'))
                            else:
                                res = {
                                    'rule': desc['rule']
                                }
                                add_common_things(res)
                                spec.setdefault('datarules', []).append(res)

                        elif typ == 'dispatch':
                            desc = myinfo['description']
                            res = {
                                'method': desc['method'],
                                'message': desc['message'],
                                'recipient': desc['recipient'],
                                'subject': desc['subject']
                            }
                            add_common_things(res)
                            spec.setdefault('dispatches', []).append(res)

                with open(spec_file, 'w') as f:
                    print('Writing {0}...'.format(spec_file))
                    yaml.safe_dump(spec, f, encoding='utf-8', indent=4, default_flow_style=False, allow_unicode=True)
                return

            updatescripts = args['--update-scripts']
            create = args['--create']

            def query_yes_no(question, default="yes"):
                """Ask a yes/no question via raw_input() and return their answer.

                "question" is a string that is presented to the user.
                "default" is the presumed answer if the user just hits <Enter>.
                    It must be "yes" (the default), "no" or None (meaning
                    an answer is required of the user).

                The "answer" return value is one of "yes" or "no".
                """
                valid = {"yes":True,   "y":True,  "ye":True,
                         "no":False,     "n":False}
                if default is None:
                    prompt = " [y/n] "
                elif default == "yes":
                    prompt = " [Y/n] "
                elif default == "no":
                    prompt = " [y/N] "
                else:
                    raise ValueError("invalid default answer: '%s'" % default)

                while True:
                    sys.stdout.write(question + prompt)
                    choice = raw_input().lower()
                    if default is not None and choice == '':
                        return valid[default]
                    elif choice in valid:
                        return valid[choice]
                    else:
                        sys.stdout.write("Please respond with 'yes' or 'no' "\
                                         "(or 'y' or 'n').\n")

            def generate_aliases_and_data(res, args):
                ids = args['--ids']
                if 'alias' in res:
                    alias = res['alias']
                else:
                    if 'file' in res:
                        alias = os.path.basename(res['file'])
                    else:
                        raise ExoException('Resources in spec must have an alias. (For scripts, "file" will substitute.)')

                if reid.search(alias) is None:
                    yield alias, None
                else:
                    alias_template = alias
                    if ids is None:
                        raise ExoException('This spec requires --ids')
                    ids = ids.split(',')
                    for id, alias in [(id, reid.sub(id, alias_template)) for id in ids]:
                        yield alias, {'id': id}
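            # For illustration (not project code): with --ids=A,B and the alias
            # template 'convert<% id %>.lua', this generator yields
            # ('convertA.lua', {'id': 'A'}) and ('convertB.lua', {'id': 'B'}),
            # since reid.sub replaces each '<% id %>' marker with the id.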

            spec, base_url = load_spec(args)
            check_spec(spec, args)

            device_auths = []
            portal_ciks = []

            iterate_portals = False

            def auth_string(auth):
                if isinstance(auth, dict):
                    return json.dumps(auth)
                else:
                    return auth

            if args['--portal'] == True:
                cik = exoutils.get_cik(input_auth, allow_only_cik=True)
                portal_ciks.append((cik,''))
                iterate_portals = True

            if args['--domain'] == True:
                cik = exoutils.get_cik(input_auth, allow_only_cik=True)
                # set iterate_portals flag to true so we can iterate over each portal
                iterate_portals = True
                # Get list of users under a domain
                user_keys = []
                clients = rpc._listing_with_info(cik,['client'])

                email_regex = re.compile(r'[^@]+@[^@]+\.[^@]+')

                for k,v in clients['client'].items():
                    name = v['description']['name']
                    # if name is an email address
                    if email_regex.match(name):
                        user_keys.append(v['key'])


                # Get list of each portal
                for key in user_keys:
                    userlisting = rpc._listing_with_info(key,['client'])
                    for k,v in userlisting['client'].items():
                        portal_ciks.append((v['key'],v['description']['name']))
                    #print(x)


            if iterate_portals == True:
                for portal_cik, portal_name in portal_ciks:
                    # If user passed in the portal flag, but the spec doesn't have
                    # a vendor/model, exit
                    if (not 'device' in spec) or (not 'model' in spec['device']) or (not 'vendor' in spec['device']):
                        print("With --portal (or --domain) option, spec file requires a\r\n"
                              "device model and vendor field:\r\n"
                              "e.g.\r\n"
                              "device:\r\n"
                              "    model: modelName\r\n"
                              "    vendor: vendorName\r\n")
                        raise ExoException('--portal flag requires a device model/vendor in spec file')
                    else:

                        # get device vendor and model
                        modelName = spec['device']['model']
                        vendorName = spec['device']['vendor']

                        # If the portal has no name, use the cik as the name
                        if portal_name == '':
                            portal_name = portal_cik
                        print('Looking in ' + portal_name + ' for ' + modelName + '/' + vendorName)
                        # Get all clients in the portal
                        clients = rpc._listing_with_info(portal_cik, ['client'])
                        #print(modelName)
                        # for each client
                        for rid, v in iteritems(list(iteritems(clients))[0][1]):
                            # Get meta field
                            validJson = False
                            meta = None
                            try:
                                meta = json.loads(v['description']['meta'])
                                validJson = True
                            except ValueError as e:
                                # no json in this meta field
                                validJson = False
                            if validJson == True:
                                # get device type (only vendor types have a model and vendor)
                                typ = meta['device']['type']

                                # if the device type is 'vendor'
                                if typ == 'vendor':
                                    # and it matches our vendor/model in the spec file
                                    if meta['device']['vendor'] == vendorName:
                                        if meta['device']['model'] == modelName:
                                            # Append an auth for this device to our list
                                            auth = {
                                                'cik': portal_cik, # v['key'],
                                                'client_id': rid
                                            }
                                            device_auths.append(auth)
                                            print('  found: {0} {1}'.format(v['description']['name'], auth_string(auth)))
            else:
                # only for single client
                device_auths.append(input_auth)

            # Make sure user knows they are about to update multiple devices
            # unless the `-f` flag is passed
            if ((args['--portal'] or args['--domain']) and args['--create']) and not args['-f']:
                res = query_yes_no("You are about to update " + str(len(device_auths)) + " devices, are you sure?")
                if res == False:
                    print('exiting')
                    return

            # for each device in our list of device_auths
            for auth in device_auths:
                try:
                    aliases = {}
                    print("Running spec on: {0}".format(auth_string(auth)))
                    #   apply spec [--create]

                    # Get map of aliases and description
                    info = rpc.info(auth, {'alias': ''}, {'aliases': True, 'description': True})
                    try:
                        for rid, alist in info['aliases'].items():
                            for alias in alist:
                                aliases[alias] = rid
                    except:
                        pass

                    # Check limits
                    check_or_create_description(auth, info, args)

                    for typ in TYPES:
                        for res in spec.get(plural(typ), []):
                            for alias, resource_data in generate_aliases_and_data(res, args):
                                # TODO: handle nonexistence
                                exists = True
                                try:
                                    info, val = infoval(auth, alias)
                                except rpc.RPCException as e:
                                    info = None
                                    val = None
                                    exists = False
                                    print('{0} not found.'.format(alias))
                                    if not create:
                                        print('Pass --create to create it')
                                        continue
                                except pyonep.exceptions.OnePlatformException as ex:
                                    exc = ast.literal_eval(ex.message)

                                    if exc['code'] == 401:
                                        raise Spec401Exception()
                                    else:
                                        raise ex

                                def template(script):
                                    if resource_data is None:
                                        return script
                                    else:
                                        return reid.sub(resource_data['id'], script)

                                if typ == 'client':
                                    if not exists:
                                        if create:
                                            print('Client creation is not yet supported')
                                        continue
                                elif typ == 'dataport':
                                    check_or_create_dataport(auth, res, info, val, alias, aliases)
                                elif typ == 'dispatch':
                                    check_or_create_dispatch(auth, res, info, alias, aliases)
                                elif typ == 'datarule':
                                    check_or_create_datarule(auth, res, info, val, alias, aliases)
                                elif typ == 'script':
                                    if 'file' not in res and 'code' not in res:
                                        raise ExoException('{0} is a script, so it needs a "file" or "code" key'.format(alias))
                                    if 'file' in res and 'code' in res:
                                        raise ExoException('{0} specifies both "file" and "code" keys, but they\'re mutually exclusive.'.format(alias))

                                    name = res['name'] if 'name' in res else alias

                                    if 'file' in res:
                                        content, _ = load_file(res['file'], base_url=base_url)
                                        if not six.PY3 or type(content) is bytes:
                                            content = content.decode('utf8')
                                    else:
                                        content = res['code']
                                    if not exists and create:
                                        rpc.upload_script_content([auth], content, name=alias, create=True, filterfn=template)
                                        continue

                                    script_spec = template(content)
                                    script_svr = info['description']['rule']['script']
                                    script_friendly = 'file {0}'.format(res['file']) if 'file' in res else '"code" value in spec'
                                    if script_svr != script_spec:
                                        print('Script for {0} does not match {1}.'.format(alias, script_friendly))
                                        if updatescripts:
                                            print('Uploading script to {0}...'.format(alias))
                                            rpc.upload_script_content([auth], script_spec, name=name, create=False, filterfn=template)
                                        elif not args['--no-diff']:
                                            # show diff
                                            import difflib
                                            differences = '\n'.join(
                                                difflib.unified_diff(
                                                    script_spec.splitlines(),
                                                    script_svr.splitlines(),
                                                    fromfile=script_friendly,
                                                    tofile='info["description"]["rule"]["script"]'))

                                            print(differences)
                                else:
                                    raise ExoException('Found unsupported type {0} in spec.'.format(typ))
                except Spec401Exception as ex:
                    print("cuem**WARNING******* 401 received in spec, is the device expired?")
                    pass
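
The heart of the string/json dataport check in this example is a two-step flow: the latest value must parse with json.loads, and when the spec carries a jsonschema key the decoded object must also validate against it. A minimal standalone sketch of that flow, with a made-up value and schema (requires the jsonschema package):

import json
import jsonschema

schema = {"type": "object",
          "properties": {"name": {"type": "string"}},
          "required": ["name"]}
raw = '{"name": "John Doe"}'

try:
    obj = json.loads(raw)          # format must be string/json
except ValueError:
    print('value does not parse as JSON: {0}'.format(raw))
else:
    try:
        jsonschema.validate(obj, schema)
    except jsonschema.ValidationError as ex:
        print('failed jsonschema validation: {0}'.format(ex))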

Example 22

Project: fail2ban Source File: transmitter.py
Function: command_set
	def __commandSet(self, command, multiple=False):
		name = command[0]
		# Logging
		if name == "loglevel":
			value = command[1]
			self.__server.setLogLevel(value)
			return self.__server.getLogLevel()
		elif name == "logtarget":
			value = command[1]
			if self.__server.setLogTarget(value):
				return self.__server.getLogTarget()
			else:
				raise Exception("Failed to change log target")
		elif name == "syslogsocket":
			value = command[1]
			if self.__server.setSyslogSocket(value):
				return self.__server.getSyslogSocket()
			else:
				raise Exception("Failed to change syslog socket")
		#Database
		elif name == "dbfile":
			self.__server.setDatabase(command[1])
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.filename
		elif name == "dbpurgeage":
			db = self.__server.getDatabase()
			if db is None:
				logSys.warning("dbpurgeage setting was not in effect since no db yet")
				return None
			else:
				db.purgeage = command[1]
				return db.purgeage
		# Jail
		elif command[1] == "idle":
			if command[2] == "on":
				self.__server.setIdleJail(name, True)
			elif command[2] == "off":
				self.__server.setIdleJail(name, False)
			else:
				raise Exception("Invalid idle option, must be 'on' or 'off'")
			return self.__server.getIdleJail(name)
		# Filter
		elif command[1] == "addignoreip":
			value = command[2]
			self.__server.addIgnoreIP(name, value)
			return self.__server.getIgnoreIP(name)
		elif command[1] == "delignoreip":
			value = command[2]
			self.__server.delIgnoreIP(name, value)
			return self.__server.getIgnoreIP(name)
		elif command[1] == "ignorecommand":
			value = command[2]
			self.__server.setIgnoreCommand(name, value)
			return self.__server.getIgnoreCommand(name)
		elif command[1] == "addlogpath":
			value = command[2]
			tail = False
			if len(command) == 4:
				if command[3].lower()  == "tail":
					tail = True
				elif command[3].lower() != "head":
					raise ValueError("File option must be 'head' or 'tail'")
			elif len(command) > 4:
				raise ValueError("Only one file can be added at a time")
			self.__server.addLogPath(name, value, tail)
			return self.__server.getLogPath(name)
		elif command[1] == "dellogpath":
			value = command[2]
			self.__server.delLogPath(name, value)
			return self.__server.getLogPath(name)
		elif command[1] == "logencoding":
			value = command[2]
			self.__server.setLogEncoding(name, value)
			return self.__server.getLogEncoding(name)
		elif command[1] == "addjournalmatch": # pragma: systemd no cover
			value = command[2:]
			self.__server.addJournalMatch(name, value)
			return self.__server.getJournalMatch(name)
		elif command[1] == "deljournalmatch": # pragma: systemd no cover
			value = command[2:]
			self.__server.delJournalMatch(name, value)
			return self.__server.getJournalMatch(name)
		elif command[1] == "addfailregex":
			value = command[2]
			self.__server.addFailRegex(name, value, multiple=multiple)
			if multiple:
				return True
			return self.__server.getFailRegex(name)
		elif command[1] == "delfailregex":
			value = int(command[2])
			self.__server.delFailRegex(name, value)
			return self.__server.getFailRegex(name)
		elif command[1] == "addignoreregex":
			value = command[2]
			self.__server.addIgnoreRegex(name, value, multiple=multiple)
			if multiple:
				return True
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "delignoreregex":
			value = int(command[2])
			self.__server.delIgnoreRegex(name, value)
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "usedns":
			value = command[2]
			self.__server.setUseDns(name, value)
			return self.__server.getUseDns(name)
		elif command[1] == "findtime":
			value = command[2]
			self.__server.setFindTime(name, value)
			return self.__server.getFindTime(name)
		elif command[1] == "datepattern":
			value = command[2]
			self.__server.setDatePattern(name, value)
			return self.__server.getDatePattern(name)
		elif command[1] == "maxretry":
			value = command[2]
			self.__server.setMaxRetry(name, int(value))
			return self.__server.getMaxRetry(name)
		elif command[1] == "maxlines":
			value = command[2]
			self.__server.setMaxLines(name, int(value))
			return self.__server.getMaxLines(name)
		# command
		elif command[1] == "bantime":
			value = command[2]
			self.__server.setBanTime(name, value)
			return self.__server.getBanTime(name)
		elif command[1] == "banip":
			value = command[2]
			return self.__server.setBanIP(name,value)
		elif command[1] == "unbanip":
			value = command[2]
			self.__server.setUnbanIP(name, value)
			return value
		elif command[1] == "addaction":
			args = [command[2]]
			if len(command) > 3:
				args.extend([command[3], json.loads(command[4])])
			self.__server.addAction(name, *args)
			return args[0]
		elif command[1] == "delaction":
			value = command[2]
			self.__server.delAction(name, value)
			return None
		elif command[1] == "action":
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			if multiple:
				for cmd in command[3]:
					logSys.log(5, "  %r", cmd)
					actionkey = cmd[0]
					if callable(getattr(action, actionkey, None)):
						actionvalue = json.loads(cmd[1]) if len(cmd)>1 else {}
						getattr(action, actionkey)(**actionvalue)
					else:
						actionvalue = cmd[1]
						setattr(action, actionkey, actionvalue)
				return True
			else:
				actionkey = command[3]
				if callable(getattr(action, actionkey, None)):
					actionvalue = json.loads(command[4]) if len(command)>4 else {}
					return getattr(action, actionkey)(**actionvalue)
				else:
					actionvalue = command[4]
					setattr(action, actionkey, actionvalue)
					return getattr(action, actionkey)
		raise Exception("Invalid command (no set action or not yet implemented)")

Example 23

Project: rapidpro Source File: tests.py
    def test_category_results(self):
        self.setup_color_gender_flow()

        # create a state field:
        # assign c1 and c2 to Kigali
        ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
        ContactField.get_or_create(self.org, self.admin, 'district', label="District", value_type=Value.TYPE_DISTRICT)

        self.c1.set_field(self.user, 'state', "Kigali City")
        self.c1.set_field(self.user, 'district', "Nyarugenge")
        self.c2.set_field(self.user, 'state', "Kigali City")
        self.c2.set_field(self.user, 'district', "Nyarugenge")

        self.run_color_gender_flow(self.c1, "red", "male", "16")
        self.run_color_gender_flow(self.c2, "blue", "female", "19")
        self.run_color_gender_flow(self.c3, "green", "male", "75")
        self.run_color_gender_flow(self.c4, "maroon", "female", "50")

        # create a group of the women
        ladies = self.create_group("Ladies", [self.c2, self.c4])

        # get our rulesets
        color = RuleSet.objects.get(flow=self.flow, label="Color")
        gender = RuleSet.objects.get(flow=self.flow, label="Gender")
        age = RuleSet.objects.get(flow=self.flow, label="Age")

        # fetch our results through the view
        self.login(self.admin)
        response = self.client.get(reverse('flows.ruleset_results', args=[color.pk]))
        response = json.loads(response.content)

        categories = response['results'][0]['categories']
        self.assertEqual('Red', categories[0]['label'])
        self.assertEqual('Blue', categories[1]['label'])
        self.assertEqual('Green', categories[2]['label'])

        self.assertEqual(2, categories[0]['count'])
        self.assertEqual(1, categories[1]['count'])
        self.assertEqual(1, categories[2]['count'])

        # categories should be in the same order as our rules, should have correct counts
        result = Value.get_value_summary(ruleset=color)[0]
        self.assertEquals(3, len(result['categories']))
        self.assertFalse(result['open_ended'])
        self.assertResult(result, 0, "Red", 2)
        self.assertResult(result, 1, "Blue", 1)
        self.assertResult(result, 2, "Green", 1)

        # check our age category as well
        result = Value.get_value_summary(ruleset=age)[0]
        self.assertEquals(3, len(result['categories']))
        self.assertFalse(result['open_ended'])
        self.assertResult(result, 0, "Child", 1)
        self.assertResult(result, 1, "Adult", 2)
        self.assertResult(result, 2, "Senior", 1)

        # and our gender categories
        result = Value.get_value_summary(ruleset=gender)[0]
        self.assertEquals(2, len(result['categories']))
        self.assertFalse(result['open_ended'])
        self.assertResult(result, 0, "Male", 2)
        self.assertResult(result, 1, "Female", 2)

        # now filter the results and only get responses by men
        result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"])])[0]
        self.assertResult(result, 0, "Red", 1)
        self.assertResult(result, 1, "Blue", 0)
        self.assertResult(result, 2, "Green", 1)

        # what about men that are adults?
        result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"]),
                                         dict(ruleset=age.pk, categories=["Adult"])])[0]
        self.assertResult(result, 0, "Red", 0)
        self.assertResult(result, 1, "Blue", 0)
        self.assertResult(result, 2, "Green", 0)

        # union of all genders
        result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male", "Female"]),
                                         dict(ruleset=age.pk, categories=["Adult"])])[0]

        self.assertResult(result, 0, "Red", 1)
        self.assertResult(result, 1, "Blue", 1)
        self.assertResult(result, 2, "Green", 0)

        # just women adults by group
        result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0]

        self.assertResult(result, 0, "Red", 1)
        self.assertResult(result, 1, "Blue", 1)
        self.assertResult(result, 2, "Green", 0)

        # remove one of the women from the group
        ladies.update_contacts(self.user, [self.c2], False)

        # get a new summary
        result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0]

        self.assertResult(result, 0, "Red", 1)
        self.assertResult(result, 1, "Blue", 0)
        self.assertResult(result, 2, "Green", 0)

        # ok, back in she goes
        ladies.update_contacts(self.user, [self.c2], True)

        # do another run for contact 1
        self.run_color_gender_flow(self.c1, "blue", "male", "16")

        # totals should reflect the new value, not the old
        result = Value.get_value_summary(ruleset=color)[0]
        self.assertResult(result, 0, "Red", 1)
        self.assertResult(result, 1, "Blue", 2)
        self.assertResult(result, 2, "Green", 1)

        # what if we do a partial run?
        self.send_message(self.flow, "red", contact=self.c1, restart_participants=True)

        # should change our male/female breakdown since c1 now no longer has a gender
        result = Value.get_value_summary(ruleset=gender)[0]
        self.assertEquals(2, len(result['categories']))
        self.assertResult(result, 0, "Male", 1)
        self.assertResult(result, 1, "Female", 2)

        # back to a full flow
        self.run_color_gender_flow(self.c1, "blue", "male", "16")

        # ok, now segment by gender
        result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(ruleset=gender.pk, categories=["Male", "Female"]))
        male_result = result[0]
        self.assertResult(male_result, 0, "Red", 0)
        self.assertResult(male_result, 1, "Blue", 1)
        self.assertResult(male_result, 2, "Green", 1)

        female_result = result[1]
        self.assertResult(female_result, 0, "Red", 1)
        self.assertResult(female_result, 1, "Blue", 1)
        self.assertResult(female_result, 2, "Green", 0)

        # segment by gender again, but use the contact field to do so
        result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(contact_field="Gender", values=["MALE", "Female"]))
        male_result = result[0]
        self.assertResult(male_result, 0, "Red", 0)
        self.assertResult(male_result, 1, "Blue", 1)
        self.assertResult(male_result, 2, "Green", 1)

        female_result = result[1]
        self.assertResult(female_result, 0, "Red", 1)
        self.assertResult(female_result, 1, "Blue", 1)
        self.assertResult(female_result, 2, "Green", 0)

        # add in a filter at the same time
        result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=color.pk, categories=["Blue"])],
                                         segment=dict(ruleset=gender.pk, categories=["Male", "Female"]))

        male_result = result[0]
        self.assertResult(male_result, 0, "Red", 0)
        self.assertResult(male_result, 1, "Blue", 1)
        self.assertResult(male_result, 2, "Green", 0)

        female_result = result[1]
        self.assertResult(female_result, 0, "Red", 0)
        self.assertResult(female_result, 1, "Blue", 1)
        self.assertResult(female_result, 2, "Green", 0)

        # ok, try segmenting by location instead
        result = Value.get_value_summary(ruleset=color, segment=dict(location="State"))

        eastern_result = result[0]
        self.assertEquals('171591', eastern_result['boundary'])
        self.assertEquals('Eastern Province', eastern_result['label'])
        self.assertResult(eastern_result, 0, "Red", 0)
        self.assertResult(eastern_result, 1, "Blue", 0)
        self.assertResult(eastern_result, 2, "Green", 0)

        kigali_result = result[1]
        self.assertEquals('1708283', kigali_result['boundary'])
        self.assertEquals('Kigali City', kigali_result['label'])
        self.assertResult(kigali_result, 0, "Red", 0)
        self.assertResult(kigali_result, 1, "Blue", 2)
        self.assertResult(kigali_result, 2, "Green", 0)

        # updating state location leads to updated data
        self.c2.set_field(self.user, 'state', "Eastern Province")
        result = Value.get_value_summary(ruleset=color, segment=dict(location="State"))

        eastern_result = result[0]
        self.assertEquals('171591', eastern_result['boundary'])
        self.assertEquals('Eastern Province', eastern_result['label'])
        self.assertResult(eastern_result, 0, "Red", 0)
        self.assertResult(eastern_result, 1, "Blue", 1)
        self.assertResult(eastern_result, 2, "Green", 0)

        kigali_result = result[1]
        self.assertEquals('1708283', kigali_result['boundary'])
        self.assertEquals('Kigali City', kigali_result['label'])
        self.assertResult(kigali_result, 0, "Red", 0)
        self.assertResult(kigali_result, 1, "Blue", 1)
        self.assertResult(kigali_result, 2, "Green", 0)

        # segment by district instead
        result = Value.get_value_summary(ruleset=color, segment=dict(parent="1708283", location="District"))

        # only one district in Kigali
        self.assertEquals(1, len(result))
        kigali_result = result[0]
        self.assertEquals('3963734', kigali_result['boundary'])
        self.assertEquals('Nyarugenge', kigali_result['label'])
        self.assertResult(kigali_result, 0, "Red", 0)
        self.assertResult(kigali_result, 1, "Blue", 2)
        self.assertResult(kigali_result, 2, "Green", 0)

        # do a sanity check on our choropleth view
        self.login(self.admin)
        response = self.client.get(reverse('flows.ruleset_choropleth', args=[color.pk]) +
                                   "?_format=json&boundary=" + self.org.country.osm_id)

        # response should be valid json
        response = json.loads(response.content)

        # should have breaks
        self.assertTrue('breaks' in response)

        # should have two categories, Blue and Others
        self.assertEquals(2, len(response['categories']))
        self.assertEquals("Blue", response['categories'][0])
        self.assertEquals("Others", response['categories'][1])

        # assert our kigali result
        kigali_result = response['scores']['1708283']
        self.assertEquals(1, kigali_result['score'])
        self.assertEquals("Kigali City", kigali_result['name'])
        self.assertEquals("Blue", kigali_result['results'][0]['label'])
        self.assertEquals("Others", kigali_result['results'][1]['label'])

        self.assertEquals(1, kigali_result['results'][0]['count'])
        self.assertEquals(0, kigali_result['results'][1]['count'])

        self.assertEquals(100, kigali_result['results'][0]['percentage'])
        self.assertEquals(0, kigali_result['results'][1]['percentage'])

        with patch('temba.values.models.Value.get_value_summary') as mock:
            mock.return_value = []

            response = self.client.get(reverse('flows.ruleset_choropleth', args=[color.pk]) +
                                       "?_format=json&boundary=" + self.org.country.osm_id)

            # response should be valid json
            response = json.loads(response.content)

            # should have two categories, Blue and Others
            self.assertEquals(2, len(response['categories']))
            self.assertEquals("", response['categories'][0])
            self.assertEquals("", response['categories'][1])

            # all counts and percentage are 0
            self.assertEquals(0, response['totals']['count'])
            self.assertEquals(0, response['totals']['results'][0]['count'])
            self.assertEquals(0, response['totals']['results'][0]['percentage'])
            self.assertEquals(0, response['totals']['results'][1]['count'])
            self.assertEquals(0, response['totals']['results'][1]['percentage'])

            # and empty string labels
            self.assertEquals("", response['totals']['results'][0]['label'])
            self.assertEquals("", response['totals']['results'][1]['label'])

        # also check our analytics view
        response = self.client.get(reverse('flows.ruleset_analytics'))

        # make sure we have only one flow in it
        flows = json.loads(response.context['flows'])
        self.assertEquals(1, len(flows))
        self.assertEquals(3, len(flows[0]['rules']))
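
A minimal sketch of the decode-then-assert pattern the test above relies on; parse_json_response and _StubResponse are illustrative names, not helpers from the project:

import json

def parse_json_response(response):
    # The test client exposes the body as bytes in response.content;
    # decode it before handing it to json.loads.
    return json.loads(response.content.decode('utf-8'))

class _StubResponse:
    # Stand-in for a test-client response carrying a JSON body.
    content = b'{"categories": ["Blue", "Others"]}'

print(parse_json_response(_StubResponse())['categories'])  # ['Blue', 'Others']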

Example 24

Project: labmanager Source File: opensocial.py
def _reserve_impl(lab_name, public_rlms = False, public_lab = False, institution_id = None, rlms_identifier = None, gadget_url_base = None):
    # TODO XXX SECURITY BUG: THIS METHOD DOES NOT USE THE BOOKING THING
    st = request.args.get('st') or ''
    SHINDIG.url = 'http://shindig2.epfl.ch'

    if public_rlms:
        db_rlms = db.session.query(RLMS).filter_by(publicly_available = True, public_identifier = rlms_identifier).first()
        if db_rlms is None:
            return render_template("opensocial/errors.html", message = gettext("That lab does not exist or it is not publicly available."))
        lab_identifier = lab_name

        ple_configuration = '{}'
        institution_name  = 'public-labs' # TODO: make sure that this name is unique
        courses_configurations = []
        booking_required = False
    else:
        if public_lab:
            db_laboratory = db.session.query(Laboratory).filter_by(publicly_available = True, public_identifier = lab_name).first()
            if db_laboratory is None:
                return render_template("opensocial/errors.html", message = gettext("That lab does not exist or it is not publicly available."))
            
            ple_configuration = '{}'
            institution_name  = 'public-labs' # TODO: make sure that this name is unique
            courses_configurations = []
        else:
            institution = db.session.query(LearningTool).filter_by(name = institution_id).first()
            if institution is None or len(institution.shindig_credentials) < 1:
                return render_template("opensocial/errors.html", message = gettext("This is not a valid PLE. Make sure that the institution id is fine and that there are Shindig Credentials configured"))

            SHINDIG.url = institution.shindig_credentials[0].shindig_url

            # Obtain current application data (especially, on which space is the user running it)
            current_app_str  = urllib2.urlopen(url_shindig('/rest/apps/@self?st=%s' % st)).read()
            current_app_data = json.loads(current_app_str)
            space_id = current_app_data['entry'].get('parentId') or 'null parent'
            parent_type = current_app_data['entry'].get('parentType')
            if parent_type != '@space':
                return render_template("opensocial/errors.html", message = gettext("Invalid parent: it should be a space, and it is a %(parenttype)s", parenttype=parent_type))
            # Obtain the list of parent spaces of that space
            spaces = [space_id]
            get_parent_spaces(space_id, spaces)
            # Now, check permissions:
            # First, check if the lab is public (e.g. the lab can be accessed by anyone)
            # Second, check accessibility permissions (e.g. the lab is accessible to everyone from that institution without specifying any Graasp space).
            # After that, if there are no accessibility permissions, check whether that institution has a permission identified by that lab_name, and check which courses (spaces in OpenSocial) have that permission.
            public_lab_db = db.session.query(Laboratory).filter_by(public_identifier = lab_name, publicly_available = True).first()
            courses_configurations = []
            if public_lab_db is None:
                # No public access is granted for the lab, check accessibility permissions
                accessible_permission = db.session.query(PermissionToLt).filter_by(lt = institution, local_identifier = lab_name, accessible = True).first()
                if accessible_permission is None:
                    permission = db.session.query(PermissionToLt).filter_by(lt = institution, local_identifier = lab_name).first()
                    if permission is None:
                        return render_template("opensocial/errors.html", message = gettext("Your PLE is valid, but don't have permissions for the requested laboratory."))
                    for course_permission in permission.course_permissions:
                        if course_permission.course.context_id in spaces:
                            # Let the server choose among the best possible configuration
                            courses_configurations.append(course_permission.configuration)
                    if len(courses_configurations) == 0:
                        return render_template("opensocial/errors.html", message = gettext("Your PLE is valid and your lab too, but you're not in one of the spaces that have permissions (you are in %(space)r)", space=spaces))
                else:
                    # There is an accessibility permission for that lab and institution
                    permission = accessible_permission

                ple_configuration = permission.configuration
                db_laboratory     = permission.laboratory
                institution_name  = institution.name
            else: 
                # There is a public permission for the lab
                ple_configuration = []
                db_laboratory     = public_lab_db
                institution_name  = institution.name

        booking_required = db_laboratory.go_lab_reservation
        lab_identifier = db_laboratory.laboratory_id
        db_rlms = db_laboratory.rlms

    if booking_required:
        next_session = check_ils_booking(gadget_url_base)
        if next_session is not None:
            return render_template("opensocial/errors-booking.html", next_session = next_session)

    # Obtain user data
    if st == 'null' and (public_lab or public_rlms):
        user_id = 'no-id'
    else:
        try:
            current_user_str  = urllib2.urlopen(url_shindig("/rest/people/@me/@self?st=%s" % st)).read()
            current_user_data = json.loads(current_user_str)
        except:
            traceback.print_exc()
            if public_lab or public_rlms:
                user_id = 'no-id'
            else:
                return render_template("opensocial/errors.html", message = gettext("Could not connect to %(urlshindig)s.", urlshindig=url_shindig("/rest/people/@me/@self?st=%s" % st)))
        else:
            # name    = current_user_data['entry'].get('displayName') or 'anonymous'
            user_id = current_user_data['entry'].get('id') or 'no-id'

    rlms_version      = db_rlms.version
    rlms_kind         = db_rlms.kind
    user_agent = unicode(request.user_agent)
    origin_ip  = remote_addr()
    referer    = request.referrer
    # Load the plug-in for the current RLMS, and instantiate it
    ManagerClass = get_manager_class(rlms_kind, rlms_version, db_rlms.id)
    remote_laboratory = ManagerClass(db_rlms.configuration)

    kwargs = {}

    locale = request.args.get('locale') or None
    if locale:
        kwargs['locale'] = locale

    lab_config = request.args.get('lab_config')
    try:
        lab_config = urllib.unquote(lab_config)
        json.loads(lab_config) # Verify that it's a valid JSON
    except:
        lab_config = '{}'
    if lab_config:
        request_payload = { 'initial' : lab_config }
    else:
        request_payload = {}

    try:
        response = remote_laboratory.reserve(laboratory_id                = lab_identifier,
                                                username                  = user_id,
                                                institution               = institution_name,
                                                general_configuration_str = ple_configuration,
                                                particular_configurations = courses_configurations,
                                                request_payload           = request_payload,
                                                user_properties           = {
                                                    'user_agent' : user_agent,
                                                    'from_ip'    : origin_ip,
                                                    'referer'    : referer
                                                },
                                                back = url_for('.reload', _external = True),
                                                **kwargs)
    except Exception as e:
        app.logger.error("Error processing request: %s" % e, exc_info = True)
        traceback.print_exc()
        # Don't translate, just in case there are issues with the problem itself
        return render_template("opensocial/errors.html", message = "There was an error performing the reservation to the final laboratory.")
    else:
        if Capabilities.WIDGET in remote_laboratory.get_capabilities():
            reservation_id = response['reservation_id']
        else:
            reservation_id = response['load_url']

        quoted_reservation_id = urllib2.quote(reservation_id, '')
        g4l_session_id = "{0}-{1}-{2}".format(quoted_reservation_id, time.time(), str(random.randint(0, 9999)).zfill(4))

        return render_template("opensocial/confirmed.html", reservation_id = quoted_reservation_id, g4l_session_id = g4l_session_id, shindig_url = SHINDIG.url)

Example 25

Project: viewfinder Source File: analyze_merged_logs.py
@gen.engine
def ProcessFiles(merged_store, logs_paths, filenames, callback):
  """Fetch and process each file contained in 'filenames'."""

  def _ProcessOneFile(contents, day_stats, device_entries, trace_entries):
    """Iterate over the contents of a processed file: one entry per line. Increment stats for specific entries."""
    buf = cStringIO.StringIO(contents)
    buf.seek(0)
    # Max len is +1 since we include the current line. It allows us to call 'continue' in the middle of the loop.
    context_before = deque(maxlen=options.options.trace_context_num_lines + 1)
    # Traces that still need "after" context.
    pending_traces = []
    def _AddTrace(trace_type, timestamp, module, message):
      # context_before also has the current line, so grab only :-1.
      trace = {'type': trace_type,
               'timestamp': timestamp,
               'module': module,
               'trace': message,
               'context_before': list(context_before)[:-1],
               'context_after': []}
      if options.options.trace_context_num_lines == 0:
        trace_entries.append(trace)
      else:
        pending_traces.append(trace)

    def _CheckPendingTraces(line):
      for t in pending_traces:
        t['context_after'].append(line)
      while pending_traces and len(pending_traces[0]['context_after']) >= options.options.trace_context_num_lines:
        trace_entries.append(pending_traces.pop(0))

    while True:
      line = buf.readline()
      if not line:
        break
      line = line.rstrip('\n')
      # The deque automatically pops elements from the front when maxlen is reached.
      context_before.append(line)
      _CheckPendingTraces(line)

      parsed = logs_util.ParseLogLine(line)
      if not parsed:
        continue
      day, time, module, msg = parsed
      timestamp = logs_util.DayTimeStringsToUTCTimestamp(day, time)

      if options.options.process_traceback and re.search(kTracebackRE, line):
        _AddTrace('traceback', timestamp, module, msg)

      if module.startswith('user_op_manager:') or module.startswith('operation:'):
        # Found op status line.
        if msg.startswith('SUCCESS'):
          # Success message. eg: SUCCESS: user: xx, device: xx, op: xx, method: xx.yy in xxs
          parsed = logs_util.ParseSuccessMsg(msg)
          if not parsed:
            continue
          user, device, op, class_name, method_name = parsed
          method = '%s.%s' % (class_name, method_name)
          day_stats.ActiveAll(user)
          if method in ('Follower.UpdateOperation', 'UpdateFollowerOperation.Execute'):
            day_stats.ActiveView(user)
          elif method in ('Comment.PostOperation', 'PostCommentOperation.Execute'):
            day_stats.ActivePost(user)
          elif method in ('Episode.ShareExistingOperation', 'Episode.ShareNewOperation',
                          'ShareExistingOperation.Execute', 'ShareNewOperation.Execute'):
            day_stats.ActiveShare(user)
        elif msg.startswith('EXECUTE'):
          # Exec message. eg: EXECUTE: user: xx, device: xx, op: xx, method: xx.yy: <req>
          parsed = logs_util.ParseExecuteMsg(msg)
          if not parsed:
            continue
          user, device, op, class_name, method_name, request = parsed
          method = '%s.%s' % (class_name, method_name)
          if method in ('Device.UpdateOperation', 'User.RegisterOperation', 'RegisterUserOperation.Execute'):
            try:
              # Unlike the JSON ping payloads below, this request is parsed
              # with eval(); ast.literal_eval would be safer for untrusted logs.
              req_dict = eval(request)
              device_entries.append({'method': method, 'timestamp': timestamp, 'request': req_dict})
            except Exception:
              continue
        elif msg.startswith('ABORT'):
          if options.options.process_op_abort:
            # Abort message, save the entire line as well as context.
            _AddTrace('abort', timestamp, module, msg)
        # FAILURE status is already handled by Traceback processing.
      elif module.startswith('base:') and msg.startswith('/ping OK:'):
        # Ping message. Extract full request dict.
        req_str = logs_util.ParsePingMsg(msg)
        if not req_str:
          continue
        try:
          req_dict = json.loads(req_str)
          device_entries.append({'method': 'ping', 'timestamp': timestamp, 'request': req_dict})
        except Exception as e:
          continue
      elif module.startswith('ping:') and msg.startswith('ping OK:'):
        # Ping message in new format. Extract full request and response dicts.
        (req_str, resp_str) = logs_util.ParseNewPingMsg(msg)
        if not req_str or not resp_str:
          continue
        try:
          req_dict = json.loads(req_str)
          resp_dict = json.loads(resp_str)
          device_entries.append({'method': 'ping', 'timestamp': timestamp, 'request': req_dict, 'response': resp_dict})
        except Exception as e:
          continue


    # No more context. Flush the pending traces into the list.
    trace_entries.extend(pending_traces)
    buf.close()

  today = util.NowUTCToISO8601()
  # Group filenames by day.
  files_by_day = defaultdict(list)
  for filename in filenames:
    day = logs_paths.MergedLogPathToDate(filename)
    if not day:
      logging.error('filename cannot be parsed as processed log: %s' % filename)
      continue
    if options.options.compute_today or today != day:
      files_by_day[day].append(filename)

  # Sort the list of days. This is important both for --max_days_to_process, and to know the last
  # day for which we wrote the file.
  day_list = sorted(files_by_day.keys())
  if options.options.max_days_to_process is not None:
    day_list = day_list[:options.options.max_days_to_process]

  last_day_written = None
  for day in day_list:
    files = files_by_day[day]
    day_stats = logs_util.DayUserRequestStats(day)
    device_entries = []
    trace_entries = []
    for f in files:
      # Let exceptions surface.
      contents = yield gen.Task(merged_store.Get, f)
      logging.info('Processing %d bytes from %s' % (len(contents), f))
      _ProcessOneFile(contents, day_stats, device_entries, trace_entries)

    if not options.options.dry_run:
      # Write the json-ified stats.
      req_contents = json.dumps(day_stats.ToDotDict())
      req_file_path = 'processed_data/user_requests/%s' % day
      dev_contents = json.dumps(device_entries)
      dev_file_path = 'processed_data/device_details/%s' % day
      try:
        trace_contents = json.dumps(trace_entries)
      except Exception as e:
        trace_contents = None
      trace_file_path = 'processed_data/traces/%s' % day


      @gen.engine
      def _MaybePut(path, contents, callback):
        if contents:
          yield gen.Task(merged_store.Put, path, contents)
          logging.info('Wrote %d bytes to %s' % (len(contents), path))
        callback()


      yield [gen.Task(_MaybePut, req_file_path, req_contents),
             gen.Task(_MaybePut, dev_file_path, dev_contents),
             gen.Task(_MaybePut, trace_file_path, trace_contents)]

      last_day_written = day_stats.day

  callback(last_day_written)
  return
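
Each ping branch above follows the same shape: extract the payload, json.loads it, and drop the line on failure. A minimal sketch with made-up log lines:

import json

lines = [
    '/ping OK: {"device": {"os": "iOS"}}',
    '/ping OK: {truncated',
]

entries = []
for line in lines:
    req_str = line.partition('OK: ')[2]
    try:
        entries.append(json.loads(req_str))
    except ValueError:
        continue  # skip malformed payloads, as _ProcessOneFile does

print(entries)  # [{'device': {'os': 'iOS'}}]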

Example 26

Project: api-samples Source File: 03_FlowRegexProperties.py
def main():

    # create the api client
    client = client_module.RestApiClient(version='7.0')

    # -------------------------------------------------------------------------
    # 1. get a list of flow regex property
    endpoint_url = 'config/flow_sources/custom_properties/regex_properties'
    http_method = 'GET'

    # select fields to return for each flow regex property
    fields = 'id, name, property_type'

    # use filter to select desired flow regex property
    query_filter = 'property_type = "numeric"'

    # populate the optional parameters to be used in request
    params = {'fields': fields, 'filter': query_filter}

    # put range in header for paging purpose
    headers = {'range': 'items=0-4'}

    # send the request
    response = client.call_api(endpoint_url, http_method, params=params,
                               headers=headers, print_request=True)

    # check response and handle any error
    if response.code == 200:
        regex_properties = json.loads(response.read().decode('utf-8'))

        # go through the list of flow regex properties and print each
        for regex_property in regex_properties:
            print(regex_property)

    else:
        SampleUtilities.pretty_print_response(response)
        print('Failed to retrieve the list of flow regex properties')
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 2. create a new flow regex property

    endpoint_url = 'config/flow_sources/custom_properties/regex_properties'
    http_method = 'POST'

    # sample flow regex property, be sure to change the name if running
    # multiple times.
    new_regex_property = {
                          "name": "Sample flow regex property x",
                          "description": "description property",
                          "property_type": "numeric",
                          "use_for_rule_engine": True,
                          }

    data = json.dumps(new_regex_property).encode('utf-8')

    headers = {'Content-type': 'application/json'}

    # send the request
    response = client.call_api(endpoint_url, http_method, data=data,
                               headers=headers, print_request=True)

    # check response and handle any error
    if response.code == 201:
        print('A new flow regex property is created.')
        # can extract newly created flow regex property from the response
        regex_property = json.loads(response.read().decode('utf-8'))
        print(json.dumps(regex_property, indent=4))
    else:
        print('Failed to create the new flow regex property')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 3. get a single flow regex property by id

    # id of the flow regex property, using the one created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/flow_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id))
    http_method = 'GET'

    # send the request
    response = client.call_api(endpoint_url, http_method, print_request=True)

    # check response and handle any error
    if response.code == 200:
        print("The requested flow regex property has been retrieved.")
        regex_property = json.loads(response.read().decode('utf-8'))
        print(json.dumps(regex_property, indent=4))
    else:
        print('Failed to retrieve the flow regex property with id=' +
              str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 4. update a flow regex property by its id

    # using flow regex property created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/flow_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id))
    http_method = 'POST'

    fields_to_update = {
                        "description": "updated description",
                        "use_for_rule_engine": False,
                        }

    data = json.dumps(fields_to_update).encode('utf-8')

    headers = {'Content-type': 'application/json'}

    # send the request
    response = client.call_api(endpoint_url, http_method, data=data,
                               headers=headers, print_request=True)

    if response.code == 200:
        print('The flow regex property has been successfully updated.')
        regex_property = json.loads(response.read().decode('utf-8'))
        print(json.dumps(regex_property, indent=4))
    else:
        print('Failed to update the flow regex property with id=' +
              str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 5. find dependents of a flow regex property

    # using flow regex property created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/flow_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id)) + '/dependents'
    http_method = 'GET'

    # send the request
    response = client.call_api(endpoint_url, http_method, print_request=True)

    if response.code == 202:
        print('The find dependents task for flow regex property has started.')
        task_status = json.loads(response.read().decode('utf-8'))
        print(json.dumps(task_status, indent=4))

        task_status_url = ('/config/flow_sources/custom_properties/' +
                           'regex_property_dependent_tasks' + '/' +
                           str(task_status['id']))

        task_manager = TaskManager(client, task_status_url)

        try:
            task_manager.wait_for_task_to_complete(60)

            # query the result endpoint for results

            endpoint_url = ('config/flow_sources/custom_properties/' +
                            'regex_property_dependent_tasks' + '/' +
                            str(task_status['id']) + '/results')
            http_method = 'GET'

            response = client.call_api(endpoint_url, http_method,
                                       print_request=True)

            # check response and handle any error
            if response.code == 200:
                task_result = json.loads(response.read().decode('utf-8'))
                print(json.dumps(task_result, indent=4))

            else:
                SampleUtilities.pretty_print_response(response)
                print('Failed to retrieve the result of find dependents task.')
                sys.exit(1)

        except TimeoutError:
            print("Find dependents task time out. Current status is:")
            SampleUtilities.pretty_print_response(
                              task_manager.get_task_status()
                              )

    else:
        print('Failed to start a find dependents task for ' +
              'flow regex property with id=' + str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 6. delete a flow regex property

    # using flow regex property created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/flow_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id))
    http_method = 'DELETE'

    # send the request
    response = client.call_api(endpoint_url, http_method, print_request=True)

    if response.code == 202:
        print('The deletion task for flow regex property has started.')
        task_status = json.loads(response.read().decode('utf-8'))
        print(json.dumps(task_status, indent=4))

        task_status_url = ('/config/flow_sources/custom_properties/' +
                           'regex_property_delete_tasks' + '/' +
                           str(task_status['id']))

        task_manager = TaskManager(client, task_status_url)

        try:
            task_manager.wait_for_task_to_complete(60)
        except TimeoutError:
            print("Deletion task time out. Current status is:")
            SampleUtilities.pretty_print_response(
                              task_manager.get_task_status()
                              )

    else:
        print('Failed to start a deletion task for ' +
              'flow regex property with id=' + str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)
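
Every step in this sample parses the response the same way; a hypothetical wrapper (not part of the QRadar client) makes the bytes-to-dict pattern explicit:

import io
import json

def read_json(response):
    # The client returns a raw HTTP response whose body is bytes;
    # decode as UTF-8 before parsing.
    return json.loads(response.read().decode('utf-8'))

# io.BytesIO stands in for the HTTP response object here.
print(read_json(io.BytesIO(b'[{"id": 1, "name": "prop"}]')))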

Example 27

Project: api-samples Source File: 04_postCustomActions.py
def main():
    # Create our client.
    rest_client = client_module.RestApiClient(version='6.0')
    # Endpoints used in this sample
    scripts_endpoint = 'analytics/custom_actions/scripts'
    actions_endpoint = 'analytics/custom_actions/actions'
    # Variable to hold the root path to the custom actions sample folder
    root_path = os.path.dirname(os.path.realpath(__file__))
    # Script file name & path to where it is stored
    file_name = 'python_sample.py'
    file_path = os.path.join(root_path, 'custom_action_samples', file_name)

    # Opening script file in local file system
    with open(file_path) as script:
        # Adding a request header to contain the file name
        # Also setting content-type header to application/octet-stream
        request_header = rest_client.headers.copy()
        request_header['file_name'] = file_name
        request_header['Content-Type'] = 'application/octet-stream'
        # Reading the content of the script file & encoding it for use
        # with the endpoint.
        script_data = script.read()
        script_data_encoded = str.encode(script_data)

    SampleUtilities.pretty_print_request(rest_client,
                                         scripts_endpoint,
                                         'POST')
    # Calling scripts endpoint to POST script file.
    response = rest_client.call_api(scripts_endpoint,
                                    'POST',
                                    headers=request_header,
                                    data=script_data_encoded)
    # Checking for a successful response code.
    if response.code != 201:
        print('Failed to POST custom action script to the server')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    script_response = json.loads(response.read().decode('utf-8'))
    retrieved_id = str(script_response['id'])
    retrieved_name = str(script_response['file_name'])

    format_str = 'Script successfully uploaded. Values returned: id=[{0}],'\
                 ' file name=[{1}].\n'

    print(format_str.format(retrieved_id, retrieved_name))

    print("Demonstrating updating scripts via /scripts/{id} endpoint...")
    # This script id will be used with the POST /scripts/{id} endpoint
    # and with the POST /actions endpoint.
    script_id = script_response['id']

    # Demonstrating updating an existing script resource
    file_name = 'bash_sample.sh'
    file_path = os.path.join(root_path, 'custom_action_samples', file_name)

    with open(file_path) as script:
        # Adding a request header to contain the file name
        # Also setting content-type header to application/octet-stream
        request_header = rest_client.headers.copy()
        request_header['file_name'] = file_name
        request_header['Content-Type'] = 'application/octet-stream'
        # Reading the content of the script file & encoding it
        # for use with the endpoint.
        script_data = script.read()
        script_data_encoded = str.encode(script_data)
    # Updating endpoint to include /{id}.
    scripts_endpoint += '/' + str(script_id)
    SampleUtilities.pretty_print_request(rest_client,
                                         scripts_endpoint,
                                         'POST')
    # Calling the POST /scripts/{id} endpoint to
    # update the script resource.
    response = rest_client.call_api(scripts_endpoint,
                                    'POST',
                                    headers=request_header,
                                    data=script_data_encoded)

    if (response.code != 200):
        print('Failed to POST updated custom action script to the server')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)
    # Extracting script id and file name from the response.
    script_response = json.loads(response.read().decode('utf-8'))
    retrieved_id = str(script_response['id'])
    retrieved_name = str(script_response['file_name'])

    format_str = 'Script successfully updated. Values returned: id=[{0}],'\
                 ' file name=[{1}].\n'

    print(format_str.format(retrieved_id, retrieved_name))

    # Using the script ID generated by the previous calls we can
    # now create a new custom action.
    # Custom actions are posted to the server as a complete object.
    # This is demonstrated below.

    # Dict object to contain the custom action
    custom_action = {}
    custom_action['name'] = "Custom Action Demonstration"
    custom_action['description'] = "Demonstrating POST custom action endpoint"
    # GET /interpreters can be used to return a collection of available
    # interpreters from which ids can be retrieved. But for demo purposes
    # this has been hard coded here to 1.
    custom_action['interpreter'] = 1
    # ID of script created earlier
    custom_action['script'] = script_id
    # Custom Action parameters are stored within a list object
    custom_action_params = []
    # Param dict objects to house each custom action parameter
    param1 = {}
    param1['name'] = 'demoParam1'
    # Must be either 'fixed', or 'dynamic'.
    param1['parameter_type'] = 'fixed'
    # Only fixed parameters can be encrypted.
    # This will encrypt the value of the parameter at storage time
    param1['encrypted'] = True
    param1['value'] = 'Hello World!'

    param2 = {}
    param2['name'] = 'demoParam2'
    # The value of dynamic parameters will be replaced with the actual value
    # occurring in the event which triggers
    # the rule containing the custom action
    param2['parameter_type'] = 'dynamic'
    # Dynamic parameters cannot be encrypted; if set to
    # true it will default back to false
    param2['encrypted'] = False
    # This value will be replaced with the actual source IP of the event
    # which triggered the rule containing the custom action.
    # Available dynamic parameter values can be retrieved via the
    # /api/ariel/databases/events?fields=columns(name) endpoint.
    param2['value'] = 'sourceip'

    custom_action_params.append(param1)
    custom_action_params.append(param2)

    # Adding custom action parameters to custom action
    custom_action['parameters'] = custom_action_params

    # Converting custom action object to json and
    # encoding it for use with the endpoint.
    custom_action = json.dumps(custom_action).encode()

    action_headers = rest_client.headers.copy()
    action_headers['Content-Type'] = 'application/json'

    SampleUtilities.pretty_print_request(rest_client,
                                         actions_endpoint,
                                         'POST')
    response = rest_client.call_api(actions_endpoint,
                                    'POST',
                                    headers=action_headers,
                                    data=custom_action)

    if (response.code != 201):
        print('Failed to POST custom action to the server')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # The created custom action is returned, which will
    # have its ID within a new field.
    action_response = json.loads(response.read().decode('utf-8'))
    action_id = action_response['id']

    print("Successfully posted custom action [returned id=" +
          str(action_id) + "].")

    action_name = str(action_response['name'])
    action_desc = str(action_response['description'])
    action_interpreter = str(action_response['interpreter'])
    action_script = str(action_response['script'])

    format_str = 'Custom action values:\n[name={0}'\
                 ', description={1} '\
                 ', interpreter={2}'\
                 ', script={3}].'

    print(format_str.format(action_name,
                            action_desc,
                            action_interpreter,
                            action_script))

    print("Parameters: ")
    for each in action_response['parameters']:

        param_name = str(each['name'])
        param_type = str(each['parameter_type'])
        param_encrypted = str(each['encrypted'])
        param_value = str(each['value'])

        format_str = '[name={0}'\
                     ', parameter_type={1}'\
                     ', encrypted={2}'\
                     ', value={3}].'

        print(format_str.format(param_name,
                                param_type,
                                param_encrypted,
                                param_value))

    print()

    # Demonstrating the POST /actions/{id} endpoint used
    # for updating custom actions

    updated_action = {}
    updated_action['id'] = action_id
    updated_action['name'] = 'Updated Demo Custom Action'
    # Interpreter & script required even
    # if they remain unchanged.
    updated_action['interpreter'] = 2
    updated_action['script'] = script_id
    # Replacing old params with a single new parameter.
    updated_action['parameters'] = [{'name': 'demoParam',
                                     'parameter_type': 'fixed',
                                     'encrypted': False,
                                     'value': 'new param'}]

    updated_action = json.dumps(updated_action).encode()
    # Appending endpoint with action id.
    actions_endpoint += '/' + str(action_id)

    SampleUtilities.pretty_print_request(rest_client,
                                         actions_endpoint,
                                         'POST')
    response = rest_client.call_api(actions_endpoint,
                                    'POST',
                                    headers=action_headers,
                                    data=updated_action)

    if (response.code != 200):
        print('Failed to POST custom action [' +
              str(action_id) + "] to the server.")
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    updated_response = json.loads(response.read().decode('utf-8'))

    print("Successfully posted updated custom action [" +
          str(action_id) + "] to the server")

    action_name = str(updated_response['name'])
    action_desc = str(updated_response['description'])
    action_interpreter = str(updated_response['interpreter'])
    action_script = str(updated_response['script'])

    format_str = 'Updated custom action values:\n [name={0}'\
                 ', description={1} '\
                 ', interpreter={2}'\
                 ', script={3}].'

    print(format_str.format(action_name,
                            action_desc,
                            action_interpreter,
                            action_script))

    print("Parameters: ")
    for each in updated_response['parameters']:

        param_name = str(each['name'])
        param_type = str(each['parameter_type'])
        param_encrypted = str(each['encrypted'])
        param_value = str(each['value'])

        format_str = '[name={0}'\
                     ', parameter_type={1}'\
                     ', encrypted={2}'\
                     ', value={3}].'

        print(format_str.format(param_name,
                                param_type,
                                param_encrypted,
                                param_value))

    print()
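
The request and response handling above is a plain encode/decode round trip; sketched in isolation with placeholder values:

import json

# Build the request body: dict -> JSON text -> bytes.
custom_action = {'name': 'Demo action', 'interpreter': 1, 'script': 42}
body = json.dumps(custom_action).encode('utf-8')

# Parse a stand-in response body: bytes -> JSON text -> dict.
reply = b'{"id": 7, "name": "Demo action"}'
action = json.loads(reply.decode('utf-8'))
print(action['id'])  # 7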

Example 28

Project: SmartQQ-for-Raspberry-Pi Source File: WebQQ.py
  def __init__(self, vpath, qq=0):
    self.VPath = vpath  # path where the QRCode image is saved
    self.AdminQQ = int(qq)
    logging.basicConfig(filename='qq.log', level=logging.DEBUG, format='%(asctime)s  %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='[%Y-%m-%d %H:%M:%S]')

    self.initUrl = "https://ui.ptlogin2.qq.com/cgi-bin/login?daid=164&target=self&style=16&mibao_css=m_webqq&appid=501004106&enable_qlogin=0&no_verifyimg=1&s_url=http%3A%2F%2Fw.qq.com%2Fproxy.html&f_url=loginerroralert&strong_login=1&login_state=10&t=20131024001"

    html = self.Get(self.initUrl, self.SmartQQUrl)

    self.APPID = 501004106
    MiBaoCss = "m_webqq"
    JsVer = 10149

    StarTime = self.date_to_millis(datetime.datetime.utcnow())

    T = 0
    while True:
      T = T + 1
      self.Download('https://ssl.ptlogin2.qq.com/ptqrshow?appid={0}&e=0&l=M&s=5&d=72&v=4&t=0.5462884965818375'.format(self.APPID), self.VPath)
      logging.info('[{0}] Get QRCode Picture Success.'.format(T))
      while True:
        html = self.Get('https://ssl.ptlogin2.qq.com/ptqrlogin?webqq_type=10&remember_uin=1&login2qq=1&aid={0}&u1=http%3A%2F%2Fw.qq.com%2Fproxy.html%3Flogin2qq%3D1%26webqq_type%3D10&ptredirect=0&ptlang=2052&daid=164&from_ui=1&pttype=1&dumy=&fp=loginerroralert&action=0-0-{1}&mibao_css={2}&t=undefined&g=1&js_type=0&js_ver={3}&login_sig=&pt_randsalt=0'.format(self.APPID, self.date_to_millis(datetime.datetime.utcnow()) - StarTime, MiBaoCss, JsVer), self.initUrl)
        logging.info(html)
        ret = html.split("'")
        if ret[1] == '65' or ret[1] == '0':  # 65: QRCode expired, 0: verification succeeded, 66: not expired yet, 67: verifying
          break
        time.sleep(2)
      if ret[1] == '0' or T > self.MaxTryTime:
        break

    logging.debug(ret)
    if ret[1] != '0':
      return

    if os.path.exists(self.VPath):  # delete the QRCode file
      os.remove(self.VPath)

    html = self.Get(ret[5])

    url = self.getReValue(html, r' src="(.+?)"', 'Get mibao_res Url Error.', 0)

    if url != '':
        html = self.Get(url.replace('&amp;', '&'))
        url = self.getReValue(html, r'location\.href="(.+?)"', 'Get Redirect Url Error', 1)
        html = self.Get(url)

    self.PTWebQQ = self.getCookie('ptwebqq')

    logging.info('PTWebQQ: {0}'.format(self.PTWebQQ))

    #self.Get('http://s.web2.qq.com/proxy.html?v=20130916001&callback=1&id=1')

    while 1:
      html = self.Get('http://s.web2.qq.com/api/getvfwebqq?ptwebqq={0}&clientid={1}&psessionid=&t={2}'.format(self.PTWebQQ, self.ClientID, StarTime), self.Referer)
      logging.debug(html)
      ret = json.loads(html)

      if ret['retcode'] != 0:
        break

      self.VFWebQQ = ret['result']['vfwebqq']

      #self.Get('http://d1.web2.qq.com/proxy.html?v=20151105001&callback=1&id=2', self.SmartQQUrl)

      html = self.Post('http://d1.web2.qq.com/channel/login2', {
        'r' : '{{"ptwebqq":"{0}","clientid":{1},"psessionid":"","status":"online"}}'.format(self.PTWebQQ, self.ClientID)
      }, 'http://d1.web2.qq.com/proxy.html?v=20151105001&callback=1&id=2')

      logging.debug(html)
      ret = json.loads(html)

      if ret['retcode'] != 0:
        break

      self.PSessionID = ret['result']['psessionid']

      logging.info('Login success')

      msgId = int(random.uniform(1000, 3456)) * 10000 + 1

      E = 0
      while 1:
        html = self.Post('http://d1.web2.qq.com/channel/poll2', {
          'r' : '{{"ptwebqq":"{1}","clientid":{2},"psessionid":"{0}","key":""}}'.format(self.PSessionID, self.PTWebQQ, self.ClientID)
        }, self.Referer)

        # The poll returns an empty body on timeout, so if it is empty just issue the request again rather than falling through.
        if html == '':
          continue

        logging.info(html)

        try:
          ret = json.loads(html)
          E = 0
        except ValueError as e:
          logging.debug(e)
          E += 1
        except Exception as e:
          logging.debug(e)
          E += 1

        if E > 0 and E < 5:
          time.sleep(2)
          continue

        if E > 0:
          logging.debug('try auto login ...')
          break

        if ret['retcode'] == 100006:
          break
        if ret['retcode'] == 102:  # no new messages
          continue
        if ret['retcode'] == 116:  # update the PTWebQQ value
          self.PTWebQQ = ret['p']
          continue
        if ret['retcode'] == 0 and ret.get('result'):
          for msg in ret['result']:
            msgType = msg['poll_type']
            if msgType == 'message':  # QQ message
              txt = msg['value']['content'][1]
              logging.debug(txt)
              tuin = msg['value']['from_uin']
              if tuin not in self.FriendList:  # If the sender's real QQ number is not in FriendList, fetch it and cache it
                try:
                  info = json.loads(self.Get('http://s.web2.qq.com/api/get_friend_uin2?tuin={0}&type=1&vfwebqq={1}'.format(tuin, self.VFWebQQ), self.Referer))
                  logging.info(info)
                  if info['retcode'] != 0:
                    raise ValueError, info
                  info = info['result']
                  self.FriendList[tuin] = info['account']
                except Exception as e:
                  logging.debug(e)
                  continue
              if self.FriendList.get(tuin, 0) != self.AdminQQ:  # If the sender is not AdminQQ, ignore this message
                continue
              if txt[0] == '#':
                  thread.start_new_thread(self.runCommand, (tuin, txt[1:].strip(), msgId))
                  msgId += 1
              if txt[0:4] == 'exit':
                exit(0)
            elif msgType == 'sess_message':  # message from a temporary QQ session
              logging.debug(msg['value']['content'][1])
            elif msgType == 'group_message':  # group message
              txt = msg['value']['content'][1]
              logging.debug("QQGroup Message:" + txt)
            elif msgType == 'discu_message':  # discussion-group message
              txt = msg['value']['content'][1]
              logging.debug("Discu Message:" + txt)
            elif msgType == 'kick_message':  # the QQ account logged in elsewhere and we were kicked offline
              logging.error(msg['value']['reason'])
              raise Exception, msg['value']['reason']  # raise to restart WebQQ; the QRCode must be rescanned to log in again
              break
            elif msgType != 'input_notify':
              logging.debug(msg)
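
The poll loop's error handling boils down to "retry on undecodable bodies, give up after several in a row"; a distilled sketch where fetch stands in for the HTTP poll:

import json
import time

def poll(fetch, max_errors=5, delay=2):
    errors = 0
    while errors < max_errors:
        body = fetch()
        if not body:
            continue  # an empty body means the poll timed out; re-poll
        try:
            return json.loads(body)
        except ValueError:
            errors += 1  # undecodable body: back off and retry
            time.sleep(delay)
    return None  # give up, as the loop above does before re-login

print(poll(lambda: '{"retcode": 0}'))  # {'retcode': 0}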

Example 29

Project: data-import Source File: update_assets.py
Function: main
def main():

    parser = get_parser()
    args = parser.parse_args()

    if args[0].help or not (args[0].file or args[0].fields) or not args[0].ip or not args[0].token :
        print >> sys.stderr, "A simple utility to load a CSV file with asset information into the QRadar asset model based on IP address (which must exist in QRadar)"
        print >> sys.stderr, "The first column of the first line of the file must be 'ipaddress'"
        print >> sys.stderr, "The remaining columns of the file must contain field name headers that match the asset properties being loaded"
        print >> sys.stderr, "The asset with the most recent occurrence of the ip address is updated with the remaining fields on the line"
        print >> sys.stderr, "";
        print >> sys.stderr, "example:"
        print >> sys.stderr, "";
        print >> sys.stderr, "ipaddress,Technical Owner,Location,Description"
        print >> sys.stderr, "172.16.129.128,Chris Meenan,UK,Email Server"
        print >> sys.stderr, "172.16.129.129,Joe Blogs,Altanta,Customer Database Server"
        print >> sys.stderr, "172.16.129.130,Jason Corbin,Boston,Application Server"
        print >> sys.stderr, "";
        print >> sys.stderr, parser.format_help().strip() 
        exit(0)

    # Creates instance of APIClient. It contains all of the API methods.
    api_client = RestApiClient(args)

    # retrieve all the asset fields
    print("Retrieving asset fields");
    response = api_client.call_api('asset_model/properties', 'GET',None, {},None)
    
    # Each response contains an HTTP response code.
    response_json = json.loads(response.read().decode('utf-8'))
    if response.code != 200:
        print("When retrieving assets : " + str(response.code))
        print(json.dumps(response_json, indent=2, separators=(',', ':')))
        exit(1)

    asset_field_lookup = {}
    if ( args[0].fields ):
        print("Asset fields:")
    for asset_field in response_json:
        asset_field_lookup[ asset_field['name' ] ] = asset_field['id']
        if ( args[0].fields ):
            print(asset_field['name' ])

    if( not args[0].file ):
        exit(1)

    # open file and get query
    file = open(args[0].file, 'r')

    if file == None:
        print("File not found " + args[0].file)
        exit(1)

    # This is the asset data to load, need to check all the names exist
    columnnames = file.readline().strip();
    fields = columnnames.split(',');

    asset_file_fields = {}
    field_index = 0;
    is_error = 0;
    for fname in fields:
        if (fname <> 'ipaddress') and (asset_field_lookup.get(fname,'')==''):
            print("Field = " + fname + " does not exist")
            is_error = 1
        elif( fname == 'ipaddress' ):
            asset_file_fields[ field_index ] = 0 
        else:
            asset_file_fields[ field_index ] = asset_field_lookup[ fname ]
        field_index = field_index + 1;

    # if there was an error print out the field
    if is_error == 1:
        print("Assets field: ")
        for k, v in asset_field_lookup.items():
            print(k)
        exit(1)
        
    # retrieve all the assets
    print("Retrieving assets from QRadar");
    response = api_client.call_api('asset_model/assets', 'GET',None, {},None)


    # Each response contains an HTTP response code.
    response_json = json.loads(response.read().decode('utf-8'))
    if response.code != 200:
        print("When retrieving assets : " + str(response.code))
        print(json.dumps(response_json, indent=2, separators=(',', ':')))
        exit(1)
    
    print( str(len(response_json)) + " assets retrieved");
    # loop over assets and add to a lookup table
    ip_assetid_lookup = {}
    ip_lastseen_lookup = {}

    for asset in response_json:
        interfaces = asset['interfaces'];
        for interface in interfaces:
            for ipaddresses in interface['ip_addresses']:

                # get the largest last seen we have from this asset
                max_last_seen = ipaddresses['last_seen_scanner']
                if ( ipaddresses['last_seen_profiler'] > max_last_seen ):
                    max_last_seen = ipaddresses['last_seen_profiler']

                # look to see if we have seen this IP address before
                last_seen = ip_lastseen_lookup.get( ipaddresses['value'] ,-1);
                if (last_seen == -1) or (last_seen < max_last_seen):
                    ip_lastseen_lookup[ ipaddresses['value'] ] = max_last_seen
                    ip_assetid_lookup[ ipaddresses['value'] ] = asset['id']

    # now we have loaded the assets and mapped ip address to asset id 
    # we can loop over the file
    data = file.readline().strip()
    
    update_success = 0;
    current_line = 2;
    while data <> '':
        
        # split values
        data_fields = data.split(',')

        json_string = "{ \"properties\": [ "
        index = 0;
        ip_address = '';
        if( len(data_fields) != len(asset_file_fields)):
            print("Error : Missing or extra fields at line " + str(current_line) )
        else:
            ip_address_found=0
            for data_field in data_fields:
                data_field = data_field.strip()
                # this is the IP address
                if index ==0:
                    ip_address = data_field
                    if( ip_assetid_lookup.get(ip_address,'') == '' ):
                        print("Error : IP address " + ip_address + " at line " + str(current_line) + " does not exist in QRadar Asset DB")
                    else:
                        ip_address_found = 1
                else:
                    json_string = json_string + "{ \"type_id\":" + str(asset_file_fields[index]) + ",\"value\":\"" + data_field + "\"}"

                index = index + 1;
                if (index < len(data_fields)) and (index <> 1):
                    json_string = json_string + ","

            if ip_address_found == 1:
                json_string = json_string + "]}"

                #print(" JSON = " + json_string)            
                # create JSON object
        
                response = api_client.call_api('asset_model/assets/'+str(ip_assetid_lookup[ip_address]), 'POST',{b'Accept': 'text/plain' },{},json_string)
                # Each response contains an HTTP response code.
                if response.code != 200:
                    response_json = json.loads(response.read().decode('utf-8'))
                    print("When updating asset : " + str(ip_assetid_lookup[ip_address]) + " " + ip_address)
                    print(" JSON = " + json_string)            
                    print(json.dumps(response_json, indent=2, separators=(',', ':')))
                    exit(1)
                update_success = update_success + 1
    
        data = file.readline().strip()
        current_line = current_line + 1
    print( str(update_success) + " assets successfully updated")
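
The sample assembles json_string by hand; the same body could be produced with json.dumps, which also escapes field values, and json.loads confirms it round-trips (a sketch, not a change to the sample):

import json

properties = [{'type_id': 5, 'value': 'Chris Meenan'},
              {'type_id': 6, 'value': 'UK'}]
json_string = json.dumps({'properties': properties})
print(json_string)

# Round-trip check: the body parses back to the same structure.
assert json.loads(json_string)['properties'][0]['type_id'] == 5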

Example 30

Project: pyriscope Source File: processor.py
def process(args):
    # Make sure there are args, do a primary check for help.
    if len(args) == 0 or args[0] in ARGLIST_HELP:
        show_help()

    # Defaults arg flag settings.
    url_parts_list = []
    ffmpeg = True
    convert = False
    clean = False
    rotate = False
    agent_mocking = False
    name = ""
    live_duration = ""
    req_headers = {}

    # Check for ffmpeg.
    if shutil.which("ffmpeg") is None:
        ffmpeg = False

    # Read in args and set appropriate flags.
    cont = None
    for i in range(len(args)):
        if cont == ARGLIST_NAME:
            if args[i][0] in ('\'', '\"'):
                if args[i][-1:] == args[i][0]:
                    cont = None
                    name = args[i][1:-1]
                else:
                    cont = args[i][0]
                    name = args[i][1:]
            else:
                cont = None
                name = args[i]
            continue
        if cont in ('\'', '\"'):
            if args[i][-1:] == cont:
                cont = None
                name += " {}".format(args[i][:-1])
            else:
                name += " {}".format(args[i])
            continue
        if cont == ARGLIST_TIME:
            cont = None
            live_duration = args[i]

        if re.search(URL_PATTERN, args[i]) is not None:
            url_parts_list.append(dissect_url(args[i]))
        if args[i] in ARGLIST_HELP:
            show_help()
        if args[i] in ARGLIST_CONVERT:
            convert = True
        if args[i] in ARGLIST_CLEAN:
            convert = True
            clean = True
        if args[i] in ARGLIST_ROTATE:
            convert = True
            rotate = True
        if args[i] in ARGLIST_AGENTMOCK:
            agent_mocking = True
        if args[i] in ARGLIST_NAME:
            cont = ARGLIST_NAME
        if args[i] in ARGLIST_TIME:
            cont = ARGLIST_TIME


    # Check for URLs found.
    if len(url_parts_list) < 1:
        print("\nError: No valid URLs entered.")
        sys.exit(1)

    # Disable conversion/rotation if ffmpeg is not found.
    if convert and not ffmpeg:
        print("ffmpeg not found: Disabling conversion/rotation.")
        convert = False
        clean = False
        rotate = False

    # Set a mocked user agent.
    if agent_mocking:
        stdout("Getting mocked User-Agent.")
        req_headers['User-Agent'] = get_mocked_user_agent()
    else:
        req_headers['User-Agent'] = DEFAULT_UA


    url_count = 0
    for url_parts in url_parts_list:
        url_count += 1

        # Disable custom naming for multiple URLs.
        if len(url_parts_list) > 1:
            name = ""

        # Public Periscope API call to get information about the broadcast.
        if url_parts['token'] == "":
            req_url = PERISCOPE_GETBROADCAST.format("broadcast_id", url_parts['broadcast_id'])
        else:
            req_url = PERISCOPE_GETBROADCAST.format("token", url_parts['token'])

        stdout("Downloading broadcast information.")
        response = requests.get(req_url, headers=req_headers)
        broadcast_public = json.loads(response.text)

        if 'success' in broadcast_public and broadcast_public['success'] == False:
            print("\nError: Video expired/deleted/wasn't found: {}".format(url_parts['url']))
            continue

        # Loaded the correct JSON. Create file name.
        if name[-3:] == ".ts":
            name = name[:-3]
        if name[-4:] == ".mp4":
            name = name[:-4]
        if name == "":
            broadcast_start_time_end = broadcast_public['broadcast']['start'].rfind('.')
            timezone = broadcast_public['broadcast']['start'][broadcast_start_time_end:]
            timezone_start = timezone.rfind('-') if timezone.rfind('-') != -1 else timezone.rfind('+')
            timezone = timezone[timezone_start:].replace(':', '')
            to_zone = tz.tzlocal()
            broadcast_start_time = broadcast_public['broadcast']['start'][:broadcast_start_time_end]
            broadcast_start_time = "{}{}".format(broadcast_start_time, timezone)
            broadcast_start_time_dt = datetime.strptime(broadcast_start_time, '%Y-%m-%dT%H:%M:%S%z')
            broadcast_start_time_dt = broadcast_start_time_dt.astimezone(to_zone)
            broadcast_start_time = "{}-{:02d}-{:02d} {:02d}-{:02d}-{:02d}".format(
                broadcast_start_time_dt.year, broadcast_start_time_dt.month, broadcast_start_time_dt.day,
                broadcast_start_time_dt.hour, broadcast_start_time_dt.minute, broadcast_start_time_dt.second)
            name = "{} ({})".format(broadcast_public['broadcast']['username'], broadcast_start_time)

        name = sanitize(name)

        # Get ready to start capturing.
        if broadcast_public['broadcast']['state'] == 'RUNNING':
            # Cannot record live stream without ffmpeg.
            if not ffmpeg:
                print("\nError: Cannot record live stream without ffmpeg: {}".format(url_parts['url']))
                continue

            # The stream is live, start live capture.
            name = "{}.live".format(name)

            if url_parts['token'] == "":
                req_url = PERISCOPE_GETACCESS.format("broadcast_id", url_parts['broadcast_id'])
            else:
                req_url = PERISCOPE_GETACCESS.format("token", url_parts['token'])

            stdout("Downloading live stream information.")
            response = requests.get(req_url, headers=req_headers)
            access_public = json.loads(response.text)

            if 'success' in access_public and access_public['success'] == False:
                print("\nError: Video expired/deleted/wasn't found: {}".format(url_parts['url']))
                continue

            time_argument = ""
            if live_duration != "":
                time_argument = " -t {}".format(live_duration)

            live_url = FFMPEG_LIVE.format(
                url_parts['url'],
                req_headers['User-Agent'],
                access_public['hls_url'],
                time_argument,
                name)

            # Start downloading live stream.
            stdout("Recording stream to {}.ts".format(name))

            Popen(live_url, shell=True, stdout=PIPE).stdout.read()

            stdoutnl("{}.ts Downloaded!".format(name))

            # Convert video to .mp4.
            if convert:
                stdout("Converting to {}.mp4".format(name))

                if rotate:
                    Popen(FFMPEG_ROT.format(name), shell=True, stdout=PIPE).stdout.read()
                else:
                    Popen(FFMPEG_NOROT.format(name), shell=True, stdout=PIPE).stdout.read()

                stdoutnl("Converted to {}.mp4!".format(name))

                if clean and os.path.exists("{}.ts".format(name)):
                    os.remove("{}.ts".format(name))
            continue

        else:
            if not broadcast_public['broadcast']['available_for_replay']:
                print("\nError: Replay unavailable: {}".format(url_parts['url']))
                continue

            # Broadcast replay is available.
            if url_parts['token'] == "":
                req_url = PERISCOPE_GETACCESS.format("broadcast_id", url_parts['broadcast_id'])
            else:
                req_url = PERISCOPE_GETACCESS.format("token", url_parts['token'])

            stdout("Downloading replay information.")
            response = requests.get(req_url, headers=req_headers)
            access_public = json.loads(response.text)

            if 'success' in access_public and access_public['success'] == False:
                print("\nError: Video expired/deleted/wasn't found: {}".format(url_parts['url']))
                continue

            base_url = access_public['replay_url']
            base_url_parts = dissect_replay_url(base_url)

            req_headers['Cookie'] = "{}={};{}={};{}={}".format(access_public['cookies'][0]['Name'],
                                                               access_public['cookies'][0]['Value'],
                                                               access_public['cookies'][1]['Name'],
                                                               access_public['cookies'][1]['Value'],
                                                               access_public['cookies'][2]['Name'],
                                                               access_public['cookies'][2]['Value'])
            req_headers['Host'] = "replay.periscope.tv"

            # Get the list of chunks to download.
            stdout("Downloading chunk list.")
            response = requests.get(access_public['replay_url'], headers=req_headers)
            chunks = response.text
            chunk_pattern = re.compile(r'chunk_\d+\.ts')

            download_list = []
            for chunk in re.findall(chunk_pattern, chunks):
                download_list.append(
                    {
                        'url': REPLAY_URL.format(base_url_parts['key'], chunk),
                        'file_name': chunk
                    }
                )

            # Download chunk .ts files and append them.
            pool = ThreadPool(name, DEFAULT_DL_THREADS, len(download_list))

            temp_dir_name = ".pyriscope.{}".format(name)
            if not os.path.exists(temp_dir_name):
                os.makedirs(temp_dir_name)

            stdout("Downloading replay {}.ts.".format(name))

            for chunk_info in download_list:
                temp_file_path = "{}/{}".format(temp_dir_name, chunk_info['file_name'])
                chunk_info['file_path'] = temp_file_path
                pool.add_task(download_chunk, chunk_info['url'], req_headers, temp_file_path)

            pool.wait_completion()

            if os.path.exists("{}.ts".format(name)):
                try:
                    os.remove("{}.ts".format(name))
                except:
                    stdoutnl("Failed to delete preexisting {}.ts.".format(name))

            with open("{}.ts".format(name), 'wb') as handle:
                for chunk_info in download_list:
                    file_path = chunk_info['file_path']
                    if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
                        break
                    with open(file_path, 'rb') as ts_file:
                        handle.write(ts_file.read())

            # don't delete temp if the download had missing chunks, just in case
            if pool.is_complete() and os.path.exists(temp_dir_name):
                try:
                    shutil.rmtree(temp_dir_name)
                except:
                    stdoutnl("Failed to delete temp folder: {}.".format(temp_dir_name))

            if pool.is_complete():
                stdoutnl("{}.ts Downloaded!".format(name))
            else:
                stdoutnl("{}.ts partially Downloaded!".format(name))

            # Convert video to .mp4.
            if convert:
                stdout("Converting to {}.mp4".format(name))

                if rotate:
                    Popen(FFMPEG_ROT.format(name), shell=True, stdout=PIPE).stdout.read()
                else:
                    Popen(FFMPEG_NOROT.format(name), shell=True, stdout=PIPE).stdout.read()

                stdoutnl("Converted to {}.mp4!".format(name))

                if clean and os.path.exists("{}.ts".format(name)):
                    try:
                        os.remove("{}.ts".format(name))
                    except:
                        stdout("Failed to delete {}.ts.".format(name))

    sys.exit(0)
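
The loop above applies one pattern throughout: fetch a URL with requests, decode the body with json.loads(response.text), then check a 'success' flag before reading nested keys. A minimal, self-contained sketch of that pattern (the endpoint shape and field names are hypothetical, not Periscope's actual API):

import json
import requests

def fetch_json(req_url, req_headers):
    # Fetch a URL and decode its JSON body, returning None on any failure.
    response = requests.get(req_url, headers=req_headers)
    try:
        payload = json.loads(response.text)  # equivalent to response.json()
    except ValueError:
        return None  # the body was not valid JSON
    if payload.get('success') is False:
        return None  # the API signalled an error
    return payload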

Example 31

Project: service.nextup.notification Source File: Player.py
    def autoPlayPlayback(self):
        currentFile = xbmc.Player().getPlayingFile()

        # Get the active player
        result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "id": 1, "method": "Player.GetActivePlayers"}')
        result = unicode(result, 'utf-8', errors='ignore')
        self.logMsg("Got active player " + result, 2)
        result = json.loads(result)

        # The call can come back before the player is active, so loop until it is
        while not result["result"]:
            result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "id": 1, "method": "Player.GetActivePlayers"}')
            result = unicode(result, 'utf-8', errors='ignore')
            self.logMsg("Got active player " + result, 2)
            result = json.loads(result)

        if 'result' in result and result["result"][0] is not None:
            playerid = result["result"][0]["playerid"]

            # Get details of the playing media
            self.logMsg("Getting details of playing media", 1)
            result = xbmc.executeJSONRPC(
                '{"jsonrpc": "2.0", "id": 1, "method": "Player.GetItem", "params": {"playerid": ' + str(
                    playerid) + ', "properties": ["showtitle", "tvshowid", "episode", "season", "playcount"] } }')
            result = unicode(result, 'utf-8', errors='ignore')
            self.logMsg("Got details of playing media" + result, 2)

            result = json.loads(result)
            itemtype = None
            if 'result' in result:
                itemtype = result["result"]["item"]["type"]

            if self.strm_query(result):
                addonSettings = xbmcaddon.Addon(id='service.nextup.notification')
                playMode = addonSettings.getSetting("autoPlayMode")
                currentepisodenumber = result["result"]["item"]["episode"]
                currentseasonid = result["result"]["item"]["season"]
                currentshowtitle = result["result"]["item"]["showtitle"]
                tvshowid = result["result"]["item"]["tvshowid"]
                shortplayMode = addonSettings.getSetting("shortPlayMode")
                shortplayNotification= addonSettings.getSetting("shortPlayNotification")
                shortplayLength = int(addonSettings.getSetting("shortPlayLength")) * 60


                if (itemtype == "episode"):
                    # Get the next up episode
                    currentepisodeid = result["result"]["item"]["id"]
                elif tvshowid == -1:
                    # I am a STRM ###
                    tvshowid, episodeid = self.iStream_fix(tvshowid, currentshowtitle, currentepisodenumber, currentseasonid)
                    currentepisodeid = episodeid
                else:
                    # cannot determine whether this is an episode
                    self.logMsg("Error: cannot determine if episode", 1)
                    return
            else:
                # strm_query failed; the variables used below were never set
                return

            self.currentepisodeid = currentepisodeid
            self.logMsg("Getting details of next up episode for tvshow id: " + str(tvshowid), 1)
            if self.currenttvshowid != tvshowid:
                self.currenttvshowid = tvshowid
                self.playedinarow = 1

            result = xbmc.executeJSONRPC(
                '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %d, '
                '"properties": [ "title", "playcount", "season", "episode", "showtitle", "plot", '
                '"file", "rating", "resume", "tvshowid", "art", "firstaired", "runtime", "writer", '
                '"dateadded", "lastplayed" , "streamdetails"], "sort": {"method": "episode"}}, "id": 1}'
                % tvshowid)

            if result:
                result = unicode(result, 'utf-8', errors='ignore')
                result = json.loads(result)
                self.logMsg("Got details of next up episode %s" % str(result), 2)
                xbmc.sleep(100)

                # Find the next unwatched and the newest added episodes
                if "result" in result and "episodes" in result["result"]:
                    includeWatched = addonSettings.getSetting("includeWatched") == "true"
                    episode = self.findNextEpisode(result, currentFile, includeWatched)

                    if episode is None:
                        # no episode get out of here
                        return
                    self.logMsg("episode details %s" % str(episode), 2)
                    episodeid = episode["episodeid"]

                    if includeWatched:
                        includePlaycount = True
                    else:
                        includePlaycount = episode["playcount"] == 0
                    if includePlaycount and currentepisodeid != episodeid:
                        # we have a next up episode
                        nextUpPage = NextUpInfo("script-nextup-notification-NextUpInfo.xml",
                                                addonSettings.getAddonInfo('path'), "default", "1080i")
                        nextUpPage.setItem(episode)
                        stillWatchingPage = StillWatchingInfo(
                            "script-nextup-notification-StillWatchingInfo.xml",
                            addonSettings.getAddonInfo('path'), "default", "1080i")
                        stillWatchingPage.setItem(episode)
                        playedinarownumber = addonSettings.getSetting("playedInARow")
                        playTime = xbmc.Player().getTime()
                        totalTime =  xbmc.Player().getTotalTime()
                        self.logMsg("played in a row settings %s" % str(playedinarownumber), 2)
                        self.logMsg("played in a row %s" % str(self.playedinarow), 2)
                        if int(self.playedinarow) <= int(playedinarownumber):
                            self.logMsg(
                                "showing next up page as played in a row is %s" % str(self.playedinarow), 2)
                            if (shortplayNotification == "false") and (shortplayLength >= totalTime) and (shortplayMode == "true"):
                                self.logMsg("hiding notification for short videos")
                            else:
                                nextUpPage.show()
                        else:
                            self.logMsg(
                                "showing still watching page as played in a row %s" % str(self.playedinarow), 2)
                            if (shortplayNotification == "false") and (shortplayLength >= totalTime) and (shortplayMode == "true"):
                                self.logMsg("hiding notification for short videos")
                            else:
                                stillWatchingPage.show()
                        while xbmc.Player().isPlaying() and (
                                        totalTime - playTime > 1) and not nextUpPage.isCancel() and not nextUpPage.isWatchNow() and not stillWatchingPage.isStillWatching() and not stillWatchingPage.isCancel():
                            xbmc.sleep(100)
                            try:
                                playTime = xbmc.Player().getTime()
                                totalTime = xbmc.Player().getTotalTime()
                            except:
                                pass
                        if shortplayLength >= totalTime and shortplayMode == "true":
                            #play short video and don't add to playcount
                            self.playedinarow += 0
                            self.logMsg("Continuing short video autoplay")
                            if nextUpPage.isWatchNow() or stillWatchingPage.isStillWatching():
                                self.playedinarow = 1
                            shouldPlayDefault = not nextUpPage.isCancel()
                        else:
                            if int(self.playedinarow) <= int(playedinarownumber):
                                nextUpPage.close()
                                shouldPlayDefault = not nextUpPage.isCancel()
                                shouldPlayNonDefault = nextUpPage.isWatchNow()
                            else:
                                stillWatchingPage.close()
                                shouldPlayDefault = stillWatchingPage.isStillWatching()
                                shouldPlayNonDefault = stillWatchingPage.isStillWatching()

                            if nextUpPage.isWatchNow() or stillWatchingPage.isStillWatching():
                                self.playedinarow = 1
                            else:
                                self.playedinarow += 1

                        if (shouldPlayDefault and playMode == "0") or (shouldPlayNonDefault and playMode == "1"):
                            self.logMsg("playing media episode id %s" % str(episodeid), 2)
                            # Signal to trakt previous episode watched
                            AddonSignals.sendSignal("NEXTUPWATCHEDSIGNAL", {'episodeid': self.currentepisodeid})

                            # Play media
                            xbmc.executeJSONRPC(
                                '{ "jsonrpc": "2.0", "id": 0, "method": "Player.Open", '
                                '"params": { "item": {"episodeid": ' + str(episode["episodeid"]) + '} } }')
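
Example 31 decodes every xbmc.executeJSONRPC reply with json.loads, since Kodi returns JSON-RPC responses as strings (the unicode() calls are Python 2 decoding). A small helper in the same spirit, sketched on the assumption that Kodi's xbmc module is importable:

import json
import xbmc

def jsonrpc(method, params=None):
    # Build a standard JSON-RPC 2.0 request, send it to Kodi, decode the reply.
    request = {'jsonrpc': '2.0', 'id': 1, 'method': method}
    if params is not None:
        request['params'] = params
    raw = xbmc.executeJSONRPC(json.dumps(request))
    return json.loads(raw)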

Example 32

Project: bitex Source File: bootstrap.py
def main():
  candidates = ['bootstrap.ini']
  if len(sys.argv) > 1:
    candidates.append(os.path.expanduser(sys.argv[1]))

  config = ConfigParser.SafeConfigParser()
  config.read( candidates )

  from trade.models import Base, Currency, Instrument, User, Broker, DepositMethods
  db_engine = config.get('database','sqlalchemy_engine') + ':///' + os.path.expanduser(config.get('database','sqlalchemy_connection_string'))
  engine = create_engine( db_engine, echo=True)
  #engine.raw_connection().connection.text_factory = str

  Base.metadata.create_all(engine)


  session = scoped_session(sessionmaker(bind=engine))

  for section_name in config.sections():
    if section_name == 'currencies':
      for id, currency_json in config.items(section_name):
        c = json.loads(currency_json)

        if Currency.get_currency(session,c[0]) :
          continue
        e = Currency(code                 = c[0],
                     sign                 = c[1],
                     description          = c[2],
                     is_crypto            = c[3],
                     pip                  = c[4],
                     format_python        = c[5],
                     format_js            = c[6],
                     human_format_python  = c[7],
                     human_format_js      = c[8] )
        session.add(e)
        session.commit()


    if section_name == 'instruments':
      for id, instrument_json in config.items(section_name):
        currency_description = json.loads(instrument_json)

        if Instrument.get_instrument(session, currency_description[0]):
          continue

        e = Instrument(symbol=currency_description[0],
                       currency=currency_description[1],
                       description=currency_description[2])
        session.add(e)
        session.commit()

    if section_name[:4] == 'user':
      broker_id = None
      try:
        broker_id = config.getint(section_name, 'broker_id')
      except Exception,e:
        pass

      broker_username = None
      try:
        broker_username = config.get(section_name, 'broker_username')
      except Exception,e:
        pass


      if not User.get_user(session,broker_id, config.get(section_name, 'username')):
        password = base64.b32encode(os.urandom(10))
        try:
          password = config.get(section_name, 'password')
        except Exception,e:
          pass

        transaction_fee_buy = None
        try:
          transaction_fee_buy = config.getint(section_name, 'transaction_fee_buy')
        except Exception,e:
          pass

        transaction_fee_sell = None
        try:
          transaction_fee_sell = config.getint(section_name, 'transaction_fee_sell')
        except Exception,e:
          pass

        verified = 0
        try:
          verified = config.getint(section_name, 'verified')
        except Exception,e:
          pass

        is_system = False
        try:
          is_system = config.getboolean(section_name, 'is_system')
        except Exception,e:
          pass

        is_staff = False
        try:
          is_staff = config.getboolean(section_name, 'is_staff')
        except Exception,e:
          pass

        is_broker = False
        try:
          is_broker = config.getboolean(section_name, 'is_broker')
        except Exception,e:
          pass

        state = None
        try:
          state = config.get(section_name, 'state')
        except Exception,e:
          pass

        e = User(id                   = config.getint(section_name, 'id'),
                 username             = config.get(section_name, 'username'),
                 email                = config.get(section_name, 'email'),
                 broker_id            = broker_id,
                 broker_username      = broker_username,
                 password             = password,
                 country_code         = config.get(section_name, 'country_code'),
                 state                = state,
                 transaction_fee_buy  = transaction_fee_buy,
                 transaction_fee_sell = transaction_fee_sell,
                 verified             = verified,
                 is_staff             = is_staff,
                 is_system            = is_system,
                 is_broker            = is_broker,
                 email_lang           = config.get(section_name, 'email_lang'))
        session.add(e)
        session.commit()

    if section_name[:6] == 'broker':
      if not Broker.get_broker(session, config.getint(section_name, 'id')):
        phone_number_1 = None
        try:
          phone_number_1 = config.get(section_name, 'phone_number_1')
        except Exception,e:
          pass

        phone_number_2 = None
        try:
          phone_number_2 = config.get(section_name, 'phone_number_2')
        except Exception,e:
          pass

        skype = None
        try:
          skype = config.get(section_name, 'skype')
        except Exception,e:
          pass

        transaction_fee_buy = 0
        try:
          transaction_fee_buy = config.getint(section_name, 'transaction_fee_buy')
        except Exception,e:
          pass

        transaction_fee_sell = 0
        try:
          transaction_fee_sell = config.getint(section_name, 'transaction_fee_sell')
        except Exception,e:
          pass

        e = Broker(id                       = config.getint(section_name, 'id'),
                   short_name               = config.get(section_name, 'short_name'),
                   business_name            = config.get(section_name, 'business_name'),
                   address                  = config.get(section_name, 'address'),
                   signup_label             = config.get(section_name, 'signup_label'),
                   city                     = config.get(section_name, 'city'),
                   state                    = config.get(section_name, 'state'),
                   zip_code                 = config.get(section_name, 'zip_code'),
                   country_code             = config.get(section_name, 'country_code'),
                   lang                     = config.get(section_name, 'lang'),
                   country                  = config.get(section_name, 'country'),
                   mandrill_api_key         = config.get(section_name, 'mandrill_api_key'),
                   mailer_from_name         = config.get(section_name, 'mailer_from_name'),
                   mailer_from_email        = config.get(section_name, 'mailer_from_email'),
                   mailer_signature         = config.get(section_name, 'mailer_signature'),
                   mailchimp_list_id        = config.get(section_name, 'mailchimp_list_id'),
                   phone_number_1           = phone_number_1,
                   phone_number_2           = phone_number_2,
                   skype                    = skype,
                   email                    = config.get(section_name, 'email'),
                   verification_jotform     = config.get(section_name, 'verification_jotform'),
                   upload_jotform           = config.get(section_name, 'upload_jotform'),
                   currencies               = config.get(section_name, 'currencies'),
                   withdraw_structure       = json.dumps(json.loads(config.get(section_name, 'withdraw_structure', raw=True))).decode('utf-8'),
                   crypto_currencies        = json.dumps(json.loads(config.get(section_name, 'crypto_currencies', raw=True))).decode('utf-8'),
                   accept_customers_from    = json.dumps(json.loads(config.get(section_name, 'accept_customers_from', raw=True))).decode('utf-8'),
                   is_broker_hub            = config.getboolean(section_name, 'is_broker_hub'),
                   support_url              = config.get(section_name, 'support_url'),
                   tos_url                  = config.get(section_name, 'tos_url'),
                   fee_structure            = json.dumps(json.loads(config.get(section_name, 'fee_structure', raw=True))).decode('utf-8'),
                   transaction_fee_buy      = transaction_fee_buy,
                   transaction_fee_sell     = transaction_fee_sell,
                   accounts                 = json.dumps(json.loads(config.get(section_name, 'accounts', raw=True))).decode('utf-8'),
                   status                   = config.get(section_name, 'status'),
                   ranking                  = config.getint(section_name, 'ranking'))
        session.add(e)
        session.commit()


    if section_name[:14] == 'deposit_method':
      if not DepositMethods.get_deposit_method(session, config.getint(section_name, 'id')):
        e = DepositMethods(id                         = config.getint(section_name, 'id'),
                            broker_id                 = config.getint(section_name, 'broker_id'),
                            name                      = config.get(section_name, 'name').decode('utf-8'),
                            description               = config.get(section_name, 'description').decode('utf-8'),
                            disclaimer                = config.get(section_name, 'disclaimer').decode('utf-8'),
                            type                      = config.get(section_name, 'type'),
                            percent_fee               = config.getfloat(section_name, 'percent_fee'),
                            fixed_fee                 = config.getint(section_name, 'fixed_fee'),
                            broker_deposit_ctrl_num   = config.getint(section_name, 'broker_deposit_ctrl_num'),
                            currency                  = config.get(section_name, 'currency'),
                            deposit_limits            = json.dumps(json.loads(config.get(section_name, 'deposit_limits', raw=True))).decode('utf-8'),
                            html_template             = config.get(section_name, 'html_template', raw=True).decode('utf-8'),
                            parameters                = json.dumps(json.loads(config.get(section_name, 'parameters', raw=True))).decode('utf-8') )
        session.add(e)
        session.commit()
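
Example 32 stores whole JSON documents as INI option values and decodes each one with json.loads. A minimal sketch of the same idea using Python 3's configparser (the section layout and values are made up; RawConfigParser skips '%' interpolation, mirroring the raw=True reads above):

import json
import configparser

SAMPLE = """
[currencies]
btc = ["BTC", "B", "Bitcoin", true, 8]
usd = ["USD", "$", "US Dollar", false, 2]
"""

config = configparser.RawConfigParser()
config.read_string(SAMPLE)

for key, currency_json in config.items('currencies'):
    # Each option value is a JSON array describing one currency.
    code, sign, description = json.loads(currency_json)[:3]
    print(code, sign, description)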

Example 33

Project: baidu-fuse Source File: baidufuse2.py
    def _add_file_to_buffer(self, path,file_info):
        foo = File()
        foo['st_ctime'] = file_info['local_ctime']
        foo['st_mtime'] = file_info['local_mtime']
        foo['st_mode'] = (stat.S_IFDIR | 0777) if file_info['isdir'] \
            else (stat.S_IFREG | 0777)
        foo['st_nlink'] = 2 if file_info['isdir'] else 1
        foo['st_size'] = file_info['size']
        self.buffer[path] = foo

    def _del_file_from_buffer(self,path):
        self.buffer.pop(path)

    def getattr(self, path, fh=None):
        #print 'getattr *',path
        # First check whether the file is already in the cache

        if not self.buffer.has_key(path):
            print path, 'cache miss'
            #print self.buffer
            #print self.traversed_folder
            jdata = json.loads(self.disk.meta([path]).content)
            try:
                if 'info' not in jdata:
                    raise FuseOSError(errno.ENOENT)
                if jdata['errno'] != 0:
                    raise FuseOSError(errno.ENOENT)
                file_info = jdata['info'][0]
                self._add_file_to_buffer(path,file_info)
                st = self.buffer[path].getDict()
                return st
            except:
                raise FuseOSError(errno.ENOENT)
        else:
            #print path, 'cache hit'
            return self.buffer[path].getDict()



    def readdir(self, path, offset):
        
        while True:
            try:
                logger.debug(u'reading directory ' + path)
                foo = json.loads(self.disk.list_files(path).text)
                break
            except Exception as s:
                logger.error('error',str(s))



        files = ['.', '..']
        abs_files = []  # absolute paths of the files under this folder
        for file in foo['list']:
            files.append(file['server_filename'])
            abs_files.append(file['path'])
        # Cache the file info under this folder, querying meta info in batches

        # Update: the meta API cannot return more than 100 records per call,
        # so split the paths into ceil(file_num / 100.0) groups by residue class
        if not self.traversed_folder.has_key(path) or self.traversed_folder[path] == False:
            logger.debug(u'caching ' + path)
            file_num = len(abs_files)
            group = int(math.ceil(file_num / 100.0))
            for i in range(group):
                obj = [f for n,f in enumerate(abs_files) if n % group == i]  # one batch of paths
                while 1:
                    try:
                        ret = json.loads(self.disk.meta(obj).text)
                        break
                    except:
                        print 'error'

                for file_info in ret['info']:
                    if not self.buffer.has_key(file_info['path']):
                        self._add_file_to_buffer(file_info['path'],file_info)
            #print self.buffer
            print 'finished caching', path
            self.traversed_folder[path] = True
        for r in files:
            yield r

    def _update_file_manual(self,path):
        while 1:
            try:
                jdata = json.loads(self.disk.meta([path]).content)
                break
            except:
                print 'error'

        if 'info' not in jdata:
            raise FuseOSError(errno.ENOENT)
        if jdata['errno'] != 0:
            raise FuseOSError(errno.ENOENT)
        file_info = jdata['info'][0]
        self._add_file_to_buffer(path,file_info)


    def rename(self, old, new):
        #logging.debug('* rename',old,os.path.basename(new))
        print '*'*10,'RENAME CALLED',old,os.path.basename(new),type(old),type(new)
        while True:
            try:
                ret = self.disk.rename([(old,os.path.basename(new))]).content
                jdata = json.loads(ret)
                break
            except:
                print 'error'

        if jdata['errno'] != 0:
            # The target name already exists: delete it, then rename again
            print self.disk.delete([new]).content
            print self.disk.rename([(old,os.path.basename(new))])
        self._update_file_manual(new)
        self.buffer.pop(old)


    def open(self, path, flags):
        self.readLock.acquire()
        print '*'*10,'OPEN CALLED',path,flags
        #print '[cuem]',path
        """
        Permission denied

        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            raise FuseOSError(errno.EACCES)
        """
        self.fd += 1
        self.readLock.release()
        
        return self.fd

    def create(self, path, mode,fh=None):
        # Create a file
        # Non-ASCII (Chinese) paths are problematic here
        print '*'*10,'CREATE CALLED',path,mode,type(path)
        #if 'outputstream' not in path:
        tmp_file = tempfile.TemporaryFile('r+w+b')
        foo = self.disk.upload(os.path.dirname(path),tmp_file,os.path.basename(path)).content
        ret = json.loads(foo)
        print ret
        print 'create-not-outputstream',ret
        if ret['path'] != path:
            # The file already exists
            print 'file already exists'
            raise FuseOSError(errno.EEXIST)
        '''
        else:
            print 'create:',path
            foo = File()
            foo['st_ctime'] = int(time.time())
            foo['st_mtime'] = int(time.time())
            foo['st_mode'] = (stat.S_IFREG | 0777)
            foo['st_nlink'] = 1
            foo['st_size'] = 0
            self.buffer[path] = foo
        '''


        '''
        dict(st_mode=(stat.S_IFREG | mode), st_nlink=1,
                                st_size=0, st_ctime=time.time(), st_mtime=time.time(),
                                st_atime=time.time())
        '''
        self.fd += 1
        return 0

    def write(self, path, data, offset, fp):
        # Called while a file is being uploaded
        # 4 KB (4096 bytes) per block; data holds that block's bytes
        # The last block is detected by len(data) < 4096
        # File size = offset of the last block + len(data)

        # 4 KB per call is too slow, so batch the data and upload larger chunks at once

        #print '*'*10,path,offset, len(data)

        def _block_size(stream):
            stream.seek(0,2)
            return stream.tell()

        _BLOCK_SIZE = 16 * 2 ** 20
        # Work for the first block
        if offset == 0:
            #self.uploadLock.acquire()
            #self.readLock.acquire()
            # Initialize the list of block md5s
            self.upload_blocks[path] = {'tmp':None,
                                        'blocks':[]}
            # Create a temporary buffer file
            tmp_file = tempfile.TemporaryFile('r+w+b')
            self.upload_blocks[path]['tmp'] = tmp_file

        # Write the data to the temp file; once it reaches _BLOCK_SIZE, upload the block and reset the temp file
        try:
            tmp = self.upload_blocks[path]['tmp']
        except KeyError:
            return 0
        tmp.write(data)

        if _block_size(tmp) > _BLOCK_SIZE:
            print path, 'uploading block'
            tmp.seek(0)
            try:
                foo = self.disk.upload_tmpfile(tmp,callback=ProgressBar()).content
                foofoo = json.loads(foo)
                block_md5 = foofoo['md5']
            except:
                print foo



            # Record this block's md5 in upload_blocks
            self.upload_blocks[path]['blocks'].append(block_md5)
            # Create a fresh temporary buffer file
            self.upload_blocks[path]['tmp'].close()
            tmp_file = tempfile.TemporaryFile('r+w+b')
            self.upload_blocks[path]['tmp'] = tmp_file
            print 'created temp file', tmp_file.name

        # Work for the last block
        if len(data) < 4096:
            # Check for a name collision and delete the existing file if there is one
            while True:
                try:
                    foo = self.disk.meta([path]).content
                    foofoo = json.loads(foo)
                    break
                except:
                    print 'error'


            if foofoo['errno'] == 0:
                logger.debug('Deleted the file which has same name.')
                self.disk.delete([path])
            # Check whether a final upload is still needed
            if _block_size(tmp) != 0:
                # The temp file still holds data, so upload it
                print path, 'uploading final block, size', _block_size(tmp)
                tmp.seek(0)
                while True:
                    try:
                        foo = self.disk.upload_tmpfile(tmp,callback=ProgressBar()).content
                        foofoo = json.loads(foo)
                        break
                    except:
                        print 'exception, retry.'

                block_md5 = foofoo['md5']
                # Record this block's md5 in upload_blocks
                self.upload_blocks[path]['blocks'].append(block_md5)

            # Call upload_superfile to merge the uploaded blocks
            print 'merging file', path, type(path)
            self.disk.upload_superfile(path,self.upload_blocks[path]['blocks'])
            # Remove this path's entry from upload_blocks
            self.upload_blocks.pop(path)
            # Refresh the local file-list cache
            self._update_file_manual(path)
            #self.readLock.release()
            #self.uploadLock.release()
        return len(data)


    def mkdir(self, path, mode):
        logger.debug("mkdir is:" + path)
        self.disk.mkdir(path)

    def rmdir(self, path):
        logger.debug("rmdir is:" + path)
        self.disk.delete([path])

    def read(self, path, size, offset, fh):
        print '*'*10,'READ CALLED',path,size,offset
        #logger.debug("read is: " + path)
        # Changed to download via an external tool, checking the temp file's size each time

        if offset == 0:
            tmp = tempfile.mktemp()
            url = self.disk.download_url([path])[0]
            logger.debug('%s started downloader' % url)
            """
            thread = threading.Thread(target=self.downloader, args=(url, tmp))
            thread.start()
            while thread.isAlive():
                pass
            """
            number = 5

            cookies = ';'.join(['%s=%s' % (k, v)
                        for k, v in self.disk.session.cookies.items()])
            cmd = 'axel --alternate -n{0} -H "Cookies:{1}" "{2}" -o "{3}"'.format(number, cookies, url, tmp)
            logger.debug('now start axel on %s' % path)
            os.system(cmd)
            logger.debug('axel on %s done.' % path)

            # self.downloader(url, tmp)
            logger.debug('%s downloaded' % url)
            self.downloading_files[path] = (tmp, open(tmp,'rb'))

        file_handler = self.downloading_files[path][1]
        return file_handler.read(size)


        """
        paras = {'Range': 'bytes=%s-%s' % (offset, offset + size - 1)}
        while True:
            try:
                foo = self.disk.download(path, headers=paras).content
                return foo
            except:
                pass
        """

    def downloader(self, url, path):
        number = 5

        cookies = ';'.join(['%s=%s' % (k, v)
                    for k, v in self.disk.session.cookies.items()])
        cmd = 'axel --alternate -n{0} -H "Cookies:{1}" {2} -o {3}'.format(number, cookies, url, path)
        logger.debug('now start axel on %s' % path)
        os.system(cmd)
        logger.debug('axel on %s done.' % path)
        return

    access = None
    statfs = None
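
Example 33 wraps many of its json.loads calls in bare while/try loops that retry forever when the request or the parse fails. A bounded variant of that retry pattern (fetch is a hypothetical stand-in for the self.disk.* calls):

import json
import time

def load_json_with_retry(fetch, attempts=3, delay=1.0):
    # Call fetch(), parse the body as JSON, and retry a fixed number of
    # times on failure instead of spinning indefinitely.
    last_error = None
    for _ in range(attempts):
        try:
            return json.loads(fetch())
        except (ValueError, IOError) as exc:
            last_error = exc
            time.sleep(delay)
    raise last_error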

Example 34

Project: pagure Source File: test_pagure_flask_api_issue.py
    @patch('pagure.lib.git.update_git')
    @patch('pagure.lib.notify.send_email')
    def test_api_assign_issue(self, p_send_email, p_ugt):
        """ Test the api_assign_issue method of the flask api. """
        p_send_email.return_value = True
        p_ugt.return_value = True

        tests.create_projects(self.session)
        tests.create_tokens(self.session)
        tests.create_tokens_acl(self.session)

        headers = {'Authorization': 'token aaabbbcccddd'}

        # Invalid project
        output = self.app.post('/api/0/foo/issue/1/assign', headers=headers)
        self.assertEqual(output.status_code, 404)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Project not found",
              "error_code": "ENOPROJECT",
            }
        )

        # Valid token, wrong project
        output = self.app.post('/api/0/test2/issue/1/assign', headers=headers)
        self.assertEqual(output.status_code, 401)
        data = json.loads(output.data)
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.name,
                         data['error_code'])
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data['error'])

        # No input
        output = self.app.post('/api/0/test/issue/1/assign', headers=headers)
        self.assertEqual(output.status_code, 404)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Issue not found",
              "error_code": "ENOISSUE",
            }
        )

        # Create normal issue
        repo = pagure.lib.get_project(self.session, 'test')
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue #1',
            content='We should work on this',
            user='pingou',
            ticketfolder=None,
            private=False,
            issue_uid='aaabbbccc#1',
        )
        self.session.commit()
        self.assertEqual(msg.title, 'Test issue #1')

        # Check comments before
        repo = pagure.lib.get_project(self.session, 'test')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 0)

        data = {
            'title': 'test issue',
        }

        # Incomplete request
        output = self.app.post(
            '/api/0/test/issue/1/assign', data=data, headers=headers)
        self.assertEqual(output.status_code, 400)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Invalid or incomplete input submited",
              "error_code": "EINVALIDREQ",
            }
        )

        # No change
        repo = pagure.lib.get_project(self.session, 'test')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(issue.status, 'Open')

        data = {
            'assignee': 'pingou',
        }

        # Valid request
        output = self.app.post(
            '/api/0/test/issue/1/assign', data=data, headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {'message': 'Issue assigned'}
        )

        # One comment added
        repo = pagure.lib.get_project(self.session, 'test')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(issue.assignee.user, 'pingou')

        # Create another project
        item = pagure.lib.model.Project(
            user_id=2,  # foo
            name='foo',
            description='test project #3',
            hook_token='aaabbbdddeee',
        )
        self.session.add(item)
        self.session.commit()

        # Create a token for pingou for this project
        item = pagure.lib.model.Token(
            id='pingou_foo',
            user_id=1,
            project_id=3,
            expiration=datetime.datetime.utcnow() + datetime.timedelta(
                days=30)
        )
        self.session.add(item)
        self.session.commit()

        # Give `issue_change_status` to this token when `issue_comment`
        # is required
        item = pagure.lib.model.TokenAcl(
            token_id='pingou_foo',
            acl_id=5,
        )
        self.session.add(item)
        self.session.commit()

        repo = pagure.lib.get_project(self.session, 'foo')
        # Create private issue
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue',
            content='We should work on this',
            user='foo',
            ticketfolder=None,
            private=True,
            issue_uid='aaabbbccc#2',
        )
        self.session.commit()
        self.assertEqual(msg.title, 'Test issue')

        # Check before
        repo = pagure.lib.get_project(self.session, 'foo')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 0)

        data = {
            'assignee': 'pingou',
        }
        headers = {'Authorization': 'token pingou_foo'}

        # Valid request but un-authorized
        output = self.app.post(
            '/api/0/foo/issue/1/assign', data=data, headers=headers)
        self.assertEqual(output.status_code, 401)
        data = json.loads(output.data)
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.name,
                         data['error_code'])
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data['error'])

        # No comment added
        repo = pagure.lib.get_project(self.session, 'foo')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 0)

        # Create token for user foo
        item = pagure.lib.model.Token(
            id='foo_token2',
            user_id=2,
            project_id=3,
            expiration=datetime.datetime.utcnow() + datetime.timedelta(days=30)
        )
        self.session.add(item)
        self.session.commit()
        tests.create_tokens_acl(self.session, token_id='foo_token2')

        data = {
            'assignee': 'pingou',
        }
        headers = {'Authorization': 'token foo_token2'}

        # Valid request and authorized
        output = self.app.post(
            '/api/0/foo/issue/1/assign', data=data, headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {'message': 'Issue assigned'}
        )
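
Example 34 drives a Flask API through the test client and decodes each response body with json.loads before asserting on it. A self-contained sketch of that test pattern, with a hypothetical app and route standing in for pagure's API:

import json
import unittest

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/api/0/ping')
def ping():
    return jsonify({'message': 'pong'})

class PingTest(unittest.TestCase):
    def test_ping(self):
        output = app.test_client().get('/api/0/ping')
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)  # output.data is the raw byte body
        self.assertDictEqual(data, {'message': 'pong'})

if __name__ == '__main__':
    unittest.main()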

Example 35

Project: MediaBrowser.Kodi Source File: InProgressItems.py
    def updateInProgress(self):
        self.logMsg("updateInProgress Called")
        useBackgroundData = xbmcgui.Window(10000).getProperty("BackgroundDataLoaded") == "true"
        addonSettings = xbmcaddon.Addon(id='plugin.video.xbmb3c')
        mb3Host = addonSettings.getSetting('ipaddress')
        mb3Port = addonSettings.getSetting('port')    
        userName = addonSettings.getSetting('username')     
        
        downloadUtils = DownloadUtils()
        userid = downloadUtils.getUserId()
        self.logMsg("InProgress UserName : " + userName + " UserID : " + userid)
        
        self.logMsg("Updating In Progress Movie List")
        
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=30&Recursive=true&SortBy=DatePlayed&SortOrder=Descending&Fields=Path,Genres,MediaStreams,Overview,ShortOverview,CriticRatingSummary&Filters=IsResumable&IncludeItemTypes=Movie&format=json"
   
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        if(jsonData == ""):
            return
            
        result = json.loads(jsonData)
        result = result.get("Items")
        if(result == None):
            result = []
            
        db = Database()
        WINDOW = xbmcgui.Window( 10000 )

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
            
            rating = item.get("CommunityRating")
            criticrating = item.get("CriticRating")
            officialrating = item.get("OfficialRating")
            criticratingsummary = ""
            if(item.get("CriticRatingSummary") != None):
                criticratingsummary = item.get("CriticRatingSummary").encode('utf-8')
            plot = item.get("Overview")
            if plot == None:
                plot=''
            plot=plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            year = item.get("ProductionYear")
            if(item.get("RunTimeTicks") != None):
                runtime = str(int(item.get("RunTimeTicks"))/(10000000*60))
            else:
                runtime = "0"

            userData = item.get("UserData")
            if(userData != None):                
                reasonableTicks = int(userData.get("PlaybackPositionTicks")) / 1000
                seekTime = reasonableTicks / 10000
                duration = float(runtime)
                resume = float(seekTime) / 60.0
                if (duration == 0):
                    percentage=0
                else:
                    percentage = (resume / duration) * 100.0
                perasint = int(percentage)
                title = str(perasint) + "% " + title        
                
            item_id = item.get("Id")
            if useBackgroundData != True:
                poster = downloadUtils.getArtwork(item, "Primary3")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(item, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                landscape = downloadUtils.getArtwork(item, "Thumb3")
                discart = downloadUtils.getArtwork(item, "Disc")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumbnail = downloadUtils.getArtwork(item, "Thumb3")
                else:
                    realthumbnail = medium_fanart
            else:
                poster = db.get(item_id +".Primary3")
                thumbnail = db.get(item_id +".Primary")
                logo = db.get(item_id +".Logo")
                fanart = db.get(item_id +".Backdrop")
                landscape = db.get(item_id +".Thumb3")
                discart = db.get(item_id +".Disc")
                medium_fanart = db.get(item_id +".Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumbnail = db.get(item_id +".Thumb3")
                else:
                    realthumbnail = medium_fanart
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Thumb = " + realthumbnail, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Art(discart)  = " + discart, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Art(poster)  = " + poster, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Rating  = " + str(rating), level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".CriticRating  = " + str(criticrating), level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".CriticRatingSummary  = " + criticratingsummary, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Year  = " + str(year), level=2)
            self.logMsg("InProgressMovieMB3." + str(item_count) + ".Runtime  = " + str(runtime), level=2)
            
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Thumb", realthumbnail)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Path", playUrl)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Art(landscape)", landscape)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Art(discart)", discart)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Art(medium_fanart)", medium_fanart)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Art(poster)", poster)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Rating", str(rating))
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Mpaa", str(officialrating))
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".CriticRating", str(criticrating))
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".CriticRatingSummary", criticratingsummary)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".ShortPlot", shortplot)
            
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Year", str(year))
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".Runtime", str(runtime))
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".ItemGUID", item_id)
            WINDOW.setProperty("InProgressMovieMB3." + str(item_count) + ".id", item_id)
            
            WINDOW.setProperty("InProgressMovieMB3.Enabled", "true")
            
            item_count = item_count + 1
        
        # blank any not available
        for x in range(item_count, 11):
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Title", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Thumb", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Path", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Art(fanart)", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Art(discart)", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Art(clearlogo)", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Art(poster)", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Rating", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".CriticRating", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".CriticRatingSummary", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Plot", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Year", "")
            WINDOW.setProperty("InProgressMovieMB3." + str(x) + ".Runtime", "")
        
        
        #Updating Recent TV Show List
        self.logMsg("Updating In Progress Episode List")
        
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=30&Recursive=true&SortBy=DatePlayed&SortOrder=Descending&Fields=Path,Genres,MediaStreams,Overview,CriticRatingSummary&Filters=IsResumable&IncludeItemTypes=Episode&format=json"
        
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        result = json.loads(jsonData)
        
        result = result.get("Items")
        if(result == None):
            result = []   

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            seriesName = "Missing Name"
            if(item.get("SeriesName") != None):
                seriesName = item.get("SeriesName").encode('utf-8')   

            eppNumber = "X"
            tempEpisodeNumber = "00"
            if(item.get("IndexNumber") != None):
                eppNumber = item.get("IndexNumber")
                if eppNumber < 10:
                    tempEpisodeNumber = "0" + str(eppNumber)
                else:
                    tempEpisodeNumber = str(eppNumber)
            
            seasonNumber = item.get("ParentIndexNumber")
            if seasonNumber < 10:
                tempSeasonNumber = "0" + str(seasonNumber)
            else:
                tempSeasonNumber = str(seasonNumber)
            rating = str(item.get("CommunityRating"))
            plot = item.get("Overview")
            if plot == None:
                plot=''
            plot=plot.encode('utf-8')
            
            if(item.get("RunTimeTicks") != None):
                runtime = str(int(item.get("RunTimeTicks"))/(10000000*60))
            else:
                runtime = "0"            
            
            userData = item.get("UserData")
            if(userData != None):                
                reasonableTicks = int(userData.get("PlaybackPositionTicks")) / 1000
                seekTime = reasonableTicks / 10000
                duration = float(runtime)
                resume = float(seekTime) / 60.0
                if (duration == 0):
                    percentage=0
                else:
                    percentage = (resume / duration) * 100.0
                perasint = int(percentage)
                title = str(perasint) + "% " + title               

            item_id = item.get("Id")    
            seriesId = item.get("SeriesId")
            
            if useBackgroundData != True:
                seriesJsonData = downloadUtils.downloadUrl("http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items/" + seriesId + "?format=json", suppress=True, popup=1 )
                seriesResult = json.loads(seriesJsonData)      
                poster = downloadUtils.getArtwork(seriesResult, "Primary3")
                small_poster = downloadUtils.getArtwork(seriesResult, "Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(seriesResult, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                banner = downloadUtils.getArtwork(item, "Banner")
                if (seriesResult.get("ImageTags") != None and seriesResult.get("ImageTags").get("Thumb") != None):
                  seriesthumbnail = downloadUtils.getArtwork(seriesResult, "Thumb3")
                else:
                  seriesthumbnail = fanart 
            else:
                officialrating = db.get(seriesId + ".OfficialRating")
                poster = db.get(seriesId + ".Primary3")
                small_poster = db.get(seriesId + ".Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = db.get(seriesId + ".Logo")
                fanart = db.get(seriesId + ".Backdrop")
                medium_fanart = db.get(seriesId + ".Backdrop3")
                banner = db.get(seriesId + ".Banner")
                if item.get("SeriesThumbImageTag") != None:
                   seriesthumbnail = db.get(seriesId + ".Thumb3")
                else:
                   seriesthumbnail = fanart
              
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".EpisodeTitle = " + title, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".ShowTitle = " + seriesName, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".EpisodeNo = " + tempEpisodeNumber, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".SeasonNo = " + tempSeasonNumber, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Rating  = " + rating, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)  = " + fanart, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)  = " + logo, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)  = " + banner, level=2)  
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)  = " + poster, level=2)
            self.logMsg("InProgresstEpisodeMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".EpisodeTitle", title)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".ShowTitle", seriesName)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".EpisodeNo", tempEpisodeNumber)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".SeasonNo", tempSeasonNumber)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".SeriesThumb", seriesthumbnail)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Rating", rating)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)", fanart)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.medium_fanart)", medium_fanart)
            
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)", logo)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)", banner)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)", poster)
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(item_count) + ".Plot", plot)
            
            WINDOW.setProperty("InProgresstEpisodeMB3.Enabled", "true")
            
            item_count = item_count + 1
            
        # blank out any slots not filled above
        for x in range(item_count, 11):            
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".EpisodeTitle", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".ShowTitle", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".EpisodeNo", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".SeasonNo", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Thumb", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Path", "")            
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Rating", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Art(tvshow.fanart)", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Art(tvshow.clearlogo)", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Art(tvshow.banner)", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Art(tvshow.poster)", "")
            WINDOW.setProperty("InProgresstEpisodeMB3." + str(x) + ".Plot", "")        

Example 36

Project: avos Source File: nova_data.py
def data(TEST):
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.volume_snapshots = utils.TestDataContainer()
    TEST.volume_types = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()

    # Data returned by novaclient.
    # It is used when the API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3775",
         "name": 'test_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Volume name',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
         "name": '',
         "status": 'in-use',
         "size": 10,
         "display_name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
         "name": 'my_volume',
         "status": 'in-use',
         "size": 30,
         "display_name": 'My Volume',
         "display_description": '',
         "device": "/dev/hdk",
         "created_at": '2011-05-01 11:54:33',
         "volume_type": 'vol_type_2',
         "attachments": [{"id": "2", "server_id": '1',
                          "device": "/dev/hdk"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3771",
         "name": 'non_bootable_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Non Bootable Volume',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': 1,
                                         'name': 'vol_type_1'})
    vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': 2,
                                         'name': 'vol_type_2'})
    TEST.volume_types.add(vol_type1, vol_type2)

    # Flavors
    flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                               'name': 'm1.tiny',
                               'vcpus': 1,
                               'disk': 0,
                               'ram': 512,
                               'swap': 0,
                               'extra_specs': {},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 0})
    flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               'name': 'm1.massive',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': {'Trusted': True, 'foo': 'bar'},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
                               'name': 'm1.secret',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': {},
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_4 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
                               'name': 'm1.metadata',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': FlavorExtraSpecs(
                                   {'key': 'key_mock',
                                    'value': 'value_mock'}),
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "1",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "2",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):

        def get_id(is_uuid):
            # The counter lives on the function object itself
            # (get_id.current_int_id, initialised below); no global is needed.
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"default",
                                                "description": u"default"})
        sec_group_2 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"other_group",
                                                "description": u"NotDefault."})
        sec_group_3 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"another_group",
                                                "description": u"NotDefault."})

        rule = {'id': get_id(is_uuid),
                'group': {},
                'ip_protocol': u"tcp",
                'from_port': u"80",
                'to_port': u"80",
                'parent_group_id': sec_group_1.id,
                'ip_range': {'cidr': u"0.0.0.0/32"}}

        icmp_rule = {'id': get_id(is_uuid),
                     'group': {},
                     'ip_protocol': u"icmp",
                     'from_port': u"9",
                     'to_port': u"5",
                     'parent_group_id': sec_group_1.id,
                     'ip_range': {'cidr': u"0.0.0.0/32"}}

        group_rule = {'id': 3,
                      'group': {},
                      'ip_protocol': u"tcp",
                      'from_port': u"80",
                      'to_port': u"80",
                      'parent_group_id': sec_group_1.id,
                      'source_group_id': sec_group_1.id}

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {"rules": [rule_obj, rule_obj2, rule_obj3],
                "groups": [sec_group_1, sec_group_2, sec_group_3]}

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      volumes='1',
                      gigabytes='1000',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = ['floating_ips', 'fixed_ips',
                            'security_groups', 'security_group_rules']
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'ram': {'used': 0,
                                'quota': 10000},
                        'cores': {'used': 0,
                                  'quota': 20},
                        'floating_ips': {'used': 0,
                                         'quota': 10},
                        'security_groups': {'used': 0,
                                            'quota': 10},
                        'volumes': {'used': 0,
                                    'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {"absolute": {"maxImageMeta": 128,
                           "maxPersonality": 5,
                           "maxPersonalitySize": 10240,
                           "maxSecurityGroupRules": 20,
                           "maxSecurityGroups": 10,
                           "maxServerMeta": 128,
                           "maxTotalCores": 20,
                           "maxTotalFloatingIps": 10,
                           "maxTotalInstances": 10,
                           "maxTotalKeypairs": 100,
                           "maxTotalRAMSize": 10000,
                           "totalCoresUsed": 0,
                           "totalInstancesUsed": 0,
                           "totalKeyPairsUsed": 0,
                           "totalRAMUsed": 0,
                           "totalSecurityGroupsUsed": 0}}
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {"host": "http://nova.example.com:8774",
            "name": "server_1",
            "status": "ACTIVE",
            "tenant_id": TEST.tenants.first().id,
            "user_id": TEST.user.id,
            "server_id": "1",
            "flavor_id": flavor_1.id,
            "image_id": TEST.images.first().id,
            "key_name": keypair.name}
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2",
                 "status": "BUILD",
                 "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": u'\u4e91\u89c4\u5219',
                 "status": "ACTIVE",
                 "tenant_id": tenant3.id,
                "server_id": "3"})
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3)

    # VNC Console Data
    console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
                            u'type': u'novnc'}}
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
                            u'type': u'spice'}}
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {u'console': {u'url': u'http://example.com:6080/rdp_auto.html',
                            u'type': u'rdp'}}
    TEST.servers.rdp_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {'id': 1,
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_2 = {'id': 2,
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {'id': str(uuid.uuid4()),
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_4 = {'id': str(uuid.uuid4()),
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {"tenant_id": TEST.tenant.id,
                  "instance_name": server_1.name,
                  "flavor_name": flavor_1.name,
                  "flavor_vcpus": flavor_1.vcpus,
                  "flavor_disk": flavor_1.disk,
                  "flavor_ram": flavor_1.ram}
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {"tenant_id": tenant3.id,
                    "instance_name": server_3.name,
                    "flavor_name": flavor_1.name,
                    "flavor_vcpus": flavor_1.vcpus,
                    "flavor_disk": flavor_1.disk,
                    "flavor_ram": flavor_1.ram}
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    volume_snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
         'display_name': 'test snapshot',
         'display_description': 'vol snap!',
         'size': 40,
         'status': 'available',
         'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'})
    volume_snapshot2 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
         'display_name': '',
         'display_description': 'vol snap 2!',
         'size': 80,
         'status': 'available',
         'volume_id': '3b189ac8-9166-ac7f-90c9-16c8bf9e01ac'})
    TEST.volume_snapshots.add(volume_snapshot)
    TEST.volume_snapshots.add(volume_snapshot2)

    cert_data = {'private_key': 'private',
                 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(availability_zones.AvailabilityZone(
        availability_zones.AvailabilityZoneManager(None),
        {
            'zoneName': 'nova',
            'zoneState': {'available': True},
            'hosts': {
                "host001": {
                    "nova-network": {
                        "active": True,
                        "available": True,
                    },
                },
            },
        },
    ))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack001", "id": 3},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
        },
    )

    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack002", "id": 4},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack002",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
        },
    )
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "instance-host", "id": 5},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-conductor",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_2 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_3 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "down",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack002",
        "disabled_reason": None,
    })

    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "foo",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 1,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "bar",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 2,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })

    host2 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack002",
        "service": "nova-conductor",
        "zone": "testing",
    })

    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })

    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })

    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)
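
The json.loads calls in this fixture decode JSON built from %-style templates (SERVER_DATA, USAGE_DATA) rather than from a network response. A minimal sketch of that technique, with an illustrative stand-in for the much larger SERVER_DATA template used above:

import json

SERVER_DATA = '''{"server": {"id": "%(server_id)s",
                             "name": "%(name)s",
                             "status": "%(status)s"}}'''

vals = {"server_id": "1", "name": "server_1", "status": "ACTIVE"}
server = json.loads(SERVER_DATA % vals)["server"]
print(server["name"])  # server_1

Note that values are interpolated into raw JSON text, so a value containing a double quote would produce an invalid document; the fixture works because its test values are known to be safe.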

Example 37

Project: MediaBrowser.Kodi Source File: PlaybackUtils.py
    def PLAY(self, url, handle):
        self.logMsg("== ENTER: PLAY ==")
        xbmcgui.Window(10000).setProperty("ThemeMediaMB3Disable", "true")
        url = urllib.unquote(url)
        
        urlParts = url.split(',;')
        self.logMsg("PLAY ACTION URL PARTS : " + str(urlParts))
        server = urlParts[0]
        id = urlParts[1]
        autoResume = 0
        if(len(urlParts) > 2):
            autoResume = int(urlParts[2])
            self.logMsg("PLAY ACTION URL AUTO RESUME : " + str(autoResume))
        
        ip,port = server.split(':')
        userid = self.downloadUtils.getUserId()
        seekTime = 0
        resume = 0
        
        id = urlParts[1]
        jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + id + "?format=json&ImageTypeLimit=1", suppress=False, popup=1 )
        if(jsonData == ""):
            return
        result = json.loads(jsonData)
        
        # Is this a .strm placeholder?
        IsStrmPlaceholder = False    
        if result.get("Path", "").endswith(".strm"):
            IsStrmPlaceholder = True
        
        resume_result = 1
        
        if IsStrmPlaceholder == False:
            if(autoResume != 0):
              if(autoResume == -1):
                resume_result = 1
              else:
                resume_result = 0
                seekTime = (autoResume / 1000) / 10000
            else:
              userData = result.get("UserData")
              resume_result = 0
                
              if userData.get("PlaybackPositionTicks") != 0:
                reasonableTicks = int(userData.get("PlaybackPositionTicks")) / 1000
                seekTime = reasonableTicks / 10000
                displayTime = str(datetime.timedelta(seconds=seekTime))
                display_list = [ self.language(30106) + ' ' + displayTime, self.language(30107)]
                resumeScreen = xbmcgui.Dialog()
                resume_result = resumeScreen.select(self.language(30105), display_list)
                if resume_result == -1:
                  return

        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        
        '''
        # use this to print out the current playlist info
        for x in range(0, len(playlist)):
            self.logMsg("PLAYLIST_ITEM : " + str(playlist[x].getfilename()))
        
        self.logMsg("PLAYLIST_ITEM Position : " + str(playlist.getposition()))
        if(len(playlist) > 0 and "plugin://" in playlist[playlist.getposition()].getfilename()):
            self.logMsg("PLAYLIST_ITEM Removing : " + playlist[playlist.getposition()].getfilename())
            playlist.remove(playlist[playlist.getposition()].getfilename())
        '''
        
        playlist.clear()
        # check for any intros first
        jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + id + "/Intros?format=json&ImageTypeLimit=1", suppress=False, popup=1 )     
        self.logMsg("Intros jsonData: " + jsonData)
        if(jsonData == ""):
            return        
        result = json.loads(jsonData)
                   
        # do not add intros when resume is invoked
        if result.get("Items") != None and (seekTime == 0 or resume_result == 1):
          for item in result.get("Items"):
            id = item.get("Id")
            jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + id + "?format=json&ImageTypeLimit=1", suppress=False, popup=1 )
            if(jsonData == ""):
                return            
            result = json.loads(jsonData)
            playurl = PlayUtils().getPlayUrl(server, id, result)
            self.logMsg("Play URL: " + playurl)    
            thumbPath = self.downloadUtils.getArtwork(item, "Primary")
            listItem = xbmcgui.ListItem(path=playurl, iconImage=thumbPath, thumbnailImage=thumbPath)
            self.setListItemProps(server, id, listItem, result)

            # Cannot play virtual items
            if (result.get("LocationType") == "Virtual") or (result.get("IsPlaceHolder") == True):
                xbmcgui.Dialog().ok(self.language(30128), self.language(30129))
                return

            watchedurl = 'http://' + server + '/mediabrowser/Users/'+ userid + '/PlayedItems/' + id
            positionurl = 'http://' + server + '/mediabrowser/Users/'+ userid + '/PlayingItems/' + id
            deleteurl = 'http://' + server + '/mediabrowser/Items/' + id
            
            # set the current playing info
            WINDOW = xbmcgui.Window( 10000 )
            WINDOW.setProperty(playurl+"watchedurl", watchedurl)
            WINDOW.setProperty(playurl+"positionurl", positionurl)
            WINDOW.setProperty(playurl+"deleteurl", "")
         
            WINDOW.setProperty(playurl+"runtimeticks", str(result.get("RunTimeTicks")))
            WINDOW.setProperty(playurl+"type", result.get("Type"))
            WINDOW.setProperty(playurl+"item_id", id)
            
            if PlayUtils().isDirectPlay(result) == True:
              if self.settings.getSetting('playFromStream') == "true":
                playMethod = "DirectStream"
              else:
                playMethod = "DirectPlay"
            else:
              playMethod = "Transcode"
            WINDOW.setProperty(playurl+"playmethod", playMethod)
            
            mediaSources = result.get("MediaSources")
            if(mediaSources != None):
              if mediaSources[0].get('DefaultAudioStreamIndex') != None:
                WINDOW.setProperty(playurl+"AudioStreamIndex", str(mediaSources[0].get('DefaultAudioStreamIndex')))  
              if mediaSources[0].get('DefaultSubtitleStreamIndex') != None:
                WINDOW.setProperty(playurl+"SubtitleStreamIndex", str(mediaSources[0].get('DefaultSubtitleStreamIndex')))
            
            playlist.add(playurl, listItem)
       
        id = urlParts[1]
        jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + id + "?format=json&ImageTypeLimit=1", suppress=False, popup=1 )   
        if(jsonData == ""):
            return    
        self.logMsg("Play jsonData: " + jsonData)
        result = json.loads(jsonData)
        playurl = PlayUtils().getPlayUrl(server, id, result)
        self.logMsg("Play URL: " + playurl)    
        thumbPath = self.downloadUtils.getArtwork(result, "Primary")
        listItem = xbmcgui.ListItem(path=playurl, iconImage=thumbPath, thumbnailImage=thumbPath)
        self.setListItemProps(server, id, listItem, result)

        # Cannot play virtual items
        if (result.get("LocationType") == "Virtual"):
          xbmcgui.Dialog().ok(self.language(30128), self.language(30129))
          return

        watchedurl = 'http://' + server + '/mediabrowser/Users/'+ userid + '/PlayedItems/' + id
        positionurl = 'http://' + server + '/mediabrowser/Users/'+ userid + '/PlayingItems/' + id
        deleteurl = 'http://' + server + '/mediabrowser/Items/' + id

        # set the current playing info
        WINDOW = xbmcgui.Window( 10000 )
        WINDOW.setProperty(playurl+"watchedurl", watchedurl)
        WINDOW.setProperty(playurl+"positionurl", positionurl)
        WINDOW.setProperty(playurl+"deleteurl", "")
        if result.get("Type")=="Episode" and self.settings.getSetting("offerDelete")=="true":
          WINDOW.setProperty(playurl+"deleteurl", deleteurl)
        
        if result.get("Type")=="Episode":
            WINDOW.setProperty(playurl+"refresh_id", result.get("SeriesId"))
        else:
            WINDOW.setProperty(playurl+"refresh_id", id)
            
        WINDOW.setProperty(playurl+"runtimeticks", str(result.get("RunTimeTicks")))
        WINDOW.setProperty(playurl+"type", result.get("Type"))
        WINDOW.setProperty(playurl+"item_id", id)
        
        if PlayUtils().isDirectPlay(result) == True:
          if self.settings.getSetting('playFromStream') == "true":
            playMethod = "DirectStream"
          else:
            playMethod = "DirectPlay"
        else:
          playMethod = "Transcode"
        if IsStrmPlaceholder == True:
            playMethod = "DirectStream"
          
        WINDOW.setProperty(playurl+"playmethod", playMethod)
            
        mediaSources = result.get("MediaSources")
        if(mediaSources != None):
          if mediaSources[0].get('DefaultAudioStreamIndex') != None:
            WINDOW.setProperty(playurl+"AudioStreamIndex", str(mediaSources[0].get('DefaultAudioStreamIndex')))  
          if mediaSources[0].get('DefaultSubtitleStreamIndex') != None:
            WINDOW.setProperty(playurl+"SubtitleStreamIndex", str(mediaSources[0].get('DefaultSubtitleStreamIndex')))
        
        playlist.add(playurl, listItem)
        
        if self.settings.getSetting("autoPlaySeason")=="true" and result.get("Type")=="Episode":
            # add remaining unplayed episodes if applicable
            seasonId = result.get("SeasonId")
            jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items?ParentId=" + seasonId + "&ImageTypeLimit=1&StartIndex=1&SortBy=SortName&SortOrder=Ascending&Filters=IsUnPlayed&IncludeItemTypes=Episode&IsVirtualUnaired=false&Recursive=true&IsMissing=False&format=json", suppress=False, popup=1 )     
            if(jsonData == ""):
                return
            result = json.loads(jsonData)
            if result.get("Items") != None:
              for item in result.get("Items"):
                id = item.get("Id")
                jsonData = self.downloadUtils.downloadUrl("http://" + server + "/mediabrowser/Users/" + userid + "/Items/" + id + "?format=json&ImageTypeLimit=1", suppress=False, popup=1 )
                if(jsonData == ""):
                    return
                result = json.loads(jsonData)
                playurl = PlayUtils().getPlayUrl(server, id, result)
                self.logMsg("Play URL: " + playurl)    
                thumbPath = self.downloadUtils.getArtwork(item, "Primary")
                listItem = xbmcgui.ListItem(path=playurl, iconImage=thumbPath, thumbnailImage=thumbPath)
                self.setListItemProps(server, id, listItem, result)
        
                watchedurl = 'http://' + server + '/mediabrowser/Users/'+ userid + '/PlayedItems/' + id
                positionurl = 'http://' + server + '/mediabrowser/Users/'+ userid + '/PlayingItems/' + id
                deleteurl = 'http://' + server + '/mediabrowser/Items/' + id
                
                # set the current playing info
                WINDOW = xbmcgui.Window( 10000 )
                WINDOW.setProperty(playurl+"watchedurl", watchedurl)
                WINDOW.setProperty(playurl+"positionurl", positionurl)
                WINDOW.setProperty(playurl+"deleteurl", "")
             
                WINDOW.setProperty(playurl+"runtimeticks", str(result.get("RunTimeTicks")))
                WINDOW.setProperty(playurl+"type", result.get("Type"))
                WINDOW.setProperty(playurl+"item_id", id)
                WINDOW.setProperty(playurl+"refresh_id", result.get("SeriesId"))
                
                if PlayUtils().isDirectPlay(result) == True:
                  if self.settings.getSetting('playFromStream') == "true":
                    playMethod = "DirectStream"
                  else:
                    playMethod = "DirectPlay"
                else:
                  playMethod = "Transcode"
                WINDOW.setProperty(playurl+"playmethod", playMethod)
                
                mediaSources = result.get("MediaSources")
                if(mediaSources != None):
                  if mediaSources[0].get('DefaultAudioStreamIndex') != None:
                    WINDOW.setProperty(playurl+"AudioStreamIndex", str(mediaSources[0].get('DefaultAudioStreamIndex')))  
                  if mediaSources[0].get('DefaultSubtitleStreamIndex') != None:
                    WINDOW.setProperty(playurl+"SubtitleStreamIndex", str(mediaSources[0].get('DefaultSubtitleStreamIndex')))
                
                playlist.add(playurl, listItem)
        
        xbmc.Player().play(playlist)
        
        # If resuming, wait for playback to start and then
        # seek to the saved position
        if resume_result == 0:
            self.seekToPosition(seekTime)
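
Each step of this method repeats the same fetch-and-decode sequence: build an item URL, download it, bail out if the body is empty, then json.loads the text. A minimal sketch of that sequence as a single helper (fetch_json and the stub are illustrative, not part of the add-on; downloadUrl returning "" on failure matches the behaviour relied on above):

import json

def fetch_json(download_url, url):
    # download_url stands in for self.downloadUtils.downloadUrl
    json_text = download_url(url, suppress=False, popup=1)
    if json_text == "":
        return None
    return json.loads(json_text)

stub = lambda url, suppress, popup: '{"Type": "Episode", "Id": "abc"}'
result = fetch_json(stub, "http://server/mediabrowser/Users/u/Items/abc?format=json")
print(result["Type"])  # Episode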

Example 38

Project: SchoolIdolAPI Source File: quickupdate.py
    def handle(self, *args, **options):
        if 'songs' in args:
            page_url = u'http://schoolido.lu/api/songs/?page_size=50&expand_event'
            while page_url is not None:
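                # Each decoded page carries a 'next' URL, which is None on
                # the last page and therefore ends this loop.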
                response = urllib.urlopen(page_url)
                data = json.loads(response.read())
                page_url = data['next']
                for song in data['results']:
                    data = {
                        'romaji_name': song['romaji_name'],
                        'translated_name': song['translated_name'],
                        'attribute': song['attribute'],
                        'BPM': song['BPM'],
                        'time': song['time'],
                        'main_unit': song['main_unit'],
                        'event': models.Event.objects.get(japanese_name=song['event']['japanese_name']) if song['event'] else None,
                        'rank': song['rank'],
                        'daily_rotation': song['daily_rotation'],
                        'daily_rotation_position': song['daily_rotation_position'],
                        'image': imageURLToDatabase(song['image']),
                        'easy_difficulty': song['easy_difficulty'],
                        'easy_notes': song['easy_notes'],
                        'normal_difficulty': song['normal_difficulty'],
                        'normal_notes': song['normal_notes'],
                        'hard_difficulty': song['hard_difficulty'],
                        'hard_notes': song['hard_notes'],
                        'expert_difficulty': song['expert_difficulty'],
                        'expert_random_difficulty': song['expert_random_difficulty'],
                        'expert_notes': song['expert_notes'],
                        'master_difficulty': song['master_difficulty'],
                        'master_notes': song['master_notes'],
                        'available': song['available'],
                        'itunes_id': song['itunes_id'],
                    }
                    print u'======== Song {} ========'.format(song['name'])
                    pprint(data)
                    models.Song.objects.update_or_create(name=song['name'], defaults=data)
            return

        if 'idols' in args:
            page_url = u'http://schoolido.lu/api/idols/?page_size=50'
            while page_url is not None:
                response = urllib.urlopen(page_url)
                data = json.loads(response.read())
                page_url = data['next']
                for idol in data['results']:
                    data = {
                        'japanese_name': idol['japanese_name'],
                        'main': idol['main'],
                        'main_unit': idol['main_unit'],
                        'sub_unit': idol['sub_unit'],
                        'age': idol['age'],
                        'school': idol['school'],
                        'birthday': datetime.datetime.strptime(idol['birthday'], '%m-%d').date() if idol['birthday'] else None,
                        'astrological_sign': idol['astrological_sign'],
                        'blood': idol['blood'],
                        'height': idol['height'],
                        'measurements': idol['measurements'],
                        'favorite_food': idol['favorite_food'],
                        'least_favorite_food': idol['least_favorite_food'],
                        'hobbies': idol['hobbies'],
                        'attribute': idol['attribute'],
                        'year': idol['year'],
                        'cv': idol['cv']['name'] if idol['cv'] else None,
                        'cv_url': idol['cv']['url'] if idol['cv'] else None,
                        'cv_nickname': idol['cv']['nickname'] if idol['cv'] else None,
                        'cv_twitter': idol['cv']['twitter'] if idol['cv'] else None,
                        'cv_instagram': idol['cv']['instagram'] if idol['cv'] else None,
                        'official_url': idol['official_url'],
                        'summary': idol['summary'],
                    }
                    print u'======== Idol {} ========'.format(idol['name'])
                    pprint(data)
                    models.Idol.objects.update_or_create(name=idol['name'], defaults=data)
            return

        if 'events' in args:
            page_url = u'http://schoolido.lu/api/events/?page_size=50'
            while page_url is not None:
                response = urllib.urlopen(page_url)
                data = json.loads(response.read())
                page_url = data['next']
                for event in data['results']:
                    data = {
                        'romaji_name': event['romaji_name'],
                        'english_name': event['english_name'],
                        'english_t1_points': event['english_t1_points'],
                        'english_t1_rank': event['english_t1_rank'],
                        'english_t2_points': event['english_t2_points'],
                        'english_t2_rank': event['english_t2_rank'],
                        'japanese_t1_points': event['japanese_t1_points'],
                        'japanese_t1_rank': event['japanese_t1_rank'],
                        'japanese_t2_points': event['japanese_t2_points'],
                        'japanese_t2_rank': event['japanese_t2_rank'],
                        'note': event['note'],
                        'image': imageURLToDatabase(event['image']),
                        'english_image': imageURLToDatabase(event['english_image']),
                        'beginning': event['beginning'],
                        'end': event['end'],
                        'english_beginning': event['english_beginning'],
                        'english_end': event['english_end'],
                    }
                    print u'======== Event {} ========'.format(event['japanese_name'])
                    pprint(data)
                    models.Event.objects.update_or_create(japanese_name=event['japanese_name'], defaults=data)
            return

        if 'cards' in args:
            page_url = u'http://schoolido.lu/api/cards/?page_size=50&ordering=-id'
            while page_url is not None:
                response = urllib.urlopen(page_url)
                data = json.loads(response.read())
                page_url = data['next']
                for card in data['results']:
                    data = {}
                    data['idol'] = models.Idol.objects.get(name=card['idol']['name'])
                    if card['event']:
                        data['event'] = models.Event.objects.get(japanese_name=card['event']['japanese_name'])
                    data['game_id'] = card['game_id']
                    data['japanese_collection'] = card['japanese_collection']
                    #data['english_collection'] = card['english_collection']
                    data['translated_collection'] = card['translated_collection']
                    data['rarity'] = card['rarity']
                    data['attribute'] = card['attribute']
                    data['is_promo'] = card['is_promo']
                    data['promo_item'] = card['promo_item']
                    data['promo_link'] = card['promo_link']
                    data['release_date'] = card['release_date']
                    data['is_special'] = card['is_special']
                    data['japan_only'] = card['japan_only']
                    #data['seal_shop'] = card['seal_shop']
                    data['hp'] = card['hp']
                    data['minimum_statistics_smile'] = card['minimum_statistics_smile']
                    data['minimum_statistics_pure'] = card['minimum_statistics_pure']
                    data['minimum_statistics_cool'] = card['minimum_statistics_cool']
                    data['non_idolized_maximum_statistics_smile'] = card['non_idolized_maximum_statistics_smile']
                    data['non_idolized_maximum_statistics_pure'] = card['non_idolized_maximum_statistics_pure']
                    data['non_idolized_maximum_statistics_cool'] = card['non_idolized_maximum_statistics_cool']
                    data['idolized_maximum_statistics_smile'] = card['idolized_maximum_statistics_smile']
                    data['idolized_maximum_statistics_pure'] = card['idolized_maximum_statistics_pure']
                    data['idolized_maximum_statistics_cool'] = card['idolized_maximum_statistics_cool']
                    data['skill'] = card['skill']
                    data['japanese_skill'] = card['japanese_skill']
                    data['skill_details'] = card['skill_details']
                    data['japanese_skill_details'] = card['japanese_skill_details']
                    data['center_skill'] = card['center_skill']
                    data['transparent_image'] = imageURLToDatabase(card['transparent_image'])
                    data['transparent_idolized_image'] = imageURLToDatabase(card['transparent_idolized_image'])
                    data['card_image'] = imageURLToDatabase(card['card_image'])
                    data['card_idolized_image'] = imageURLToDatabase(card['card_idolized_image'])
                    data['english_card_image'] = imageURLToDatabase(card['english_card_image'])
                    data['english_card_idolized_image'] = imageURLToDatabase(card['english_card_idolized_image'])
                    data['round_card_image'] = imageURLToDatabase(card['round_card_image'])
                    data['round_card_idolized_image'] = imageURLToDatabase(card['round_card_idolized_image'])
                    data['english_round_card_image'] = imageURLToDatabase(card['english_round_card_image'])
                    data['english_round_card_idolized_image'] = imageURLToDatabase(card['english_round_card_idolized_image'])
                    data['video_story'] = card['video_story']
                    data['japanese_video_story'] = card['japanese_video_story']
                    print '======== Card #{} ========'.format(card['id'])
                    pprint(data)
                    models.Card.objects.update_or_create(id=card['id'], defaults=data)
            return

        if 'ur_pairs' in args:
            page_url = u'http://schoolido.lu/api/cards/?page_size=50&rarity=UR'
            while page_url is not None:
                response = urllib.urlopen(page_url)
                data = json.loads(response.read())
                page_url = data['next']
                for card in data['results']:
                    data = {}
                    pprint(card)
                    data['ur_pair'] = models.Card.objects.get(pk=card['ur_pair']['card']['id']) if card['ur_pair'] else None
                    data['ur_pair_reverse'] = card['ur_pair']['reverse_display'] if card['ur_pair'] else False
                    data['ur_pair_idolized_reverse'] = card['ur_pair']['reverse_display_idolized'] if card['ur_pair'] else False
                    data['clean_ur'] = card['clean_ur']
                    data['clean_ur_idolized'] = imageURLToDatabase(card['clean_ur_idolized'])
                    print '======== Card #{} ========'.format(card['id'])
                    pprint(data)
                    models.Card.objects.update_or_create(id=card['id'], defaults=data)
            return

        if 'imageURLs' in args:
            cards = models.Card.objects.all()
            for card in cards:
                card.card_idolized_image = 'cards/' + str(card.id) + 'idolized' + card.name.split(' ')[-1] + '.png'
                card.transparent_idolized_image = 'cards/transparent/' + str(card.id) + 'idolizedTransparent.png'
                card.round_card_idolized_image = 'cards/' + str(card.id) + 'RoundIdolized' + card.name.split(' ')[-1] + '.png'
                if not card.is_special and not card.is_promo:
                    card.card_image = 'cards/' + str(card.id) + card.name.split(' ')[-1] + '.png'
                    card.transparent_image = 'cards/transparent/' + str(card.id) + 'Transparent.png'
                    card.round_card_image = 'cards/' + str(card.id) + 'Round' + card.name.split(' ')[-1] + '.png'
                else:
                    card.card_image = None
                    card.transparent_image = None
                    card.round_card_image = None
                card.save()
            return
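
Every branch of this command walks the API the same way: json.loads each page, follow data['next'] until it is None, and upsert each result. A minimal sketch of that walk as a generator (iter_results is illustrative; urllib.urlopen matches the Python 2 code above):

import json
import urllib

def iter_results(page_url):
    # Yield every result object across all pages of a paginated JSON API.
    while page_url is not None:
        data = json.loads(urllib.urlopen(page_url).read())
        page_url = data['next']
        for result in data['results']:
            yield result

# for song in iter_results('http://schoolido.lu/api/songs/?page_size=50'):
#     ...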

Example 39

Project: MediaBrowser.Kodi Source File: RecentItems.py
    def updateRecent(self):
        self.logMsg("updateRecent Called")
        useBackgroundData = xbmcgui.Window(10000).getProperty("BackgroundDataLoaded") == "true"
        
        addonSettings = xbmcaddon.Addon(id='plugin.video.xbmb3c')
        mb3Host = addonSettings.getSetting('ipaddress')
        mb3Port = addonSettings.getSetting('port')    
        userName = addonSettings.getSetting('username')     
        
        downloadUtils = DownloadUtils()
        db = Database()
        
        userid = downloadUtils.getUserId()
        
        self.logMsg("UserName : " + userName + " UserID : " + userid)
        
        self.logMsg("Updating Recent Movie List")
        
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=30&Recursive=true&SortBy=DateCreated&Fields=Path,Genres,MediaStreams,Overview,ShortOverview,CriticRatingSummary&SortOrder=Descending&Filters=IsUnplayed,IsNotFolder&IncludeItemTypes=Movie&format=json"
         
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        if(jsonData == ""):
            return
            
        result = json.loads(jsonData)
        self.logMsg("Recent Movie Json Data : " + str(result), level=2)
        
        result = result.get("Items")
        if(result == None):
            result = []
            
        WINDOW = xbmcgui.Window( 10000 )

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
            
            rating = item.get("CommunityRating")
            criticrating = item.get("CriticRating")
            officialrating = item.get("OfficialRating")
            criticratingsummary = ""
            if(item.get("CriticRatingSummary") != None):
                criticratingsummary = item.get("CriticRatingSummary").encode('utf-8')
            plot = item.get("Overview")
            if plot == None:
                plot=''
            plot=plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            year = item.get("ProductionYear")
            if(item.get("RunTimeTicks") != None):
                runtime = str(int(item.get("RunTimeTicks"))/(10000000*60))
            else:
                runtime = "0"

            item_id = item.get("Id")
             
            if useBackgroundData != True:
                poster = downloadUtils.getArtwork(item, "Primary3")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(item, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                landscape = downloadUtils.getArtwork(item, "Thumb3")
                discart = downloadUtils.getArtwork(item, "Disc")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumb = downloadUtils.getArtwork(item, "Thumb3")
                else:
                    realthumb = medium_fanart
            else:
                poster = db.get(item_id +".Primary3")
                thumbnail = db.get(item_id +".Primary")
                logo = db.get(item_id +".Logo")
                fanart = db.get(item_id +".Backdrop")
                landscape = db.get(item_id +".Thumb3")
                discart = db.get(item_id +".Disc")
                medium_fanart = db.get(item_id +".Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumb = db.get(item_id +".Thumb3")
                else:
                    realthumb = medium_fanart  
            
            url = mb3Host + ":" + mb3Port + ',;' + item_id  # host:port and item id packed into one plugin parameter, separated by ',;'
            # play or show info
            selectAction = addonSettings.getSetting('selectAction')
            if(selectAction == "1"):
                playUrl = "plugin://plugin.video.xbmb3c/?id=" + item_id + '&mode=' + str(_MODE_ITEM_DETAILS)
            else:
                playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
                      
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("LatestMovieMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Art(landscape)  = " + landscape, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Art(discart)  = " + discart, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Art(poster)  = " + thumbnail, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Rating  = " + str(rating), level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".CriticRating  = " + str(criticrating), level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".CriticRatingSummary  = " + criticratingsummary, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Year  = " + str(year), level=2)
            self.logMsg("LatestMovieMB3." + str(item_count) + ".Runtime  = " + str(runtime), level=2)
            
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Path", playUrl)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Art(landscape)", landscape)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Art(discart)", discart)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Art(poster)", thumbnail)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Rating", str(rating))
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Mpaa", str(officialrating))
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".CriticRating", str(criticrating))
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".CriticRatingSummary", criticratingsummary)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".ShortPlot", shortplot)
            
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Year", str(year))
            WINDOW.setProperty("LatestMovieMB3." + str(item_count) + ".Runtime", str(runtime))
            
            WINDOW.setProperty("LatestMovieMB3.Enabled", "true")
            
            item_count = item_count + 1
            
        #Updating Recent Unplayed Movie List
        self.logMsg("Updating Recent Unplayed Movie List")
        
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items/Latest?Limit=30&SortBy=DateCreated&Fields=Path,Genres,MediaStreams,Overview,ShortOverview,CriticRatingSummary&IsPlayed=false&IncludeItemTypes=Movie&format=json"
         
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        result = json.loads(jsonData) if jsonData else None  # downloadUrl returns "" on failure, which json.loads cannot parse
        self.logMsg("Recent Unplayed Movie Json Data : " + str(result), level=2)
        
        if(result == None):
            result = []
            
        WINDOW = xbmcgui.Window( 10000 )

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
            
            rating = item.get("CommunityRating")
            criticrating = item.get("CriticRating")
            officialrating = item.get("OfficialRating")
            criticratingsummary = ""
            if(item.get("CriticRatingSummary") != None):
                criticratingsummary = item.get("CriticRatingSummary").encode('utf-8')
            plot = item.get("Overview")
            if plot == None:
                plot=''
            plot=plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            year = item.get("ProductionYear")
            if(item.get("RunTimeTicks") != None):
                runtime = str(int(item.get("RunTimeTicks"))/(10000000*60))
            else:
                runtime = "0"

            item_id = item.get("Id")
              
            if useBackgroundData != True:
                poster = downloadUtils.getArtwork(item, "Primary3")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(item, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                landscape = downloadUtils.getArtwork(item, "Thumb3")
                discart = downloadUtils.getArtwork(item, "Disc")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumb = downloadUtils.getArtwork(item, "Thumb3")
                else:
                    realthumb = medium_fanart
            else:
                poster = db.get(item_id +".Primary3")
                thumbnail = db.get(item_id +".Primary")
                logo = db.get(item_id +".Logo")
                fanart = db.get(item_id +".Backdrop")
                landscape = db.get(item_id +".Thumb3")
                discart = db.get(item_id +".Disc")
                medium_fanart = db.get(item_id +".Backdrop3")
                
                if item.get("ImageTags").get("Thumb") != None:
                    realthumb = db.get(item_id +".Thumb3")
                else:
                    realthumb = medium_fanart  
            
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            selectAction = addonSettings.getSetting('selectAction')
            if(selectAction == "1"):
                playUrl = "plugin://plugin.video.xbmb3c/?id=" + item_id + '&mode=' + str(_MODE_ITEM_DETAILS)
            else:
                playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Thumb = " + realthumb, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Art(poster)  = " + thumbnail, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Rating  = " + str(rating), level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".CriticRating  = " + str(criticrating), level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".CriticRatingSummary  = " + criticratingsummary, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Year  = " + str(year), level=2)
            self.logMsg("LatestUnplayedMovieMB3." + str(item_count) + ".Runtime  = " + str(runtime), level=2)
            
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Thumb", realthumb)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Path", playUrl)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Art(landscape)", landscape)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Art(medium_fanart)", medium_fanart)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Art(poster)", thumbnail)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Rating", str(rating))
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Mpaa", str(officialrating))
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".CriticRating", str(criticrating))
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".CriticRatingSummary", criticratingsummary)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".ShortPlot", shortplot)
            
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Year", str(year))
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".Runtime", str(runtime))
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".ItemGUID", item_id)
            WINDOW.setProperty("LatestUnplayedMovieMB3." + str(item_count) + ".id", item_id)
            
            
            WINDOW.setProperty("LatestUnplayedMovieMB3.Enabled", "true")
            
            item_count = item_count + 1
        
        #Updating Recent TV Show List
        self.logMsg("Updating Recent TV Show List")
        
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=30&Recursive=true&SortBy=DateCreated&Fields=Path,Genres,MediaStreams,ShortOverview,Overview&SortOrder=Descending&Filters=IsUnplayed,IsNotFolder&IsVirtualUnaired=false&IsMissing=False&IncludeItemTypes=Episode&format=json"
        
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        result = json.loads(jsonData) if jsonData else {}  # guard against an empty download before .get("Items") below
        self.logMsg("Recent TV Show Json Data : " + str(result), level=2)
        
        result = result.get("Items")
        if(result == None):
            result = []   

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            seriesName = "Missing Name"
            if(item.get("SeriesName") != None):
                seriesName = item.get("SeriesName").encode('utf-8')   

            eppNumber = "X"
            tempEpisodeNumber = "00"
            if(item.get("IndexNumber") != None):
                eppNumber = item.get("IndexNumber")
                if eppNumber < 10:
                  tempEpisodeNumber = "0" + str(eppNumber)
                else:
                  tempEpisodeNumber = str(eppNumber)
            
            seasonNumber = item.get("ParentIndexNumber")
            if seasonNumber < 10:
              tempSeasonNumber = "0" + str(seasonNumber)
            else:
              tempSeasonNumber = str(seasonNumber)
            rating = str(item.get("CommunityRating"))
            plot = item.get("Overview")
            if plot == None:
                plot=''
            plot=plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            item_id = item.get("Id")
           
            seriesId = item.get("SeriesId")          
              
            if useBackgroundData != True:
                seriesJsonData = downloadUtils.downloadUrl("http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items/" + seriesId + "?format=json", suppress=True, popup=1 )
                seriesResult = json.loads(seriesJsonData)      
                officialrating = seriesResult.get("OfficialRating")        
                poster = downloadUtils.getArtwork(seriesResult, "Primary3")
                small_poster = downloadUtils.getArtwork(seriesResult, "Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(seriesResult, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                banner = downloadUtils.getArtwork(item, "Banner")
                if (seriesResult.get("ImageTags") != None and seriesResult.get("ImageTags").get("Thumb") != None):
                  seriesthumbnail = downloadUtils.getArtwork(seriesResult, "Thumb3")
                else:
                  seriesthumbnail = medium_fanart 
            else:
                officialrating = db.get(seriesId + ".OfficialRating")
                poster = db.get(seriesId + ".Primary3")
                small_poster = db.get(seriesId + ".Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = db.get(seriesId + ".Logo")
                fanart = db.get(seriesId + ".Backdrop")
                medium_fanart = db.get(seriesId + ".Backdrop3")
                banner = db.get(seriesId + ".Banner")
                if item.get("SeriesThumbImageTag") != None:
                   seriesthumbnail = db.get(seriesId + ".Thumb3")
                else:
                   seriesthumbnail = fanart
              
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            selectAction = addonSettings.getSetting('selectAction')
            if(selectAction == "1"):
                playUrl = "plugin://plugin.video.xbmb3c/?id=" + item_id + '&mode=' + str(_MODE_ITEM_DETAILS)
            else:
                playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".EpisodeTitle = " + title, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".ShowTitle = " + seriesName, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".EpisodeNo = " + tempEpisodeNumber, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".SeasonNo = " + tempSeasonNumber, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Rating  = " + rating, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)  = " + fanart, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)  = " + logo, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)  = " + banner, level=2)  
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)  = " + poster, level=2)
            self.logMsg("LatestEpisodeMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".EpisodeTitle", title)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".ShowTitle", seriesName)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".EpisodeNo", tempEpisodeNumber)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".SeasonNo", tempSeasonNumber)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".SeriesThumb", seriesthumbnail)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Rating", rating)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)", fanart)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)", logo)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)", banner)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)", poster)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("LatestEpisodeMB3." + str(item_count) + ".ShortPlot", shortplot)
            
            WINDOW.setProperty("LatestEpisodeMB3.Enabled", "true")
            
            item_count = item_count + 1
            
        #Updating Recent Unplayed TV Show List
        self.logMsg("Updating Recent Unplayed TV Show List")
                                                                                           
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items/Latest?Limit=30&SortBy=DateCreated&Fields=Path,Genres,MediaStreams,ShortOverview,Overview&IsPlayed=false&GroupItems=false&IncludeItemTypes=Episode&format=json"
        
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        result = json.loads(jsonData) if jsonData else None  # guard against an empty download; None is normalised to [] below
        self.logMsg("Recent Unplayed TV Show Json Data : " + str(result), level=2)
        
        if(result == None):
            result = []   

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            seriesName = "Missing Name"
            if(item.get("SeriesName") != None):
                seriesName = item.get("SeriesName").encode('utf-8')   

            eppNumber = "X"
            tempEpisodeNumber = "00"
            if(item.get("IndexNumber") != None):
                eppNumber = item.get("IndexNumber")
                if eppNumber < 10:
                  tempEpisodeNumber = "0" + str(eppNumber)
                else:
                  tempEpisodeNumber = str(eppNumber)
            
            seasonNumber = item.get("ParentIndexNumber")
            if seasonNumber < 10:
              tempSeasonNumber = "0" + str(seasonNumber)
            else:
              tempSeasonNumber = str(seasonNumber)
            rating = str(item.get("CommunityRating"))
            plot = item.get("Overview")
            if plot == None:
                plot=''
            plot=plot.encode('utf-8')
            shortplot = item.get("ShortOverview")
            if shortplot == None:
                shortplot = ''
            shortplot = shortplot.encode('utf-8')
            item_id = item.get("Id")
           
            seriesId = item.get("SeriesId")
              
            if useBackgroundData != True:
                seriesJsonData = downloadUtils.downloadUrl("http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items/" + seriesId + "?format=json", suppress=True, popup=1 )
                seriesResult = json.loads(seriesJsonData)      
                officialrating = seriesResult.get("OfficialRating")        
                poster = downloadUtils.getArtwork(seriesResult, "Primary3")
                small_poster = downloadUtils.getArtwork(seriesResult, "Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = downloadUtils.getArtwork(seriesResult, "Logo")
                fanart = downloadUtils.getArtwork(item, "Backdrop")
                medium_fanart = downloadUtils.getArtwork(item, "Backdrop3")
                banner = downloadUtils.getArtwork(item, "Banner")
                if (seriesResult.get("ImageTags") != None and seriesResult.get("ImageTags").get("Thumb") != None):
                  seriesthumbnail = downloadUtils.getArtwork(seriesResult, "Thumb3")
                else:
                  seriesthumbnail = medium_fanart 
            else:
                officialrating = db.get(seriesId + ".OfficialRating")
                poster = db.get(seriesId + ".Primary3")
                small_poster = db.get(seriesId + ".Primary2")
                thumbnail = downloadUtils.getArtwork(item, "Primary")
                logo = db.get(seriesId + ".Logo")
                fanart = db.get(seriesId + ".Backdrop")
                medium_fanart = db.get(seriesId + ".Backdrop3")
                banner = db.get(seriesId + ".Banner")
                if item.get("SeriesThumbImageTag") != None:
                   seriesthumbnail = db.get(seriesId + ".Thumb3")
                else:
                   seriesthumbnail = fanart
              
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            selectAction = addonSettings.getSetting('selectAction')
            if(selectAction == "1"):
                playUrl = "plugin://plugin.video.xbmb3c/?id=" + item_id + '&mode=' + str(_MODE_ITEM_DETAILS)
            else:
                playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".EpisodeTitle = " + title, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".ShowTitle = " + seriesName, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".EpisodeNo = " + tempEpisodeNumber, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".SeasonNo = " + tempSeasonNumber, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Rating  = " + rating, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)  = " + fanart, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)  = " + logo, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)  = " + banner, level=2)  
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)  = " + poster, level=2)
            self.logMsg("LatestUnplayedEpisodeMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".EpisodeTitle", title)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".ShowTitle", seriesName)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".EpisodeNo", tempEpisodeNumber)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".SeasonNo", tempSeasonNumber)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".SeriesThumb", seriesthumbnail)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Rating", rating)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.fanart)", fanart)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.medium_fanart)", medium_fanart)
            
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.clearlogo)", logo)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.banner)", banner)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Art(tvshow.poster)", poster)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".Plot", plot)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".ShortPlot", shortplot)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".ItemGUID", seriesId)
            WINDOW.setProperty("LatestUnplayedEpisodeMB3." + str(item_count) + ".id", item_id)
            
            
            WINDOW.setProperty("LatestUnplayedEpisodeMB3.Enabled", "true")
            
            item_count = item_count + 1
            
        #Updating Recent MusicList
        self.logMsg("Updating Recent MusicList")
    
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=10&Recursive=true&SortBy=DateCreated&Fields=Path,Genres,MediaStreams,Overview&SortOrder=Descending&Filters=IsUnplayed,IsFolder&IsVirtualUnaired=false&IsMissing=False&IncludeItemTypes=MusicAlbum&format=json"
    
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        result = json.loads(jsonData) if jsonData else {}  # guard against an empty download before .get("Items") below
        self.logMsg("Recent MusicList Json Data : " + str(result), level=2)
    
        result = result.get("Items")
        if(result == None):
          result = []   

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            artist = "Missing Artist"
            if(item.get("AlbumArtist") != None):
                artist = item.get("AlbumArtist").encode('utf-8')   

            year = "0000"
            if(item.get("ProductionYear") != None):
              year = str(item.get("ProductionYear"))
            plot = "Missing Plot"
            if(item.get("Overview") != None):
              plot = item.get("Overview").encode('utf-8')

            item_id = item.get("Id")
           
            if item.get("Type") == "MusicAlbum":
               parentId = item.get("ParentLogoItemId")
            
            thumbnail = downloadUtils.getArtwork(item, "Primary")
            logo = downloadUtils.getArtwork(item, "Logo")
            fanart = downloadUtils.getArtwork(item, "Backdrop")
            banner = downloadUtils.getArtwork(item, "Banner")
            
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Artist = " + artist, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Year = " + year, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Art(banner)  = " + banner, level=2)  
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Art(poster)  = " + thumbnail, level=2)
            self.logMsg("LatestAlbumMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Artist", artist)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Year", year)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Rating", rating)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Art(banner)", banner)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Art(poster)", thumbnail)
            WINDOW.setProperty("LatestAlbumMB3." + str(item_count) + ".Plot", plot)
            
            WINDOW.setProperty("LatestAlbumMB3.Enabled", "true")
            
            item_count = item_count + 1

        #Updating Recent Photo
        self.logMsg("Updating Recent Photo")
    
        recentUrl = "http://" + mb3Host + ":" + mb3Port + "/mediabrowser/Users/" + userid + "/Items?Limit=10&Recursive=true&SortBy=DateCreated&Fields=Path,Genres,MediaStreams,Overview&SortOrder=Descending&Filters=IsUnplayed&IsVirtualUnaired=false&IsMissing=False&IncludeItemTypes=Photo&format=json"
    
        jsonData = downloadUtils.downloadUrl(recentUrl, suppress=True, popup=1 )
        result = json.loads(jsonData) if jsonData else {}  # guard against an empty download before .get("Items") below
        self.logMsg("Recent Photo Json Data : " + str(result), level=2)
    
        result = result.get("Items")
        if(result == None):
          result = []   

        item_count = 1
        for item in result:
            title = "Missing Title"
            if(item.get("Name") != None):
                title = item.get("Name").encode('utf-8')
                
            
            plot = "Missing Plot"
            if(item.get("Overview") != None):
              plot = item.get("Overview").encode('utf-8')

            item_id = item.get("Id") 
            
            thumbnail = downloadUtils.getArtwork(item, "Primary")
            logo = downloadUtils.getArtwork(item, "Logo")
            fanart = downloadUtils.getArtwork(item, "Backdrop")
            banner = downloadUtils.getArtwork(item, "Banner")
            
            url =  mb3Host + ":" + mb3Port + ',;' + item_id
            playUrl = "plugin://plugin.video.xbmb3c/?url=" + url + '&mode=' + str(_MODE_BASICPLAY)
            playUrl = playUrl.replace("\\\\","smb://")
            playUrl = playUrl.replace("\\","/")    

            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Title = " + title, level=2)
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Thumb = " + thumbnail, level=2)
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Path  = " + playUrl, level=2)
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Art(fanart)  = " + fanart, level=2)
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Art(clearlogo)  = " + logo, level=2)
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Art(banner)  = " + banner, level=2)  
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Art(poster)  = " + thumbnail, level=2)
            self.logMsg("LatestPhotoMB3." + str(item_count) + ".Plot  = " + plot, level=2)
            
            
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Title", title)
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Thumb", thumbnail)
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Path", playUrl)            
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Art(fanart)", fanart)
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Art(clearlogo)", logo)
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Art(banner)", banner)
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Art(poster)", thumbnail)
            WINDOW.setProperty("LatestPhotoMB3." + str(item_count) + ".Plot", plot)
            
            WINDOW.setProperty("LatestPhotoMB3.Enabled", "true")
            
            item_count = item_count + 1
            

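Example 39 repeats one pattern throughout: download a URL, parse the body with json.loads, and normalise the result to a list before filling window properties. The /Items endpoints wrap results in an "Items" envelope while the /Items/Latest endpoints return a bare list; a minimal sketch handling both shapes (the helper name parse_items is ours):

import json

def parse_items(body):
    """Decode a MediaBrowser response into a list of items.

    Handles both the {"Items": [...]} envelope (/Items) and the
    bare-list form (/Items/Latest); an empty body yields [].
    """
    if not body:  # the add-on's downloadUrl signals failure with ""
        return []
    data = json.loads(body)
    if isinstance(data, dict):
        return data.get("Items") or []
    return data if isinstance(data, list) else []
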
Example 40

Project: script.tvshowtime Source File: default.py
    def onNotification(self, sender, method, data):
        log('onNotification')
        log('method=%s' % method)
        if (method == 'Player.OnPlay'):
            self._setUp()
            self._total_time = player.getTotalTime()
            self._tracker.start()
            log('Player.OnPlay')
            if player.http == 'true' and player.getPlayingFile()[:4] == 'http' and re.search(r'[sS][0-9]*[eE][0-9]*', os.path.basename(player.getPlayingFile()), flags=0) :
                player.http_playing = True
                player.filename = os.path.basename(player.getPlayingFile())
                self.startcut = player.filename.find("%5B")
                self.endcut = player.filename.find("%5D")
                self.tocut = player.filename[self.startcut:self.endcut]
                player.filename = player.filename.replace(self.tocut, "")
                player.filename = player.filename.replace("%5B", "")
                player.filename = player.filename.replace("%5D", "")
                player.filename = player.filename.replace("%20", ".")
                log('tvshowtitle=%s' % player.filename)
                player.episode = FindEpisode(player.token, 0, player.filename)
                log('episode.is_found=%s' % player.episode.is_found)
                if player.episode.is_found:
                    if player.notifications == 'true':                        
                        if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                            return
                        if player.notif_scrobbling == 'false':
                            return
                        notif('%s %s %sx%s' % (__language__(32904), player.episode.showname, player.episode.season_number, player.episode.number), time=2500)
                else:
                    if player.notifications == 'true':
                        if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                            return
                        notif(__language__(32905), time=2500)
            else:
                player.http_playing = False
                response = json.loads(data) 
                log('%s' % response)
                if response.get('item').get('type') == 'episode':
                    xbmc_id = response.get('item').get('id')
                    item = self.getEpisodeTVDB(xbmc_id)    
                    log('showtitle=%s' % item['showtitle'])
                    log('season=%s' % item['season'])
                    log('episode=%s' % item['episode'])
                    log('episode_id=%s' % item['episode_id'])
                    if len(item['showtitle']) > 0 and item['season'] > 0 and item['episode'] > 0 and item['episode_id'] > 0:                   
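                        # build a scene-style name, e.g. Show.Title.S01E02, from the library metadata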
                        player.filename = '%s.S%.2dE%.2d' % (formatName(item['showtitle']), float(item['season']), float(item['episode']))
                        log('tvshowtitle=%s' % player.filename)
                        player.episode = FindEpisode(player.token, item['episode_id'])
                        log('episode.is_found=%s' % player.episode.is_found)
                        if player.episode.is_found:
                            if player.notifications == 'true':                        
                                if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                    return
                                if player.notif_scrobbling == 'false':
                                    return
                                notif('%s %s %sx%s' % (__language__(32904), player.episode.showname, player.episode.season_number, player.episode.number), time=2500)
                        else:
                            if player.notifications == 'true':
                                if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                    return
                                notif(__language__(32905), time=2500)
                    else:
                        if player.notifications == 'true':
                            if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                return
                            notif(__language__(32905), time=2500)              
        if (method == 'Player.OnStop'): 
            self._tearDown()
            actual_percent = (self._last_pos/self._total_time)*100
            log('last_pos / total_time : %s / %s = %s %%' % (self._last_pos, self._total_time, actual_percent)) 
            log('Player.OnStop') 
            if player.http == 'true' and player.http_playing == True :
                if player.progress == 'true':
                    player.episode = FindEpisode(player.token, 0, player.filename)
                    log('episode.is_found=%s' % player.episode.is_found)
                    if player.episode.is_found:
                        log('progress=%s' % self._last_pos)
                        self.progress = SaveProgress(player.token, player.episode.id, self._last_pos)   
                        log('progress.is_set:=%s' % self.progress.is_set)  
                        if actual_percent > 90:
                            log('MarkAsWatched(*, %s, %s, %s)' % (player.filename, player.facebook, player.twitter))
                            checkin = MarkAsWatched(player.token, player.episode.id, player.facebook, player.twitter)
                            log('checkin.is_marked:=%s' % checkin.is_marked)
                            if checkin.is_marked:
                                if player.emotion == 'true':
                                    self.emotion = xbmcgui.Dialog().select('%s: %s %sx%s' % (__language__(33909), player.episode.showname, player.episode.season_number, player.episode.number), [__language__(35311), __language__(35312), __language__(35313), __language__(35314), __language__(35316), __language__(35317)])
                                    if self.emotion < 0: return
                                    if self.emotion == 0:
                                        self.emotion = 1
                                    elif self.emotion == 1:
                                        self.emotion = 2
                                    elif self.emotion == 2:
                                        self.emotion = 3
                                    elif self.emotion == 3:
                                        self.emotion = 4
                                    elif self.emotion == 4:
                                        self.emotion = 6
                                    elif self.emotion == 5:
                                        self.emotion = 7
                                    SetEmotion(player.token, player.episode.id, self.emotion)
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    if player.notif_scrobbling == 'false':
                                        return
                                    notif('%s %s %sx%s' % (__language__(32906), player.episode.showname, player.episode.season_number, player.episode.number), time=2500) 
            else:       
                response = json.loads(data) 
                log('%s' % response)
                if player.progress == 'true':
                    if response.get('item').get('type') == 'episode':
                        xbmc_id = response.get('item').get('id')
                        item = self.getEpisodeTVDB(xbmc_id)    
                        log('showtitle=%s' % item['showtitle'])
                        log('season=%s' % item['season'])
                        log('episode=%s' % item['episode'])
                        log('episode_id=%s' % item['episode_id'])
                        if len(item['showtitle']) > 0 and item['season'] > 0 and item['episode'] > 0 and item['episode_id'] > 0:                   
                            player.filename = '%s.S%.2dE%.2d' % (formatName(item['showtitle']), float(item['season']), float(item['episode']))
                            log('tvshowtitle=%s' % player.filename)
                        log('progress=%s' % self._last_pos)
                        self.progress = SaveProgress(player.token, item['episode_id'], self._last_pos)   
                        log('progress.is_set:=%s' % self.progress.is_set)                                
        if (method == 'VideoLibrary.OnUpdate'):
            log('VideoLibrary.OnUpdate')
            response = json.loads(data) 
            log('%s' % response)
            if response.get('item').get('type') == 'episode':
                xbmc_id = response.get('item').get('id')
                playcount = response.get('playcount') 
                log('playcount=%s' % playcount)
                item = self.getEpisodeTVDB(xbmc_id)    
                log('showtitle=%s' % item['showtitle'])
                log('season=%s' % item['season'])
                log('episode=%s' % item['episode'])
                log('episode_id=%s' % item['episode_id'])
                log('playcount=%s' % playcount)
                if len(item['showtitle']) > 0 and item['season'] > 0 and item['episode'] > 0 and item['episode_id'] > 0:
                    self.filename = '%s.S%.2dE%.2d' % (formatName(item['showtitle']), float(item['season']), float(item['episode']))
                    log('tvshowtitle=%s' % self.filename)
                    self.episode = FindEpisode(player.token, item['episode_id'])
                    log('episode.is_found=%s' % self.episode.is_found)
                    if self.episode.is_found:
                        if playcount == 1:  # was 'is 1'; identity comparison on ints is unreliable
                            log('MarkAsWatched(*, %s, %s, %s)' % (self.filename, player.facebook, player.twitter))
                            checkin = MarkAsWatched(player.token, item['episode_id'], player.facebook, player.twitter)
                            log('checkin.is_marked:=%s' % checkin.is_marked)
                            if checkin.is_marked:
                                if player.emotion == 'true':
                                    self.emotion = xbmcgui.Dialog().select('%s: %s' % (__language__(33909), self.filename), [__language__(35311), __language__(35312), __language__(35313), __language__(35314), __language__(35316), __language__(35317)])
                                    if self.emotion < 0: return
                                    if self.emotion == 0:
                                        self.emotion = 1
                                    elif self.emotion == 1:
                                        self.emotion = 2
                                    elif self.emotion == 2:
                                        self.emotion = 3
                                    elif self.emotion == 3:
                                        self.emotion = 4
                                    elif self.emotion == 4:
                                        self.emotion = 6
                                    elif self.emotion == 5:
                                        self.emotion = 7
                                    SetEmotion(player.token, item['episode_id'], self.emotion)
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    if player.notif_scrobbling == 'false':
                                        return
                                    notif('%s %s %sx%s' % (__language__(32906), self.episode.showname, self.episode.season_number, self.episode.number), time=2500)
                            else:
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    notif(__language__(32907), time=2500)
                        if playcount == 0:  # was 'is 0'; identity comparison on ints is unreliable
                            log('MarkAsUnWatched(*, %s)' % (self.filename))
                            checkin = MarkAsUnWatched(player.token, item['episode_id'])
                            log('checkin.is_unmarked:=%s' % checkin.is_unmarked)
                            if checkin.is_unmarked:
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    if player.notif_scrobbling == 'false':
                                        return
                                    notif('%s %s %sx%s' % (__language__(32908), self.episode.showname, self.episode.season_number, self.episode.number), time=2500)
                            else:
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    notif(__language__(32907), time=2500)

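Example 40 feeds json.loads with the 'data' payload that Kodi passes to onNotification: a JSON string describing the player event and the library item involved. A minimal sketch of the extraction step (the helper name episode_from_notification is ours):

import json

def episode_from_notification(data):
    """Return (library_id, playcount) for episode events, else None.

    'data' is the JSON string Kodi hands to onNotification, e.g.
    '{"item": {"type": "episode", "id": 42}, "playcount": 1}'.
    """
    payload = json.loads(data)
    item = payload.get('item') or {}
    if item.get('type') != 'episode':
        return None
    return item.get('id'), payload.get('playcount')
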
Example 41

Project: edx2bigquery Source File: make_user_info_combo.py
def process_file(course_id, basedir=None, datedir=None, use_dataset_latest=False):

    basedir = path(basedir or '')
    course_dir = course_id.replace('/','__')
    lfp = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest=use_dataset_latest)

    cdir = lfp
    print "Processing %s from files in %s" % (course_id, cdir)
    sys.stdout.flush()

    mypath = os.path.dirname(os.path.realpath(__file__))
    SCHEMA_FILE = '%s/schemas/schema_user_info_combo.json' % mypath
    
    the_dict_schema = schema2dict(json.loads(open(SCHEMA_FILE).read())['user_info_combo'])
    
    uic = defaultdict(dict)		# dict with key = user_id, and val = dict to be written out as JSON line
    
    def copy_elements(src, dest, fields, prefix="", skip_empty=False):
        for key in fields:
            if skip_empty and (not key in src):
                src[key] = None
            if src[key]=='NULL':
                continue
            if key=='course_id' and src[key].startswith('course-v1:'):
                # special handling for mangled "opaque keys" version of course_id, e.g. course-v1:MITx+6.00.2x_3+1T2015
                src[key] = src[key].split(':',1)[1].replace('+','/')
            dest[prefix + key] = src[key]
    
    def openfile(fn_in, mode='r', add_dir=True):
        if add_dir:
            fn = cdir / fn_in
        else:
            fn = fn_in
        if (not os.path.exists(fn)) and (not fn.endswith('.gz')):
            fn += ".gz"
        if mode=='r' and not os.path.exists(fn):
            newfn = convert_sql(fn)		# try converting from *.sql file, if that exists
            if not newfn:
                return None			# failure, no file found, return None
            fn = newfn
        if fn.endswith('.gz'):
            return gzip.GzipFile(fn, mode)
        return open(fn, mode)
    
    def tsv2csv(fn_in, fn_out):
        import csv
        fp = openfile(fn_out, 'w', add_dir=False)
        csvfp = csv.writer(fp)
        for line in openfile(fn_in, add_dir=False):
            csvfp.writerow(line[:-1].split('\t'))
        fp.close()
    
    def convert_sql(fnroot):
        '''
        Returns filename if suitable file exists or was created by conversion of tab separated values to comma separated values.
        Returns False otherwise.
        '''
        if fnroot.endswith('.gz'):
            fnroot = fnroot[:-3]
        if fnroot.endswith('.csv'):
            fnroot = fnroot[:-4]
        if os.path.exists(fnroot + ".csv"):
            return fnroot + ".csv"
        if os.path.exists(fnroot + ".csv.gz"):
            return fnroot + ".csv.gz"
        if os.path.exists(fnroot + ".sql") or os.path.exists(fnroot + ".sql.gz"):
            infn = fnroot + '.sql'
            outfn = fnroot + '.csv.gz'
            print "--> Converting %s to %s" % (infn, outfn)
            tsv2csv(infn, outfn)
            return outfn
        return False

    nusers = 0
    fields = ['username', 'email', 'is_staff', 'last_login', 'date_joined']
    for line in csv.DictReader(openfile('users.csv')):
        uid = int(line['id'])
        copy_elements(line, uic[uid], fields)
        uic[uid]['user_id'] = uid
        nusers += 1
        uic[uid]['y1_anomalous'] = None
    
    print "  %d users loaded from users.csv" % nusers

    fp = openfile('profiles.csv')
    if fp is None:
        print "--> Skipping profiles.csv, file does not exist"
    else:
        nprofiles = 0
        fields = ['name', 'language', 'location', 'meta', 'courseware', 
                  'gender', 'mailing_address', 'year_of_birth', 'level_of_education', 'goals', 
                  'allow_certificate', 'country', 'city']
        for line in csv.DictReader(fp):
            uid = int(line['user_id'])
            copy_elements(line, uic[uid], fields, prefix="profile_")
            nprofiles += 1
        print "  %d profiles loaded from profiles.csv" % nprofiles
    
    fp = openfile('enrollment.csv')
    if fp is None:
        print "--> Skipping enrollment.csv, file does not exist"
    else:
        nenrollments = 0
        fields = ['course_id', 'created', 'is_active', 'mode', ]
        for line in csv.DictReader(fp):
            uid = int(line['user_id'])
            copy_elements(line, uic[uid], fields, prefix="enrollment_")
            nenrollments += 1
        print "  %d enrollments loaded from profiles.csv" % nenrollments
    
    # see if from_mongodb files are present for this course; if so, merge in that data
    mongodir = cdir.dirname() / 'from_mongodb'
    if mongodir.exists():
        print "--> %s exists, merging in users, profile, and enrollment data from mongodb" % mongodir
        sys.stdout.flush()
        fp = gzip.GzipFile(mongodir / "users.json.gz")
        fields = ['username', 'email', 'is_staff', 'last_login', 'date_joined']
        nadded = 0
        for line in fp:
            pdata = json.loads(line)
            uid = int(pdata['_id'])
            if not uid in uic:
                copy_elements(pdata, uic[uid], fields, skip_empty=True)
                uic[uid]['user_id'] = uid
                nadded += 1
        fp.close()
        print "  %d additional users loaded from %s/users.json.gz" % (nadded, mongodir)
                
        fp = gzip.GzipFile(mongodir / "profiles.json.gz")
        fields = ['name', 'language', 'location', 'meta', 'courseware', 
                  'gender', 'mailing_address', 'year_of_birth', 'level_of_education', 'goals', 
                  'allow_certificate', 'country', 'city']
        nadd_profiles = 0
        def fix_unicode(elem, fields):
            for k in fields:
                if (k in elem) and elem[k]:
                    elem[k] = elem[k].encode('utf8')

        for line in fp:
            pdata = json.loads(line.decode('utf8'))
            uid = int(pdata['user_id'])
            if not uic[uid].get('profile_name', None):
                copy_elements(pdata, uic[uid], fields, prefix="profile_", skip_empty=True)
                fix_unicode(uic[uid], ['profile_name', 'profile_mailing_address', 'profile_goals', 'profile_location', 'profile_language'])
                uic[uid]['y1_anomalous'] = 1
                nadd_profiles += 1
        fp.close()
        print "  %d additional profiles loaded from %s/profiles.json.gz" % (nadd_profiles, mongodir)
                
        # if datedir is specified, then do not add entries from mongodb where the enrollment happened after the datedir cutoff
        cutoff = None
        if datedir:
            cutoff = "%s 00:00:00" % datedir

        fp = gzip.GzipFile(mongodir / "enrollment.json.gz")
        fields = ['course_id', 'created', 'is_active', 'mode', ]
        nadd_enrollment = 0
        n_removed_after_cutoff = 0
        for line in fp:
            pdata = json.loads(line.decode('utf8'))
            uid = int(pdata['user_id'])
            if not uic[uid].get('enrollment_course_id', None):
                if cutoff and (pdata['created'] > cutoff) and (uic[uid].get('y1_anomalous')==1):	# remove if enrolled after datedir cutoff
                    uic.pop(uid)
                    n_removed_after_cutoff += 1
                else:
                    copy_elements(pdata, uic[uid], fields, prefix="enrollment_", skip_empty=True)
                    nadd_enrollment += 1
        fp.close()
        print "  %d additional enrollments loaded from %s/enrollment.json.gz" % (nadd_enrollment, mongodir)

        print "     from mongodb files, added %s (of %s) new users (%s profiles, %s enrollments, %s after cutoff %s)" % (nadded - n_removed_after_cutoff,
                                                                                                                         nadded, nadd_profiles, nadd_enrollment,
                                                                                                                         n_removed_after_cutoff,
                                                                                                                         cutoff)
        sys.stdout.flush()

    fp = openfile('certificates.csv')
    if fp is None:
        print "--> Skipping certificates.csv, file does not exist"
    else:
        for line in csv.DictReader(fp):
            uid = int(line['user_id'])
            fields = ['download_url', 'grade', 'course_id', 'key', 'distinction', 'status', 
                      'verify_uuid', 'download_uuid', 'name', 'created_date', 'modified_date', 'error_reason', 'mode',]
            copy_elements(line, uic[uid], fields, prefix="certificate_")
            if 'user_id' not in uic[uid]:
                uic[uid]['user_id'] = uid
    
    # sanity check for entries with user_id but missing username
    nmissing_uname = 0
    for uid, entry in uic.iteritems():
        if ('username' not in entry) or (not entry['username']):
            nmissing_uname += 1
            if nmissing_uname < 10:
                print "missing username: %s" % entry
    print "--> %d entries missing username" % nmissing_uname
    sys.stdout.flush()
    
    # sanity check for entries missing course_id
    nmissing_cid = 0
    for uid, entry in uic.iteritems():
        if ('enrollment_course_id' not in entry) or (not entry['enrollment_course_id']):
            nmissing_cid += 1
            entry['enrollment_course_id'] = course_id
    print "--> %d entries missing enrollment_course_id (all fixed by setting to %s)" % (nmissing_cid, course_id)
    sys.stdout.flush()

    fp = openfile('user_id_map.csv')
    if fp is None:
        print "--> Skipping user_id_map.csv, file does not exist"
    else:
        for line in csv.DictReader(fp):
            uid = int(line['id'])
            fields = ['hash_id']
            copy_elements(line, uic[uid], fields, prefix="id_map_")
    
    # sort by userid
    uidset = uic.keys()
    uidset.sort()
    
    # write out result, checking schema along the way
    
    fieldnames = the_dict_schema.keys()
    ofp = openfile('user_info_combo.json.gz', 'w')
    ocsv = csv.DictWriter(openfile('user_info_combo.csv.gz', 'w'), fieldnames=fieldnames)
    ocsv.writeheader()
    
    for uid in uidset:
        data = uic[uid]
        check_schema(uid, data, the_ds=the_dict_schema, coerce=True)
        if ('enrollment_course_id' not in data) and ('certificate_course_id' not in data):
            print "Oops!  missing course_id in user_info_combo line: inconsistent SQL?"
            print "data = %s" % data
            print "Suppressing this row"
            continue
        row_course_id = data.get('enrollment_course_id', data.get('certificate_course_id', ''))
        if row_course_id != course_id:
            print "Oops!  course_id=%s in user_info_combo line: inconsistent with expected=%s" % (row_course_id, course_id)
            print "data = %s" % data
            print "Suppressing this row"
            continue
        ofp.write(json.dumps(data) + '\n')
        try:
            ocsv.writerow(data)
        except Exception as err:
            print "failed to write data=%s" % data
            raise
    
    print "Done with make_user_info_combo for %s" % course_id
    sys.stdout.flush()

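The function above leans on one pattern throughout: open a gzipped JSON-lines dump, call json.loads once per line, and merge each record into a per-user dict. A minimal, self-contained Python 3 sketch of that pattern (the file layout and key name are illustrative assumptions, not taken from the project):

import gzip
import json
from collections import defaultdict

def merge_json_gz(path, key='user_id'):
    # One JSON object per line; records sharing the same key are merged.
    merged = defaultdict(dict)
    with gzip.open(path, 'rt', encoding='utf-8') as fp:
        for line in fp:
            record = json.loads(line)
            merged[int(record[key])].update(record)
    return merged

Opening the file in text mode ('rt') hands json.loads decoded str objects, which replaces the explicit line.decode('utf8') calls seen above.
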
Example 42

Project: pagure Source File: test_pagure_flask_api_issue.py
    def test_api_view_issues(self):
        """ Test the api_view_issues method of the flask api. """
        self.test_api_new_issue()

        # Invalid repo
        output = self.app.get('/api/0/foo/issues')
        self.assertEqual(output.status_code, 404)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Project not found",
              "error_code": "ENOPROJECT",
            }
        )

        # List all opened issues
        output = self.app.get('/api/0/test/issues')
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        data['issues'][0]['date_created'] = '1431414800'
        self.assertDictEqual(
            data,
            {
              "args": {
                "assignee": None,
                "author": None,
                "status": None,
                "tags": []
              },
              "total_issues": 1,
              "issues": [
                {
                  "assignee": None,
                  "blocks": [],
                  "comments": [],
                  "content": "This issue needs attention",
                  "date_created": "1431414800",
                  "close_status": None,
                  "closed_at": None,
                  "depends": [],
                  "id": 1,
                  "milestone": None,
                  "priority": None,
                  "private": False,
                  "status": "Open",
                  "tags": [],
                  "title": "test issue",
                  "user": {
                    "fullname": "PY C",
                    "name": "pingou"
                  }
                }
              ]
            }
        )

        # Create private issue
        repo = pagure.lib.get_project(self.session, 'test')
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue',
            content='We should work on this',
            user='pingou',
            ticketfolder=None,
            private=True,
        )
        self.session.commit()
        self.assertEqual(msg.title, 'Test issue')

        # Access issues un-authenticated
        output = self.app.get('/api/0/test/issues')
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        data['issues'][0]['date_created'] = '1431414800'
        self.assertDictEqual(
            data,
            {
              "args": {
                "assignee": None,
                "author": None,
                "status": None,
                "tags": []
              },
              "total_issues": 1,
              "issues": [
                {
                  "assignee": None,
                  "blocks": [],
                  "comments": [],
                  "content": "This issue needs attention",
                  "date_created": "1431414800",
                  "close_status": None,
                  "closed_at": None,
                  "depends": [],
                  "id": 1,
                  "milestone": None,
                  "priority": None,
                  "private": False,
                  "status": "Open",
                  "tags": [],
                  "title": "test issue",
                  "user": {
                    "fullname": "PY C",
                    "name": "pingou"
                  }
                }
              ]
            }
        )
        headers = {'Authorization': 'token aaabbbccc'}

        # Access issues authenticated but non-existing token
        output = self.app.get('/api/0/test/issues', headers=headers)
        self.assertEqual(output.status_code, 401)

        # Create a new token for another user
        item = pagure.lib.model.Token(
            id='bar_token',
            user_id=2,
            project_id=1,
            expiration=datetime.datetime.utcnow() + datetime.timedelta(
                days=30)
        )
        self.session.add(item)

        headers = {'Authorization': 'token bar_token'}

        # Access issues authenticated but wrong token
        output = self.app.get('/api/0/test/issues', headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        data['issues'][0]['date_created'] = '1431414800'
        self.assertDictEqual(
            data,
            {
              "args": {
                "assignee": None,
                "author": None,
                "status": None,
                "tags": []
              },
              "total_issues": 1,
              "issues": [
                {
                  "assignee": None,
                  "blocks": [],
                  "comments": [],
                  "content": "This issue needs attention",
                  "date_created": "1431414800",
                  "close_status": None,
                  "closed_at": None,
                  "depends": [],
                  "id": 1,
                  "milestone": None,
                  "priority": None,
                  "private": False,
                  "status": "Open",
                  "tags": [],
                  "title": "test issue",
                  "user": {
                    "fullname": "PY C",
                    "name": "pingou"
                  }
                }
              ]
            }
        )

        headers = {'Authorization': 'token aaabbbcccddd'}

        # Access issues authenticated correctly
        output = self.app.get('/api/0/test/issues', headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        data['issues'][0]['date_created'] = '1431414800'
        data['issues'][1]['date_created'] = '1431414800'
        self.assertDictEqual(
            data,
            {
              "args": {
                "assignee": None,
                "author": None,
                "status": None,
                "tags": []
              },
              "total_issues": 2,
              "issues": [
                {
                  "assignee": None,
                  "blocks": [],
                  "comments": [],
                  "content": "We should work on this",
                  "date_created": "1431414800",
                  "close_status": None,
                  "closed_at": None,
                  "depends": [],
                  "id": 2,
                  "milestone": None,
                  "priority": None,
                  "private": True,
                  "status": "Open",
                  "tags": [],
                  "title": "Test issue",
                  "user": {
                    "fullname": "PY C",
                    "name": "pingou"
                  }
                },
                {
                  "assignee": None,
                  "blocks": [],
                  "comments": [],
                  "content": "This issue needs attention",
                  "date_created": "1431414800",
                  "close_status": None,
                  "closed_at": None,
                  "depends": [],
                  "id": 1,
                  "milestone": None,
                  "priority": None,
                  "private": False,
                  "status": "Open",
                  "tags": [],
                  "title": "test issue",
                  "user": {
                    "fullname": "PY C",
                    "name": "pingou"
                  }
                }
              ]
            }
        )

        # List closed issues
        output = self.app.get('/api/0/test/issues?status=Closed', headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "args": {
                "assignee": None,
                "author": None,
                "status": "Closed",
                "tags": []
              },
              "total_issues": 0,
              "issues": []
            }
        )

        # List issues with an invalid status
        output = self.app.get('/api/0/test/issues?status=Invalid', headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "args": {
                "assignee": None,
                "author": None,
                "status": "Invalid",
                "tags": []
              },
              "total_issues": 0,
              "issues": []
            }
        )

        # List all issues
        output = self.app.get('/api/0/test/issues?status=All', headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        data['issues'][0]['date_created'] = '1431414800'
        data['issues'][1]['date_created'] = '1431414800'
        self.assertDictEqual(
            data,
            {
                "args": {
                    "assignee": None,
                    "author": None,
                    "status": "All",
                    "tags": []
                },
                "total_issues": 2,
                "issues": [
                    {
                        "assignee": None,
                        "blocks": [],
                        "comments": [],
                        "content": "We should work on this",
                        "date_created": "1431414800",
                        "close_status": None,
                        "closed_at": None,
                        "depends": [],
                        "id": 2,
                        "milestone": None,
                        "priority": None,
                        "private": True,
                        "status": "Open",
                        "tags": [],
                        "title": "Test issue",
                        "user": {
                            "fullname": "PY C",
                            "name": "pingou"
                        }
                    },
                    {
                        "assignee": None,
                        "blocks": [],
                        "comments": [],
                        "content": "This issue needs attention",
                        "date_created": "1431414800",
                        "close_status": None,
                        "closed_at": None,
                        "depends": [],
                        "id": 1,
                        "milestone": None,
                        "priority": None,
                        "private": False,
                        "status": "Open",
                        "tags": [],
                        "title": "test issue",
                        "user": {
                            "fullname": "PY C",
                            "name": "pingou"
                        }
                    }
                ],
            }
        )

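Every assertion block in this test starts the same way: json.loads applied to output.data, the raw body returned by Flask's test client. A tiny self-contained sketch of that round-trip (the route and payload are invented for illustration):

import json
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/api/0/ping')
def ping():
    return jsonify({'message': 'pong'})

client = app.test_client()
output = client.get('/api/0/ping')
assert output.status_code == 200
data = json.loads(output.data)  # output.data is the raw (bytes) response body
assert data == {'message': 'pong'}
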
Example 43

Project: telepot Source File: __init__.py
    def message_loop(self, callback=None, relax=0.1, timeout=20, source=None, ordered=True, maxhold=3, run_forever=False):
        """
        Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
        Apply ``callback`` to every message received. Also starts the scheduler thread
        for internal events.

        :param callback:
            a function that takes one argument (the message), or a routing table.
            If ``None``, the bot's ``handle`` method is used.

        A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
        handler functions according to their flavors. It allows you to define functions specifically
        to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
        'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
        one argument (the message).

        :param source:
            Source of updates.
            If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
            If it is a synchronized queue (``Queue.Queue`` in Python 2.7 or
            ``queue.Queue`` in Python 3), new messages are pulled from the queue.
            A web application implementing a webhook can dump updates into the queue,
            while the bot pulls from it. This is how telepot can be integrated with webhooks.

        Acceptable contents in queue:

        - ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
          representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
        - a ``dict`` representing an Update object.

        When ``source`` is ``None``, these parameters are meaningful:

        :type relax: float
        :param relax: seconds between each ``getUpdates``

        :type timeout: int
        :param timeout:
            ``timeout`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
            controlling how long to poll.

        When ``source`` is a queue, these parameters are meaningful:

        :type ordered: bool
        :param ordered:
            If ``True``, ensure in-order delivery of messages to ``callback``
            (i.e. updates with a smaller ``update_id`` always come before those with
            a larger ``update_id``).
            If ``False``, no re-ordering is done. ``callback`` is applied to messages
            as soon as they are pulled from queue.

        :type maxhold: float
        :param maxhold:
            Applied only when ``ordered`` is ``True``. The maximum number of seconds
            an update is held waiting for a not-yet-arrived smaller ``update_id``.
            When this number of seconds is up, the update is delivered to ``callback``
            even if some smaller ``update_id``\s have not yet arrived. If those smaller
            ``update_id``\s arrive at some later time, they are discarded.

        Finally, there is this parameter, meaningful always:

        :type run_forever: bool or str
        :param run_forever:
            If ``True`` or any non-empty string, append an infinite loop at the end of
            this method, so it never returns. Useful as the very last line in a program.
            A non-empty string will also be printed, useful as an indication that the
            program is listening.
        """
        if callback is None:
            callback = self.handle
        elif isinstance(callback, dict):
            callback = flavor_router(callback)

        collect_queue = queue.Queue()

        def collector():
            while 1:
                try:
                    item = collect_queue.get(block=True)
                    callback(item)
                except:
                    # Localize error so thread can keep going.
                    traceback.print_exc()

        def relay_to_collector(update):
            key = _find_first_key(update, ['message',
                                           'edited_message',
                                           'callback_query',
                                           'inline_query',
                                           'chosen_inline_result'])
            collect_queue.put(update[key])
            return update['update_id']

        def get_from_telegram_server():
            offset = None  # running offset
            while 1:
                try:
                    result = self.getUpdates(offset=offset, timeout=timeout)

                    if len(result) > 0:
                        # No sort. Trust server to give messages in correct order.
                        # Update offset to max(update_id) + 1
                        offset = max([relay_to_collector(update) for update in result]) + 1
                except:
                    traceback.print_exc()
                finally:
                    time.sleep(relax)

        def dictify3(data):
            if type(data) is bytes:
                return json.loads(data.decode('utf-8'))
            elif type(data) is str:
                return json.loads(data)
            elif type(data) is dict:
                return data
            else:
                raise ValueError()

        def dictify27(data):
            if type(data) in [str, unicode]:
                return json.loads(data)
            elif type(data) is dict:
                return data
            else:
                raise ValueError()

        def get_from_queue_unordered(qu):
            dictify = dictify3 if sys.version_info >= (3,) else dictify27
            while 1:
                try:
                    data = qu.get(block=True)
                    update = dictify(data)
                    relay_to_collector(update)
                except:
                    traceback.print_exc()

        def get_from_queue(qu):
            dictify = dictify3 if sys.version_info >= (3,) else dictify27

            # Here is the re-ordering mechanism, ensuring in-order delivery of updates.
            max_id = None                 # max update_id passed to callback
            buffer = collections.deque()  # keep those updates which skip some update_id
            qwait = None                  # how long to wait for updates,
                                          # because buffer's content has to be returned in time.

            while 1:
                try:
                    data = qu.get(block=True, timeout=qwait)
                    update = dictify(data)

                    if max_id is None:
                        # First message received, handle regardless.
                        max_id = relay_to_collector(update)

                    elif update['update_id'] == max_id + 1:
                        # No update_id skipped, handle naturally.
                        max_id = relay_to_collector(update)

                        # clear contiguous updates in buffer
                        if len(buffer) > 0:
                            buffer.popleft()  # first element belongs to update just received, useless now.
                            while 1:
                                try:
                                    if type(buffer[0]) is dict:
                                        max_id = relay_to_collector(buffer.popleft())  # updates that arrived earlier, handle them.
                                    else:
                                        break  # gap, no more contiguous updates
                                except IndexError:
                                    break  # buffer empty

                    elif update['update_id'] > max_id + 1:
                        # Update arrives prematurely; insert it into the buffer.
                        nbuf = len(buffer)
                        if update['update_id'] <= max_id + nbuf:
                            # buffer long enough, put update at position
                            buffer[update['update_id'] - max_id - 1] = update
                        else:
                            # buffer too short, lengthen it
                            expire = time.time() + maxhold
                            for a in range(nbuf, update['update_id']-max_id-1):
                                buffer.append(expire)  # put expiry time in gaps
                            buffer.append(update)

                    else:
                        pass  # discard

                except queue.Empty:
                    # debug message
                    # print('Timeout')

                    # some buffer contents have to be handled
                    # flush buffer until a non-expired time is encountered
                    while 1:
                        try:
                            if type(buffer[0]) is dict:
                                max_id = relay_to_collector(buffer.popleft())
                            else:
                                expire = buffer[0]
                                if expire <= time.time():
                                    max_id += 1
                                    buffer.popleft()
                                else:
                                    break  # non-expired
                        except IndexError:
                            break  # buffer empty
                except:
                    traceback.print_exc()
                finally:
                    try:
                        # don't wait longer than next expiry time
                        qwait = buffer[0] - time.time()
                        if qwait < 0:
                            qwait = 0
                    except IndexError:
                        # buffer empty, can wait forever
                        qwait = None

                    # debug message
                    # print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)

        collector_thread = threading.Thread(target=collector)
        collector_thread.daemon = True
        collector_thread.start()

        if source is None:
            message_thread = threading.Thread(target=get_from_telegram_server)
        elif isinstance(source, queue.Queue):
            if ordered:
                message_thread = threading.Thread(target=get_from_queue, args=(source,))
            else:
                message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
        else:
            raise ValueError('Invalid source')

        message_thread.daemon = True  # need this for main thread to be killable by Ctrl-C
        message_thread.start()

        self._scheduler._output_queue = collect_queue
        self._scheduler.daemon = True
        self._scheduler.start()

        if run_forever:
            if _isstring(run_forever):
                print(run_forever)
            while 1:
                time.sleep(10)

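The dictify3/dictify27 helpers above exist to normalize whatever arrives on the queue (bytes, str, or an already-parsed dict) into a dict before dispatch. A version-independent sketch of the same coercion (the function name is ours):

import json

def to_update_dict(data):
    # Coerce a queue item into a dict representing a Telegram Update.
    if isinstance(data, bytes):
        return json.loads(data.decode('utf-8'))
    if isinstance(data, str):
        return json.loads(data)
    if isinstance(data, dict):
        return data
    raise ValueError('unsupported queue item: %r' % (data,))

assert to_update_dict(b'{"update_id": 1}') == {'update_id': 1}
assert to_update_dict('{"update_id": 2}') == {'update_id': 2}
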
Example 44

Project: ops-server-config Source File: BuildSceneCache.py
def main(argv=None):

    exit_err_code = 1
    
    total_success = True
    
    # Print/get script arguments
    results = print_args()
    if not results:
        sys.exit(exit_err_code)
    serverName, username, password, selectedServiceNames = results
    
    # Get/generate a token from the Portal sharing API. Use the secure (https) 7443 port.
    portalPort = 7443
    token = getToken(username, password, serverName, portalPort)
    #print token
    if token == "":
        print "Could not generate a token with the username and password provided."
        sys.exit(exit_err_code)
    else:
        # Exit if the sharing API returned an error object instead of a token
        if 'error' in token and 'code' in token['error']:
            sys.exit(exit_err_code)

    print '\n{}'.format(sectionBreak)
    print 'Build Scene Service Cache'
    print sectionBreak
        
    # Get the list of (scene) services available from arcgis server
    serverPort = 6443
    HostedServiceEndpnt = '/arcgis/rest/services/Hosted'
    data = getJsonResponse(serverName, username, password, HostedServiceEndpnt, token, serverPort)    
    obj = json.loads(data)
    
    # Service name comes in the form "Hosted/Buildings"; drop the folder name 'Hosted'
    sceneServicenames = []
    for service in obj['services']:
        for key, value in service.iteritems():
            if (key == 'name'):                
                if (value[:7] == 'Hosted/'):
                    name = str(value[7:])
                else:
                    name = str(value)
            if (key == 'type'):
                if (value == 'SceneServer'):
                    sceneServicenames.append(name)
    
    if len(sceneServicenames) == 0:
        print '\nWARNING: Server {} does not have any scene services. Exiting script.'.format(serverName)
        sys.exit(0)

    if selectedServiceNames is None:
        selectedServiceNames = sceneServicenames

    # Validate if specified scene services exist
    invalidServiceNames = []
    for selectedServiceName in selectedServiceNames:
        if selectedServiceName not in sceneServicenames:
            invalidServiceNames.append(selectedServiceName)

    if len(invalidServiceNames) > 0:
        print '\nERROR: the following specified scene services do not exist:'
        print invalidServiceNames
        sys.exit(exit_err_code)
        
    if len(selectedServiceNames) > 0:
        print '\nList of scene services to cache:'
        for serviceName in selectedServiceNames:
            print serviceName
        
    for serviceName in selectedServiceNames:
        print '\n{}'.format(sectionBreak1)
        print serviceName
        print sectionBreak1
        
        # Todo: reject name if it doesn't match existing service
        service_url = 'https://{}/arcgis/rest/services/Hosted/{}/SceneServer'.format(serverName, serviceName)
        
        # For now, let's just comment out the code to retrieve, list, and
        # allow the user to specify which layers in the scene service to cache
        
        # # Get all layer id and names for serviceName provided by user and determine if cache is to be built only for specific layer/s (default is to build cache for all layers)     
        # ServiceEndpnt = '/arcgis/rest/services/Hosted/{}/SceneServer'.format(serviceName)
        # data = getJsonResponse(serverName, username, password, ServiceEndpnt, token, serverPort)    
        # obj = json.loads(data)
        # 
        # print ('Below are the list of layers available for caching as acquired from :' + '\n'
        #        'https://' + serverName + ServiceEndpnt + '?f=pjson' + '\n')
        # 
        # print ('If caching of a specific layer/s is desired just enter the layerID/s from the list below for the layer/s you are interested in caching.' + '\n'
        #        'Default (if set to -1 or is not specified) is to cache all layers.' + '\n')
        # 
        # print ('LayerName : LayerID')
        # 
        # layerids = []
        # layernames = []
        # for layers in obj['layers']:        
        #     for key, value in layers.iteritems():
        #         if (key == 'id'):                                
        #             layerids.append(int(value))
        #         if (key == 'name'):
        #             layernames.append(str(value))        
        # print_data(layernames, layerids)   
        # 
        # layerIDs = []
        # if (len(selectedServiceNames) == 1):
        #     layerIDs = raw_input("Enter the layer id(s) of the layer(s) you\'d like to cache separated by comma. (ex.'3,5,8')")
        # else:
        #     layerIDs = '-1'
        #     print 'Multiple services selected for caching. Selecting of individual layers to cache is disabled.'
        # 
        # if (str(layerIDs) == '-1' or len(str(layerIDs)) <= 0):
        #     layer = "{}"
        # else:        
        #     layerIDIntlist = [int(e) if e.isdigit() else e for e in layerIDs.split(',')]
        #     layerJson = json_list(layerIDIntlist, 'id')
        #     layer = '{"layers":%s}' % (layerJson)
        
        # Build cache for all layers
        layer = '{}'
        
        # Construct the parameters to submit to the 'Manage Scene cache' tool
        num_of_caching_service_instances = 2
        update_mode = 'RECREATE_ALL_NODES'
        returnZ = 'false'
        update_extent = 'DEFAULT'
        area_of_interest = ''
        params = urllib.urlencode({'token': token, 'f': 'json', 'service_url': service_url,
                                   'num_of_caching_service_instances' : num_of_caching_service_instances,
                                    'layer': layer, 'update_mode': update_mode, 'returnZ': returnZ,
                                    'update_extent': update_extent, 'area_of_interest': area_of_interest})
        
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "Referer": script_referrer}

        # Format the GP service tool url
        SceneCachingToolURL = '/arcgis/rest/services/System/SceneCachingControllers/GPServer/Manage%20Scene%20Cache'
        submitJob = '{}/submitJob'.format(SceneCachingToolURL)
        # Connect to URL and post parameters (using https!)
        # Set the port to 6443 since Portal communicates with the server via the secure (https) server port
        # Note: if the federated server is running on a different machine than the portal, change the 'serverName' parameter below accordingly.
        
        httpConn = httplib.HTTPSConnection(serverName, serverPort)
        httpConn.request("POST", submitJob, params, headers)
        
        # Read response
        response = httpConn.getresponse()
        if (response.status != 200):
            httpConn.close()
            print response.reason        
            return
        else:
            data = response.read()
            httpConn.close()

            # Check that data returned is not an error object
            if not assertJsonSuccess(data):          
                print 'Error returned by operation. ' + data
            else:
                print 'Scene Caching Job Submitted successfully!'
                print 'Caching Job status updates every {} seconds...'.format(cacheJobStatusUpdateFreq)
         
                # Extract the jobID from it
                jobid = json.loads(data)            
                guidJobId = str(jobid['jobId'])
                print 'JobID: {}'.format(guidJobId)
                
                # Get the job status from the tool.
                SceneCachingToolJobsURL = '{}/jobs/{}'.format(SceneCachingToolURL, guidJobId)
                # Check the status of the result object every n seconds until it stops executing.
                result = True
                while result:
                    time.sleep(cacheJobStatusUpdateFreq)
                    result = getJobStatusMessage(serverName, username, password, SceneCachingToolJobsURL, token, serverPort)
                #return

    print '\n\nScript {} completed.\n'.format(scriptName)
    sys.exit(0)

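Stripped of the service specifics, Example 44 is: POST form-encoded parameters, json.loads the response to extract a jobId, then poll the jobs endpoint until the status settles. A compact standard-library sketch of that shape (the URL layout and the 'jobId'/'jobStatus' keys mirror the ArcGIS conventions used above but are assumptions here):

import json
import time
import urllib.parse
import urllib.request

def submit_and_poll(base_url, params, interval=5):
    # Submit the job; the service answers with a JSON object holding a jobId.
    body = urllib.parse.urlencode(params).encode('ascii')
    with urllib.request.urlopen(base_url + '/submitJob', data=body) as resp:
        job = json.loads(resp.read().decode('utf-8'))
    # Poll until the job leaves the submitted/executing states.
    job_url = '{}/jobs/{}?f=json'.format(base_url, job['jobId'])
    while True:
        with urllib.request.urlopen(job_url) as resp:
            status = json.loads(resp.read().decode('utf-8'))
        if status.get('jobStatus') not in ('esriJobSubmitted', 'esriJobExecuting'):
            return status
        time.sleep(interval)
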
Example 45

Project: ru Source File: default.py
def GetVideo(url):
	if re.search('vk\.com|vkontakte\.ru', url):
		http = GET(url)
		soup = bs(http, from_encoding = "windows-1251")
		#sdata1 = soup.find('div', class_ = "scroll_fix_wrap", id = "page_wrap")
		rmdata = soup.find('div', style = "position:absolute; top:50%; text-align:center; right:0pt; left:0pt; font-family:Tahoma; font-size:12px; color:#FFFFFF;")
		if rmdata:
			rmdata = rmdata.find('div', style = False, class_ = False)
			if rmdata.br: rmdata.br.replace_with(" ")
			rmdata = "".join(list(rmdata.strings)).strip().encode('utf-8')
			print rmdata
			vk_email = Addon.getSetting('vk_email')
			vk_pass = Addon.getSetting('vk_pass')
			if 'изъято' in rmdata or not vk_email:  # 'изъято' = the video was taken down
				ShowMessage("ВКонтакте", rmdata, times = 20000)
				return False
			oid, id = re.findall('oid=([-0-9]*)&id=([0-9]*)', url)[0]
			url = 'http://vk.com/video' + oid + '_' + id
			#print url
			from vk_auth import vk_auth as vk
			vks = vk(vk_email, vk_pass)
			crid = vks.get_remixsid_cookie()
			if crid:
				if debug_mode: ShowMessage("ВКонтакте", "Применена авторизация")  # "Authorization applied"
			else:
				ShowMessage("ВКонтакте", "Ошибка авторизации")
				print "ошибка авторизации вконтакте"
				return False
			#print crid
			html = GET(url, headers = {"Cookie": crid})
			#print html
			rec = re.findall('var vars = ({.+?});', html)
			if rec:
				rec = rec[0]
				rec = rec.replace('\\', '')
			else:
				ShowMessage("ВКонтакте", "Видео недоступно")
				#print "видео недоступно"
				#if gebug_mode: print html
				return False
			#print 'rec: ' + str(rec)
			fvs = json.loads(rec, encoding = "windows-1251")
			#print json.dumps(fvs, indent = 1).encode('utf-8')
		else:
			rec = soup.find_all('param', {'name': 'flashvars'})[0]['value']
			fvs = urlparse.parse_qs(rec)
		#print json.dumps(fvs, indent = 1).encode('utf-8')
		uid = fvs['uid'][0]
		vtag = fvs['vtag'][0]
		#host = fvs['host'][0]
		#vid = fvs['vid'][0]
		#oid = fvs['oid'][0]
		q_list = {None: '240', '1': '360', '2': '480', '3': '720'}
		hd = fvs['hd'] if 'hd' in fvs else None
		if isinstance(hd, list): hd = hd[0]
		if isinstance(hd, float): hd = str(int(hd))
		print q_list[hd] + "p"
		#burl = host + 'u' + uid + '/videos/' + vtag + '.%s.mp4'
		#q_url_map = {q: burl % q for q in q_list.values()}
		#print q_url_map
		url = fvs['url' + q_list[hd]]
		if isinstance(url, list): url = url[0]
		#url = url.replace('vk.me', 'vk.com')
		sr = urlparse.urlsplit(url)
		if not IsIPv4(sr[1]):
			ipv = '6'
			url = url.replace('v6', '', 1)
		else: ipv = '4'
		if debug_mode: print 'IPv' + ipv
		#print url
		return url
	
	elif re.search('moonwalk\.cc|37\.220\.36\.\d{1,3}|serpens\.nl', url):
		page = GET(url)
		token = re.findall("video_token: '(.*?)'", page)[0]
		access_key = re.findall("access_key: '(.*?)'", page)[0]
		d_id = re.findall("d_id: (\d*)", page)[0]
		#referer = re.findall(r'player_url = "(.+?\.swf)";', page)[0]
		referer = url
		post = urllib.urlencode({"video_token": token, "access_key": access_key, "d_id": d_id, "content_type": 'movie'})
		#print post
		page = GET('http://moonwalk.cc/sessions/create_session', post = post, opts = 'xmlhttp', ref = url, headers = None)
		#print page
		page = json.loads(page)
		if use_ahds:
			url = page["manifest_f4m"]
		else:
			url = page["manifest_m3u8"]
		
		headers = {'User-Agent': UA, 'Connection': 'Keep-Alive', 'Referer': 'http://37.220.36.28/static/player/player_base.swf'}
		url += '|' + urllib.urlencode(headers)
		#print url
		return url
	
	elif 'rutube.ru' in url:
		data = GET(url)
		#print data
		import HTMLParser
		hp = HTMLParser.HTMLParser()
		data = hp.unescape(data)
		match = re.compile('"m3u8": "(.+?)"').findall(data)
		#print match
		if len(match) > 0:
			url = match[0]
			return url
	
	elif re.search('api\.video\.mail\.ru|videoapi\.my\.mail\.ru', url):
		data = GET(url)
		#match = re.compile('videoSrc = "(.+?)",').findall(data)
		match = re.compile('"metadataUrl":"(.+?)"').findall(data)
		if len(match) > 0:
			url = match[0]
		else:
			print "Mail.ru video parser is failed"
			ShowMessage(addon_name, "Mail.ru video parser is failed")
			return False
		data = GET(url, opts = 'headers')
		video_key_c = data[1].getheader('Set-Cookie')
		video_key_c = re.compile('(video_key=.+?;)').findall(video_key_c)
		if len(video_key_c) > 0:
			video_key_c = video_key_c[0]
		else:
			print "Mail.ru video parser is failed"
			ShowMessage(addon_name, "Mail.ru video parser is failed")
			return False
		jsdata = json.loads(data[0])
		vlist = jsdata['videos']
		vlist.sort(key = lambda i: i['key'])
		vdata = vlist[-1]
		url = vdata['url']
		headers = {'Cookie': video_key_c}
		url += '|' + urllib.urlencode(headers)
		return url
	
	elif 'youtube.com' in url:
		if '/embed/' in url:
			if debug_mode: print 'embed'
			video_id = re.findall('embed/(.+)\??', url)[0]
		else:
			finder = url.find('=')
			video_id = url[finder + 1:]
		url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % (video_id)
		print url
		return url
	
	elif re.search('moevideo\.net|playreplay\.net|videochart\.net', url):
		o = urlparse.urlparse(url)
		#print o
		uid = re.findall('http://(?:.+?)/framevideo/(.+?)\?', url)
		if uid: uid = uid[0]
		post = urllib.urlencode({"r": '[["file/flv_link",{"uid":"%s"}]]' % (uid)})
		purl = urlparse.urlunsplit((o.scheme, o.netloc, '/data', '' , ''))
		#print purl
		page = GET(purl, post = post)
		#print page
		page = json.loads(page)
		#print json.dumps(page, indent = 1).encode('utf-8')
		url = page['data'][0]['link']
		return url
		
	else:
		ShowMessage(addon_name, "Неизвестный видеохостинг: " + url)  # "Unknown video host: "
		print "Неизвестный видеохостинг: " + url
		return False

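A pattern worth isolating from the vk.com branch above: pull a JavaScript object literal out of the page with a regex, then hand it to json.loads. (The encoding= argument the example passes was only meaningful on Python 2; it was ignored on Python 3 and removed from json.loads in 3.9.) A minimal sketch on made-up HTML:

import json
import re

html = '<script>var vars = {"uid": "42", "vtag": "abc", "hd": "2"};</script>'
match = re.search(r'var vars = ({.+?});', html)
if match:
    # Works only when the scraped literal happens to be valid JSON.
    flashvars = json.loads(match.group(1))
    print(flashvars['vtag'])  # -> abc
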
Example 46

Project: cricket Source File: executor.py
    def poll(self):
        "Poll the runner looking for new test output"
        stopped = False
        finished = False

        # Read from stdout, building a buffer.
        lines = []
        try:
            while True:
                lines.append(self.stdout.get(block=False))
        except Empty:
            # queue.get() raises an exception when the queue is empty.
            # This means there is no more output to consume at this time.
            pass

        # Read from stderr, building a buffer.
        try:
            while True:
                self.error_buffer.append(self.stderr.get(block=False))
        except Empty:
            # queue.get() raises an exception when the queue is empty.
            # This means there is no more output to consume at this time.
            pass

        # Check to see if the subprocess is still running.
        # If it isn't, raise an error.
        if self.proc is None:
            stopped = True
        elif self.proc.poll() is not None:
            stopped = True

        # Process all the full lines that are available
        for line in lines:
            # Look for a separator.
            if line in (PipedTestResult.RESULT_SEPARATOR, PipedTestRunner.START_TEST_RESULTS, PipedTestRunner.END_TEST_RESULTS):
                if self.buffer is None:
                    # Preamble is finished. Set up the line buffer.
                    self.buffer = []
                else:
                    # Start of new test result; record the last result
                    # Then, work out what content goes where.
                    pre = json.loads(self.buffer[0])
                    post = json.loads(self.buffer[1])

                    if post['status'] == 'OK':
                        status = TestMethod.STATUS_PASS
                        error = None
                    elif post['status'] == 's':
                        status = TestMethod.STATUS_SKIP
                        error = 'Skipped: ' + post.get('error')
                    elif post['status'] == 'F':
                        status = TestMethod.STATUS_FAIL
                        error = post.get('error')
                    elif post['status'] == 'x':
                        status = TestMethod.STATUS_EXPECTED_FAIL
                        error = post.get('error')
                    elif post['status'] == 'u':
                        status = TestMethod.STATUS_UNEXPECTED_SUCCESS
                        error = None
                    elif post['status'] == 'E':
                        status = TestMethod.STATUS_ERROR
                        error = post.get('error')

                    # Increase the count of executed tests
                    self.completed_count = self.completed_count + 1

                    # Get the start and end times for the test
                    start_time = float(pre['start_time'])
                    end_time = float(post['end_time'])

                    self.current_test.description = post['description']

                    self.current_test.set_result(
                        status=status,
                        output=post.get('output'),
                        error=error,
                        duration=end_time - start_time,
                    )

                    # Work out how long the suite has left to run (approximately)
                    if self.start_time is None:
                        self.start_time = start_time
                    total_duration = end_time - self.start_time
                    time_per_test = total_duration / self.completed_count
                    remaining_time = (self.total_count - self.completed_count) * time_per_test
                    if remaining_time > 4800:
                        remaining = '%s hours' % int(remaining_time / 2400)
                    elif remaining_time > 2400:
                        remaining = '%s hour' % int(remaining_time / 2400)
                    elif remaining_time > 120:
                        remaining = '%s mins' % int(remaining_time / 60)
                    elif remaining_time > 60:
                        remaining = '%s min' % int(remaining_time / 60)
                    else:
                        remaining = '%ss' % int(remaining_time)

                    # Update test result counts
                    self.result_count.setdefault(status, 0)
                    self.result_count[status] = self.result_count[status] + 1

                    # Notify the display to update.
                    self.emit('test_end', test_path=self.current_test.path, result=status, remaining_time=remaining)

                    # Clear the decks for the next test.
                    self.current_test = None
                    self.buffer = []

                    if line == PipedTestRunner.END_TEST_RESULTS:
                        # End of test execution.
                        # Mark the runner as finished, and move back
                        # to a pre-test state in the results.
                        finished = True
                        self.buffer = None

            else:
                # Not a separator line, so it's actual content.
                if self.buffer is None:
                    # Suite isn't running yet - just display the output
                    # as a status update line.
                    self.emit('test_status_update', update=line)
                else:
                    # Suite is running - have we got an active test?
                    # Doctest (and some other tools) output invisible escape sequences.
                    # Strip these if they exist.
                    if line.startswith('\x1b'):
                        line = line[line.find('{'):]

                    # Store the cleaned buffer
                    self.buffer.append(line)

                    # If we don't have a currently active test, this line will
                    # contain the path for the test.
                    if self.current_test is None:
                        try:
                            # No active test; first line tells us which test is running.
                            pre = json.loads(line)
                        except ValueError:
                            self.emit('suite_end')
                            return True
                        self.current_test = self.project.confirm_exists(pre['path'])
                        self.emit('test_start', test_path=pre['path'])
        # If we're not finished, requeue the event.
        if finished:
            if self.error_buffer:
                self.emit('suite_end', error='\n'.join(self.error_buffer))
            else:
                self.emit('suite_end')
            return False

        elif stopped:
            # Suite has stopped producing output.
            if self.error_buffer:
                self.emit('suite_error', error='\n'.join(self.error_buffer))
            else:
                self.emit('suite_error', error='Test output ended unexpectedly')

            # Suite has finished; don't requeue
            return False

        else:
            # Still running - requeue event.
            return True

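Example 46 buffers two JSON lines per test (one emitted before the test body runs, one after) and json.loads each to combine timing with status. A stripped-down sketch of pairing such records (the field names follow the example; the input lines are invented):

import json

pre_line = '{"path": "tests.test_x", "start_time": 1.0}'
post_line = '{"status": "OK", "end_time": 3.5, "description": "x"}'

pre = json.loads(pre_line)
post = json.loads(post_line)
duration = float(post['end_time']) - float(pre['start_time'])
print('%s: %s in %.1fs' % (pre['path'], post['status'], duration))
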
Example 47

Project: pagure Source File: test_pagure_flask_api_issue.py
    @patch('pagure.lib.git.update_git')
    @patch('pagure.lib.notify.send_email')
    def test_api_comment_issue(self, p_send_email, p_ugt):
        """ Test the api_comment_issue method of the flask api. """
        p_send_email.return_value = True
        p_ugt.return_value = True

        tests.create_projects(self.session)
        tests.create_tokens(self.session)
        tests.create_tokens_acl(self.session)

        headers = {'Authorization': 'token aaabbbcccddd'}

        # Invalid project
        output = self.app.post('/api/0/foo/issue/1/comment', headers=headers)
        self.assertEqual(output.status_code, 404)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Project not found",
              "error_code": "ENOPROJECT",
            }
        )

        # Valid token, wrong project
        output = self.app.post('/api/0/test2/issue/1/comment', headers=headers)
        self.assertEqual(output.status_code, 401)
        data = json.loads(output.data)
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.name,
                         data['error_code'])
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data['error'])

        # No input
        output = self.app.post('/api/0/test/issue/1/comment', headers=headers)
        self.assertEqual(output.status_code, 404)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Issue not found",
              "error_code": "ENOISSUE",
            }
        )

        # Create normal issue
        repo = pagure.lib.get_project(self.session, 'test')
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue #1',
            content='We should work on this',
            user='pingou',
            ticketfolder=None,
            private=False,
            issue_uid='aaabbbccc#1',
        )
        self.session.commit()
        self.assertEqual(msg.title, 'Test issue #1')

        # Check comments before
        repo = pagure.lib.get_project(self.session, 'test')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 0)

        data = {
            'title': 'test issue',
        }

        # Incomplete request
        output = self.app.post(
            '/api/0/test/issue/1/comment', data=data, headers=headers)
        self.assertEqual(output.status_code, 400)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {
              "error": "Invalid or incomplete input submited",
              "error_code": "EINVALIDREQ",
            }
        )

        # No change
        repo = pagure.lib.get_project(self.session, 'test')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(issue.status, 'Open')

        data = {
            'comment': 'This is a very interesting question',
        }

        # Valid request
        output = self.app.post(
            '/api/0/test/issue/1/comment', data=data, headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {'message': 'Comment added'}
        )

        # One comment added
        repo = pagure.lib.get_project(self.session, 'test')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 1)

        # Create another project
        item = pagure.lib.model.Project(
            user_id=2,  # foo
            name='foo',
            description='test project #3',
            hook_token='aaabbbdddeee',
        )
        self.session.add(item)
        self.session.commit()

        # Create a token for pingou for this project
        item = pagure.lib.model.Token(
            id='pingou_foo',
            user_id=1,
            project_id=3,
            expiration=datetime.datetime.utcnow() + datetime.timedelta(
                days=30)
        )
        self.session.add(item)
        self.session.commit()

        # Give `issue_change_status` to this token when `issue_comment`
        # is required
        item = pagure.lib.model.TokenAcl(
            token_id='pingou_foo',
            acl_id=2,
        )
        self.session.add(item)
        self.session.commit()

        repo = pagure.lib.get_project(self.session, 'foo')
        # Create private issue
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue',
            content='We should work on this',
            user='foo',
            ticketfolder=None,
            private=True,
            issue_uid='aaabbbccc#2',
        )
        self.session.commit()
        self.assertEqual(msg.title, 'Test issue')

        # Check before
        repo = pagure.lib.get_project(self.session, 'foo')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 0)

        data = {
            'comment': 'This is a very interesting question',
        }
        headers = {'Authorization': 'token pingou_foo'}

        # Valid request but un-authorized
        output = self.app.post(
            '/api/0/foo/issue/1/comment', data=data, headers=headers)
        self.assertEqual(output.status_code, 401)
        data = json.loads(output.data)
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.name,
                         data['error_code'])
        self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data['error'])

        # No comment added
        repo = pagure.lib.get_project(self.session, 'foo')
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        self.assertEqual(len(issue.comments), 0)

        # Create token for user foo
        item = pagure.lib.model.Token(
            id='foo_token2',
            user_id=2,
            project_id=3,
            expiration=datetime.datetime.utcnow() + datetime.timedelta(days=30)
        )
        self.session.add(item)
        self.session.commit()
        tests.create_tokens_acl(self.session, token_id='foo_token2')

        data = {
            'comment': 'This is a very interesting question',
        }
        headers = {'Authorization': 'token foo_token2'}

        # Valid request and authorized
        output = self.app.post(
            '/api/0/foo/issue/1/comment', data=data, headers=headers)
        self.assertEqual(output.status_code, 200)
        data = json.loads(output.data)
        self.assertDictEqual(
            data,
            {'message': 'Comment added'}
        )

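Like Example 42, this test repeats one decode-and-assert step on the API's error payloads. A small helper capturing that step (the payload shape copies the tests; the helper itself is ours):

import json

def assert_api_error(raw_body, expected_code):
    # Decode a JSON error payload and check its error_code field.
    data = json.loads(raw_body)
    assert data['error_code'] == expected_code, data
    return data

assert_api_error(
    b'{"error": "Project not found", "error_code": "ENOPROJECT"}',
    'ENOPROJECT')
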
Example 48

Project: wotdecoder Source File: wotdecoder.py
def replay(filename, to_decode):
# filename= name of .wotreplay file
# to_decode= bitmask of chunks you want decoded.
# We do not just count blocks as they are in replay files. Instead we always decode:
# Bit 0 = first Json block, starting player list
# Bit 1 = second Json block, simplified frag count
# Bit 2 = pickle, proper battle result with damage numbers
# 7 (binary 111) means decode all three. 5 (binary 101) means decode the first Json and the pickle.
#
# returns decoded_chunks[0:3], chunks bitmask, decoder status

  while True:
    wot_replay_magic_number = "12323411"
    blocks = 0
    first_chunk_decoded = {}
    second_chunk_decoded = {}
    third_chunk_decoded = {}
    chunks_bitmask = 0
    filesize = os.path.getsize(filename)
    if filesize<12: processing =10; break
    f = open(filename, "rb")
    if f.read(4)!=bytes.fromhex(wot_replay_magic_number): processing =11; break  
    blocks = struct.unpack("i",f.read(4))[0]

# 8.1 Adds new unencrypted Python pickle block containing your match stats
# Before 8.1 (< 20121101)
#  Json + binary = 1 = incomplete.
#  Json + Json + binary = 2 = complete.
# After  8.1 (>=20121101)
#  Json + binary = 1 = incomplete.
#  Json + pickle + binary = 2 = incomplete, but you looked at 'Battle Result' screen and replay got updated.
#  Json + Json + pickle + binary = 3 = complete.
# Some oddities:
#  Json + Json + ~8 bytes = 2 = incomplete, game crashed somewhere, second Json has game result, but we are missing Pickle
#
# The proper way to detect the replay version is to decrypt and decompress the binary part, but that is too slow.
# Instead I am using the Date to estimate the version in a very crude way. It is only accurate down to a day and
# doesn't take the player timezone into consideration, so I need to double check replays saved at 20121101.
# Still faster than decrypting and unzipping 1MB files.


    first_size = struct.unpack("i",f.read(4))[0]
#    print (first_size, filename)

    if filesize < (12+first_size+4): processing =10; break

    if (blocks == 1) and (not (to_decode&1)): processing =1; break

    first_chunk = f.read(first_size)
    if first_chunk[0:1] != b'{': processing =13; break
    first_chunk_decoded = json.loads(first_chunk.decode('utf-8'))
    chunks_bitmask = 1

    if blocks == 1: processing =1; break
    if ((blocks!=2) and (blocks!=3)): processing =16; break

    replaydate = datetime.strptime(first_chunk_decoded['dateTime'][0:10], "%d.%m.%Y")

    second_size = struct.unpack("i",f.read(4))[0]
    if filesize < (12+first_size+4+second_size): processing =10; break
    second_chunk = f.read(second_size)

# <20121101 and blocks==2 means Complete (pre 8.1). Second block should be Json.
    if (replaydate < datetime(2012, 11, 1)) and blocks==2:
      if second_chunk[0:2] == b'[{':
# Complete (pre 8.1).
        if to_decode&2:
          second_chunk_decoded = json.loads(second_chunk.decode('utf-8'))
          chunks_bitmask = chunks_bitmask|2
        processing =3; break
      else: processing =14; break

# =20121101 and blocks==2 can go both ways; we need to autodetect the second block.
# >20121101 and blocks==2 can contain a broken replay.
    elif (replaydate >= datetime(2012, 11, 1)) and blocks == 2:
      if second_chunk[0:2] == b'(d':
# Incomplete (post-8.1), with the 'Battle Result' pickle.
        if to_decode & 4:
          third_chunk_decoded = _Unpickler(io.BytesIO(second_chunk)).load()
          chunks_bitmask = chunks_bitmask | 4
          for b in third_chunk_decoded['vehicles']:
            third_chunk_decoded['vehicles'][b]['details'] = _Decoder.decode_details(third_chunk_decoded['vehicles'][b]['details'].encode('raw_unicode_escape'))
            third_chunk_decoded['players'][third_chunk_decoded['vehicles'][b]['accountDBID']]["vehicleid"] = b
        processing = 2; break
      elif second_chunk[0:2] == b'[{':
        if to_decode & 2:
          second_chunk_decoded = json.loads(second_chunk.decode('utf-8'))
          chunks_bitmask = chunks_bitmask | 2
        if replaydate == datetime(2012, 11, 1):
# Complete (pre-8.1).
          processing = 3; break
        else:
# Bugged (post-8.1). The game crashed somewhere; the second Json has the game result.
          processing = 6; break

# >=20121101 and blocks==3 means Complete (post-8.1).
    elif (replaydate >= datetime(2012, 11, 1)) and blocks == 3:
      if second_chunk[0:2] == b'[{':
        if to_decode & 2:
          second_chunk_decoded = json.loads(second_chunk.decode('utf-8'))
          chunks_bitmask = chunks_bitmask | 2
        if filesize < (12 + first_size + 4 + second_size + 4): processing = 10; break
        third_size = struct.unpack("i", f.read(4))[0]
        if filesize < (12 + first_size + 4 + second_size + 4 + third_size): processing = 10; break
        third_chunk = f.read(third_size)
        if third_chunk[0:2] == b'(d':
          if to_decode & 4:
            third_chunk_decoded = _Unpickler(io.BytesIO(third_chunk)).load()
            chunks_bitmask = chunks_bitmask | 4
            for b in third_chunk_decoded['vehicles']:
              third_chunk_decoded['vehicles'][b]['details'] = _Decoder.decode_details(third_chunk_decoded['vehicles'][b]['details'].encode('raw_unicode_escape'))
              third_chunk_decoded['players'][third_chunk_decoded['vehicles'][b]['accountDBID']]["vehicleid"] = b
          processing = 4; break
        else: processing = 15; break
      else: processing = 14; break


# Every state we can handle has already broken out of the while loop;
# anything unhandled ends up here.
    processing = 20; break

  if f: f.close()

  if chunks_bitmask & 5 == 5:
# Check whether the pickle belongs to this replay. This is a weak check: we only compare the map and the
# game mode (arenaTypeID packs the map id in its low 16 bits and the gameplay mode in its high 16 bits),
# so some corrupted replays can still pass.
    if maps[third_chunk_decoded['common']['arenaTypeID'] & 65535][0] != first_chunk_decoded['mapName'] or \
       gameplayid[third_chunk_decoded['common']['arenaTypeID'] >> 16] != first_chunk_decoded['gameplayID']:
#      print("EERRRROOOORRRrrrrrr!!!one77")
#      print("json:  ", first_chunk_decoded['mapName'])
#      print("pickle:", maps[ third_chunk_decoded['common']['arenaTypeID'] & 65535 ])
#      print("json:  ", first_chunk_decoded['gameplayID'])
#      print("pickle:", gameplayid[ third_chunk_decoded['common']['arenaTypeID'] >>16])
      processing = 8
#      chunks_bitmask = chunks_bitmask^4
#      print(datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S'))
#      print( datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime']))
#      print( mapidname[ chunks[2]['common']['arenaTypeID'] & 65535 ])

# Guesstimating the version: reliable only since 8.6, when WG added a version string to the replay.
# Earlier versions can only be roughly guessed by counting data or comparing dates.
  if chunks_bitmask & 1 == 1:
    if "clientVersionFromExe" in first_chunk_decoded:
      # e.g. "0, 8, 6, 0" -> int("0860") == 860 (assumed format of the field)
      version = int(first_chunk_decoded["clientVersionFromExe"].replace(', ', ''))
#      print(first_chunk_decoded["clientVersionFromExe"], version)
    else:
# Known client release dates (used for the crude date-based guess):
#  8.7     July 29, 2013
#  8.4     05 Mar 2013
#  8.3     16 Jan 2013
#  8.2     12 Dec 2012
#  8.1     Mar 13 2013
#  8.0     Sep 24 2012
#  7.5     04.08.2012
#  7.4     20.06.2012
#  7.3     11.05.2012
#  7.2     30.03.2012
#  7.1     05.01.2012
#  7.0     19.12.2011
#  6.7     15.09.2011
#  6.6     10.08.2011
#  6.5     Jun 14 2011
#  6.4     Mar 12 2011
#  6.3.11  Apr 12 2011
#  6.3.10  Apr 07 2011
#  6.3.9   Mar 22 2011
#  6.3     Jan 15 2011
#  6.2.8   Dec 28 2010
#  6.2.7   Dec 23 2010
#  6.2     Dec 01 2010
#  6.1.5   Sep 28 2010
#  5.5     Oct 21 2010
#  5.4.1   Jul 16 2010

      version = 830  # no clue, let's default to a safe 8.3
  else:
    version = 0  # no first chunk means no version

# returns decoded_chunks[0:3], bitmask of available chunks, decoder status, approximate version
  return (first_chunk_decoded, second_chunk_decoded, third_chunk_decoded), chunks_bitmask, processing, version
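
For reference, here is a minimal, self-contained sketch of the container layout the comments above describe:
a 4-byte magic number, a 4-byte block count, then length-prefixed metadata blocks, with the encrypted packet
stream trailing after the counted blocks. The helper name read_replay_blocks is made up for illustration, and
little-endian sizes are an assumption based on the struct.unpack("i", ...) calls above.

import struct

WOT_MAGIC = bytes.fromhex("12323411")

def read_replay_blocks(path):
    # Yield the length-prefixed metadata blocks at the start of a .wotreplay
    # file; the encrypted packet stream after the counted blocks is left unread.
    with open(path, "rb") as f:
        if f.read(4) != WOT_MAGIC:
            raise ValueError("bad magic number, not a WoT replay")
        (count,) = struct.unpack("<i", f.read(4))  # assumed little-endian
        for _ in range(count):
            (size,) = struct.unpack("<i", f.read(4))
            yield f.read(size)

# Sniff each block by its leading bytes, as the decoder above does:
#   b'{'  -> first Json (match metadata)
#   b'[{' -> second Json (simplified frag counts)
#   b'(d' -> Python pickle with the full battle result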

Example 49

Project: webrecorder Source File: usercontroller.py
    def init_routes(self):

        @self.app.get(['/api/v1/dashboard', '/api/v1/dashboard/'])
        @self.manager.admin_view()
        def api_dashboard():
            cache_key = self.cache_template.format('dashboard')
            expiry = 5 * 60  # 5 min

            cache = self.manager.redis.get(cache_key)

            if cache:
                return json.loads(cache.decode('utf-8'))

            users = self.manager.get_users().items()
            results = []

            # add username and get collections
            for user, data in users:
                data['username'] = user
                results.append(data)

            temp = self.manager.redis.hgetall(self.temp_usage_key)
            user = self.manager.redis.hgetall(self.user_usage_key)
            temp = [(k.decode('utf-8'), int(v)) for k, v in temp.items()]
            user = [(k.decode('utf-8'), int(v)) for k, v in user.items()]

            data = {
                'users': UserSchema().load(results, many=True).data,
                'collections': self.manager.get_collections(user='*', api=True),
                'temp_usage': sorted(temp, key=itemgetter(0)),
                'user_usage': sorted(user, key=itemgetter(0)),
            }

            self.manager.redis.setex(cache_key,
                                     expiry,
                                     json.dumps(data, cls=CustomJSONEncoder))

            return data


        @self.app.get(['/api/v1/users', '/api/v1/users/'])
        @self.manager.admin_view()
        def api_users():
            """Full admin API resource of all users.
               Containing user info and public collections

               - Provides basic (1 dimension) RESTful sorting
               - TODO: Pagination
            """
            sorting = request.query.getunicode('sort', None)
            sort_key = sub(r'^-{1}?', '', sorting) if sorting is not None else None
            reverse = sorting.startswith('-') if sorting is not None else False

            def dt(d):
                return datetime.strptime(d, '%Y-%m-%d %H:%M:%S.%f')

            # sortable fields, with optional key unpacking functions
            filters = {
                'created': {'key': lambda obj: dt(obj[1]['creation_date'])},
                'email': {'key': lambda obj: obj[1]['email_addr']},
                'last_login': {'key': lambda obj: dt(obj[1]['last_login'])},
                'name': {'key': lambda obj: json.loads(obj[1]['desc'] or '{}')['name']},
                'username': {},
            }

            if sorting is not None and sort_key not in filters:
                raise HTTPError(400, 'Bad Request')

            sort_by = filters[sort_key] if sorting is not None else {}
            users = sorted(self.manager.get_users().items(),
                           **sort_by,
                           reverse=reverse)

            results = []

            # add username and get collections
            for user, data in users:
                data['username'] = user
                # add space usage
                total = self.manager.get_size_allotment(user)
                used = self.manager.get_size_usage(user)
                data['space_utilization'] = {
                    'total': total,
                    'used': used,
                    'available': total - used,
                }
                results.append(data)

            return {
                # `results` is a list so will always read as `many`
                'users': UserSchema().load(results, many=True).data
            }

        @self.app.get('/api/v1/anon_user')
        def get_anon_user():
            return {'anon_user': self.manager.get_anon_user(True)}

        @self.app.get('/api/v1/temp-users')
        @self.manager.admin_view()
        def temp_users():
            """ Resource returning active temp users
            """
            temp_users_keys = self.manager.redis.keys('u:{0}*'.format(self.temp_user_key))
            temp_users = []

            if len(temp_users_keys):
                with self.manager.redis.pipeline() as pi:
                    for user in temp_users_keys:
                        pi.hgetall(user)
                    temp_users = pi.execute()

                for idx, user in enumerate(temp_users_keys):
                    temp_users[idx][b'username'] = user

                # convert bytestrings, skip over incomplete
                temp_users = [{k.decode('utf-8'): v.decode('utf-8') for k, v in d.items()}
                              for d in temp_users
                              if b'max_size' in d and b'created_at' in d]

                for user in temp_users:
                    total = int(user['max_size'])
                    used = int(user.get('size', 0))
                    creation = datetime.fromtimestamp(int(user['created_at']))
                    removal = creation + timedelta(seconds=self.config['session.durations']['short']['total'])

                    u = re.search(r'{0}\w+'.format(self.temp_user_key),
                                  user['username']).group()
                    user['username'] = u
                    user['removal'] = removal.isoformat()
                    user['space_utilization'] = {
                        'total': total,
                        'used': used,
                        'available': total - used,
                    }

                temp_users, err = TempUserSchema().load(temp_users, many=True)
                if err:
                    return {'errors': err}

            return {'users': temp_users}

        @self.app.post('/api/v1/users/<user>/desc')
        def update_desc(user):
            """legacy, eventually move to the patch endpoint"""
            desc = request.body.read().decode('utf-8')

            self.manager.set_user_desc(user, desc)
            return {}

        @self.app.post(['/api/v1/users', '/api/v1/users/'])
        @self.manager.admin_view()
        def api_create_user():
            """API enpoint to create a user with schema validation"""
            users = self.manager.get_users()
            emails = [u[1]['email_addr'] for u in users.items()]
            data = request.json
            err = NewUserSchema().validate(data)

            if 'username' in data and data['username'] in users:
                if not err:
                    return {'errors': 'Username already exists'}
                else:
                    err.update({'username': 'Username already exists'})

            if 'email' in data and data['email'] in emails:
                if not err:
                    return {'errors': 'Email already exists'}
                else:
                    err.update({'email': 'Email already exists'})

            # validate
            if len(err):
                return {'errors': err}

            # create user
            self.manager.cork._store.users[data['username']] = {
                'role': data['role'],
                'hash': self.manager.cork._hash(data['username'],
                                                data['password']).decode('ascii'),
                'email_addr': data['email'],
                'desc': '{{"name":"{name}"}}'.format(name=data.get('name', '')),
                'creation_date': str(datetime.utcnow()),
                'last_login': str(datetime.utcnow()),
            }
            self.manager.cork._store.save_users()

            # add user account defaults
            key = self.manager.user_key.format(user=data['username'])
            now = int(time.time())

            max_size, max_coll = self.manager.redis.hmget('h:defaults',
                                                          ['max_size', 'max_coll'])
            if not max_size:
                max_size = self.manager.default_max_size

            if not max_coll:
                max_coll = self.manager.default_max_coll

            with redis.utils.pipeline(self.manager.redis) as pi:
                pi.hset(key, 'max_size', max_size)
                pi.hset(key, 'max_coll', max_coll)
                pi.hset(key, 'created_at', now)
                pi.hset(key, 'name', data.get('name', ''))
                pi.hsetnx(key, 'size', '0')

            # create initial collection
            self.manager.create_collection(
                data['username'],
                coll=self.manager.default_coll['id'],
                coll_title=self.manager.default_coll['title'],
                desc=self.manager.default_coll['desc'].format(data['username']),
                public=False,
                synthetic=True
            )

            # Check for mailing list management
            if self.manager.mailing_list:
                self.manager.add_to_mailing_list(
                    data['username'],
                    data['email'],
                    data.get('name', ''),
                )

        @self.app.get(['/api/v1/users/<username>', '/api/v1/users/<username>/'])
        @self.manager.admin_view()
        def api_get_user(username):
            """API enpoint to return user info"""
            users = self.manager.get_users()

            if username not in users:
                self._raise_error(404, 'No such user')

            user = users[username]

            # assemble space usage
            total = self.manager.get_size_allotment(username)
            used = self.manager.get_size_usage(username)
            user['space_utilization'] = {
                'total': total,
                'used': used,
                'available': total - used,
            }

            user_data, err = UserSchema(exclude=('username',)).load(user)
            colls = self.manager.get_collections(username,
                                                 include_recs=True,
                                                 api=True)

            for coll in colls:
                for rec in coll['recordings']:
                    rec['pages'] = self.manager.list_pages(username,
                                                           coll['id'],
                                                           rec['id'])

            # colls is a list so will always be `many` even if one collection
            collections, err = CollectionSchema().load(colls, many=True)
            user_data['collections'] = collections

            return {'user': user_data}

        @self.app.put(['/api/v1/users/<username>', '/api/v1/users/<username>/'])
        @self.manager.auth_view()
        def api_update_user(username):
            """API enpoint to update user info

               See `UserUpdateSchema` for available fields.

               ** bottle 0.12.9 doesn't support the `PATCH` method; update to
                  PATCH once available.
            """
            users = self.manager.get_users()
            if username not in users:
                self._raise_error(404, 'No such user')

            # if not admin, check ownership
            if not self.manager.is_anon(username) and not self.manager.is_superuser():
                self.manager.assert_user_is_owner(username)

            user = users[username]
            try:
                json_data = json.loads(request.forms.json)
            except Exception as e:
                print(e)
                return {'errors': 'bad json data'}

            if len(json_data.keys()) == 0:
                return {'errors': 'empty payload'}

            data, err = UserUpdateSchema(only=json_data.keys()).load(json_data)

            if len(err):
                return {'errors': err}

            if 'name' in data:
                user['desc'] = '{{"name":"{name}"}}'.format(name=data.get('name', ''))

            #
            # restricted resources
            #
            if 'max_size' in data and self.manager.is_superuser():
                key = self.manager.user_key.format(user=username)
                max_size = data.get('max_size', self.manager.default_max_size)
                max_size = int(max_size) if type(max_size) is not int else max_size

                with redis.utils.pipeline(self.manager.redis) as pi:
                    pi.hset(key, 'max_size', max_size)

            if 'role' in data and self.manager.is_superuser():
                # set new role or default to base role
                user['role'] = data.get('role', 'archivist')

            #
            # return updated user data
            #
            total = self.manager.get_size_allotment(username)
            used = self.manager.get_size_usage(username)
            user['space_utilization'] = {
                'total': total,
                'used': used,
                'available': total - used,
            }

            user_data, err = UserSchema(exclude=('username',)).load(user)
            colls = self.manager.get_collections(username,
                                                 include_recs=True,
                                                 api=True)

            for coll in colls:
                for rec in coll['recordings']:
                    rec['pages'] = self.manager.list_pages(username,
                                                           coll['id'],
                                                           rec['id'])

            # colls is a list so will always be `many` even if one collection
            collections, err = CollectionSchema().load(colls, many=True)
            user_data['collections'] = collections

            return {'user': user_data}

        @self.app.delete(['/api/v1/users/<user>', '/api/v1/users/<user>/'])
        @self.manager.admin_view()
        def api_delete_user(user):
            """API enpoint to delete a user"""
            if user not in self.manager.get_users():
                self._raise_error(404, 'No such user')

            self.manager.delete_user(user)

        @self.app.get(['/<user>', '/<user>/'])
        @self.jinja2_view('user.html')
        def user_info(user):
            self.redir_host()

            if self.manager.is_anon(user):
                self.redirect('/' + user + '/temp')

            self.manager.assert_user_exists(user)

            result = {
                'user': user,
                'user_info': self.manager.get_user_info(user),
                'collections': self.manager.get_collections(user),
            }

            if not result['user_info'].get('desc'):
                result['user_info']['desc'] = self.default_user_desc.format(user)

            return result

        # User Account Settings
        @self.app.get('/<user>/_settings')
        @self.jinja2_view('account.html')
        def account_settings(user):
            self.manager.assert_user_is_owner(user)

            return {'user': user,
                    'user_info': self.manager.get_user_info(user),
                    'num_coll': self.manager.num_collections(user),
                   }

        # Delete User Account
        @self.app.post('/<user>/$delete')
        def delete_user(user):
            if self.manager.delete_user(user):
                self.flash_message('The user {0} has been permanently deleted!'.format(user), 'success')

                redir_to = '/'
                request.environ['webrec.delete_all_cookies'] = 'all'
                self.manager.cork.logout(success_redirect=redir_to, fail_redirect=redir_to)
            else:
                self.flash_message('There was an error deleting {0}'.format(user))
                self.redirect(self.get_path(user))

        # Expiry Message
        @self.app.route('/_expire')
        def expire():
            self.flash_message('Sorry, the anonymous collection has expired due to inactivity')
            self.redirect('/')

        @self.app.post('/_reportissues')
        def report_issues():
            useragent = request.headers.get('User-Agent')

            @self.jinja2_view('email_error.html')
            def error_email(params):
                ua = UserAgent(params.get('ua'))
                if ua.browser:
                    browser = '{0} {1} {2} {3}'
                    lang = ua.language or ''
                    browser = browser.format(ua.platform, ua.browser,
                                             ua.version, lang)

                    params['browser'] = browser
                else:
                    params['browser'] = ua.string

                params['time'] = params['time'][:19]
                return params

            self.manager.report_issues(request.POST, useragent, error_email)
            return {}

        # Skip POST request recording
        @self.app.get('/_skipreq')
        def skip_req():
            url = request.query.getunicode('url')
            user = self.manager.get_curr_user()
            if not user:
                user = self.manager.get_anon_user()

            self.manager.skip_post_req(user, url)
            return {}
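
The api_dashboard route above is a textbook cache-aside: try the Redis key, fall back to building the payload,
then setex it with a TTL, round-tripping through json.dumps and json.loads. Below is a minimal sketch of the
same pattern, assuming a redis-py client; the key and helper names are illustrative, not from the project.

import json
import redis  # assumes the redis-py package

r = redis.StrictRedis()

def cached(build, key='dashboard-cache', expiry=5 * 60):
    # Cache-aside: serve the JSON-decoded cache entry if present; otherwise
    # build the data, cache it with a TTL, and return the fresh copy.
    cached_value = r.get(key)
    if cached_value:
        return json.loads(cached_value.decode('utf-8'))
    data = build()
    r.setex(key, expiry, json.dumps(data))
    return data

# Usage: cached(lambda: {'users': [], 'collections': []})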

Example 50

Project: api-samples Source File: 01_EventRegexProperties.py
def main():

    # create the api client
    client = client_module.RestApiClient(version='7.0')

    # -------------------------------------------------------------------------
    # 1. get a list of event regex properties
    endpoint_url = 'config/event_sources/custom_properties/regex_properties'
    http_method = 'GET'

    # select fields to return for each event regex property
    fields = 'id, name, property_type'

    # use filter to select desired event regex property
    query_filter = 'property_type = "numeric"'

    # populate the optional parameters to be used in request
    params = {'fields': fields, 'filter': query_filter}

    # put range in header for paging purpose
    headers = {'range': 'items=0-4'}

    # send the request
    response = client.call_api(endpoint_url, http_method, params=params,
                               headers=headers, print_request=True)

    # check response and handle any error
    if response.code == 200:
        regex_properties = json.loads(response.read().decode('utf-8'))

        # go through the list of event regex properties and print each
        for regex_property in regex_properties:
            print(regex_property)

    else:
        SampleUtilities.pretty_print_response(response)
        print('Failed to retrieve the list of event regex properties')
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 2. create a new event regex property

    endpoint_url = 'config/event_sources/custom_properties/regex_properties'
    http_method = 'POST'

    # sample event regex property, be sure to change the name if running
    # multiple times.
    new_regex_property = {
        "name": "Sample event regex property x",
        "description": "description property",
        "property_type": "numeric",
        "use_for_rule_engine": True,
    }

    data = json.dumps(new_regex_property).encode('utf-8')

    headers = {'Content-type': 'application/json'}

    # send the request
    response = client.call_api(endpoint_url, http_method, data=data,
                               headers=headers, print_request=True)

    # check response and handle any error
    if response.code == 201:
        print('A new event regex property is created.')
        # can extract newly created event regex property from the response
        regex_property = json.loads(response.read().decode('utf-8'))
        print(json.dumps(regex_property, indent=4))
    else:
        print('Failed to create the new event regex property')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 3. get a single event regex property by id

    # id of the event regex property, using the one created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/event_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id))
    http_method = 'GET'

    # send the request
    response = client.call_api(endpoint_url, http_method, print_request=True)

    # check response and handle any error
    if response.code == 200:
        print("The requested event regex property has been retrieved.")
        regex_property = json.loads(response.read().decode('utf-8'))
        print(json.dumps(regex_property, indent=4))
    else:
        print('Failed to retrieve the event regex property with id=' +
              str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 4. update an event regex property by its id

    # using event regex property created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/event_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id))
    http_method = 'POST'

    fields_to_update = {
        'description': 'updated description',
        'use_for_rule_engine': False,
    }

    data = json.dumps(fields_to_update).encode('utf-8')

    headers = {'Content-type': 'application/json'}

    # send the request
    response = client.call_api(endpoint_url, http_method, data=data,
                               headers=headers, print_request=True)

    if response.code == 200:
        print('The event regex property has been successfully updated.')
        regex_property = json.loads(response.read().decode('utf-8'))
        print(json.dumps(regex_property, indent=4))
    else:
        print('Failed to update the event regex property with id=' +
              str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 5. find dependents of an event regex property

    # using event regex property created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/event_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id)) + '/dependents'
    http_method = 'GET'

    # send the request
    response = client.call_api(endpoint_url, http_method, print_request=True)

    if response.code == 202:
        print('The find dependents task for event regex property has started.')
        task_status = json.loads(response.read().decode('utf-8'))
        print(json.dumps(task_status, indent=4))

        task_status_url = ('/config/event_sources/custom_properties/' +
                           'regex_property_dependent_tasks' + '/' +
                           str(task_status['id']))

        task_manager = TaskManager(client, task_status_url)

        try:
            task_manager.wait_for_task_to_complete(60)

            # query the result endpoint for results

            endpoint_url = ('config/event_sources/custom_properties/' +
                            'regex_property_dependent_tasks' + '/' +
                            str(task_status['id']) + '/results')
            http_method = 'GET'

            response = client.call_api(endpoint_url, http_method,
                                       print_request=True)

            # check response and handle any error
            if response.code == 200:
                task_result = json.loads(response.read().decode('utf-8'))
                print(json.dumps(task_result, indent=4))

            else:
                SampleUtilities.pretty_print_response(response)
                print('Failed to retrieve the result of find dependents task.')
                sys.exit(1)

        except TimeoutError:
            print("Find dependents task time out. Current status is:")
            SampleUtilities.pretty_print_response(
                              task_manager.get_task_status()
                              )

    else:
        print('Failed to start a find dependents task for ' +
              'event regex property with id=' + str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # -------------------------------------------------------------------------
    # 6. delete an event regex property

    # using event regex property created in step 2
    regex_property_id = regex_property['id']

    endpoint_url = ('config/event_sources/custom_properties/regex_properties' +
                    '/' + str(regex_property_id))
    http_method = 'DELETE'

    # send the request
    response = client.call_api(endpoint_url, http_method, print_request=True)

    if response.code == 202:
        print('The deletion task for event regex property has started.')
        task_status = json.loads(response.read().decode('utf-8'))
        print(json.dumps(task_status, indent=4))

        task_status_url = ('/config/event_sources/custom_properties/' +
                           'regex_property_delete_tasks' + '/' +
                           str(task_status['id']))

        task_manager = TaskManager(client, task_status_url)

        try:
            task_manager.wait_for_task_to_complete(60)
        except TimeoutError:
            print("Deletion task time out. Current status is:")
            SampleUtilities.pretty_print_response(
                              task_manager.get_task_status()
                              )

    else:
        print('Failed to start a deletion task for ' +
              'event regex property with id=' + str(regex_property_id))
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)
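
Steps 5 and 6 rely on the samples' TaskManager to poll the asynchronous task endpoints. A minimal stand-in
polling loop might look like the sketch below; the 'COMPLETED' status value and the helper itself are
assumptions, while client.call_api and the json.loads decoding match the calls used above.

import json
import time

def wait_for_task(client, task_status_url, timeout=60, interval=5):
    # Poll a task-status endpoint until it reports COMPLETED, raising
    # TimeoutError on expiry like the TaskManager used above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        response = client.call_api(task_status_url, 'GET')
        status = json.loads(response.read().decode('utf-8'))
        if status.get('status') == 'COMPLETED':  # assumed terminal status
            return status
        time.sleep(interval)
    raise TimeoutError('task did not complete within {0}s'.format(timeout))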