subprocess.PIPE

Here are examples of the Python API subprocess.PIPE, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

173 Examples
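
All of these boil down to one pattern: pass subprocess.PIPE for stdin, stdout or stderr when constructing subprocess.Popen, then read or write the resulting pipe. A minimal sketch using only the standard library:

import subprocess

# Capture a child process's output instead of inheriting the terminal.
proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
stdout, _ = proc.communicate()  # returns (stdout, stderr); stderr is None here
print(stdout.decode('utf-8'))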

Example 151

Project: BiliDan Source File: bilidan.py
def biligrab(url, *, debug=False, verbose=False, media=None, comment=None, cookie=None, quality=None, source=None, keep_fps=False, mpvflags=[], d2aflags={}, fakeip=None):

    url_get_metadata = 'http://api.bilibili.com/view?'
    url_get_comment = 'http://comment.bilibili.com/%(cid)s.xml'
    if source == 'overseas':
        url_get_media = 'http://interface.bilibili.com/v_cdn_play?'
    else:
        url_get_media = 'http://interface.bilibili.com/playurl?'

    def parse_url(url):
        '''Parse a bilibili.com URL

        Return value: (aid, pid)
        '''
        if url.startswith('cid:'):
            try:
                return int(url[4:]), 'cid'
            except ValueError:
                raise ValueError('Invalid CID: %s' % url[4:])
        regex = re.compile('(?:http:/*[^/]+/(?:video/)?)?av(\\d+)(?:/|/index.html|/index_(\\d+).html)?(?:\\?|#|$)')
        regex_match = regex.match(url)
        if not regex_match:
            raise ValueError('Invalid URL: %s' % url)
        aid = regex_match.group(1)
        pid = regex_match.group(2) or '1'
        return aid, pid

    def fetch_video_metadata(aid, pid):
        '''Fetch video metadata

        Arguments: aid, pid

        Return value: {'cid': cid, 'title': title}
        '''
        req_args = {'type': 'json', 'appkey': codecs.decode(APPKEY,'rot13'), 'id': aid, 'page': pid}
        req_args['sign'] = bilibili_hash(req_args)
        _, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
        # Naive fix: byte 9 of the response is '-' (ASCII 45) when the API
        # returns a negative error code such as -404; retry with page 1.
        if response[8] == 45:
            req_args = {'type': 'json', 'appkey': codecs.decode(APPKEY,'rot13'), 'id': aid, 'page': 1}
            req_args['sign'] = bilibili_hash(req_args)
            _, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
        try:
            response = dict(json.loads(response.decode('utf-8', 'replace')))
        except (TypeError, ValueError):
            raise ValueError('Can not get \'cid\' from %s' % url)
        if 'error' in response:
            logging.error('Error message: %s' % response.get('error'))
        if 'cid' not in response:
            raise ValueError('Can not get \'cid\' from %s' % url)
        return response

    def get_media_urls(cid, *, feck_you_bishi_mode=False):
        '''Request the URLs of the video

        Arguments: cid

        Return value: [media_urls]
        '''
        if source in {None, 'overseas'}:
            user_agent = USER_AGENT_API if not feck_you_bishi_mode else USER_AGENT_PLAYER
            req_args = {'cid': cid}
            if quality is not None:
                req_args['quality'] = quality
            else:
                req_args['quality'] = None
            _, response = fetch_url(url_get_media+andro_mock(req_args), user_agent=user_agent, cookie=cookie, fakeip=fakeip)
            '''
            media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
            '''
            json_obj = json.loads(response.decode('utf-8'))
            if json_obj['result'] != 'suee':  # => Not Success
                raise ValueError('Server returned an error: %s (%s)' % (json_obj['result'], json_obj['code']))
            media_urls = [str(i['url']).strip() for i in json_obj['durl']]
            if not feck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
                logging.error('Detected User-Agent block. Switching to feck-you-bishi mode.')
                return get_media_urls(cid, feck_you_bishi_mode=True)
        elif source == 'html5':
            req_args = {'aid': aid, 'page': pid}
            logging.warning('HTML5 video source is experimental and may not always work.')
            _, response = fetch_url('http://www.bilibili.com/m/html5?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
            response = json.loads(response.decode('utf-8', 'replace'))
            media_urls = [dict.get(response, 'src')]
            if not media_urls[0]:
                media_urls = []
            if not feck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
                logging.error('Failed to request HTML5 video source. Retrying.')
                return get_media_urls(cid, feck_you_bishi_mode=True)
        elif source == 'flvcd':
            req_args = {'kw': url}
            if quality is not None:
                if quality == 3:
                    req_args['quality'] = 'high'
                elif quality >= 4:
                    req_args['quality'] = 'super'
            _, response = fetch_url('http://www.flvcd.com/parse.php?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
            resp_match = re.search('<input type="hidden" name="inf" value="([^"]+)"', response.decode('gbk', 'replace'))
            if resp_match:
                media_urls = resp_match.group(1).rstrip('|').split('|')
            else:
                media_urls = []
        elif source == 'bilipr':
            req_args = {'cid': cid}
            quality_arg = '1080' if quality is not None and quality >= 4 else '720'
            logging.warning('BilibiliPr video source is experimental and may not always work.')
            resp_obj, response = fetch_url('http://pr.lolly.cc/P%s?%s' % (quality_arg, urllib.parse.urlencode(req_args)), user_agent=USER_AGENT_PLAYER)
            if resp_obj.getheader('Content-Type', '').startswith('text/xml'):
                media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
            else:
                media_urls = []
        else:
            assert source in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}
        if len(media_urls) == 0 or media_urls == ['http://static.hdslb.com/error.mp4']:
            raise ValueError('Can not get valid media URLs.')
        return media_urls

    def get_video_size(media_urls):
        '''Determine the resolution of the video

        Arguments: [media_urls]

        Return value: (width, height)
        '''
        try:
            if media_urls[0].startswith('http:') or media_urls[0].startswith('https:'):
                ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '-timeout', '60000000', '-user-agent', USER_AGENT_PLAYER, '--', media_urls[0]]
            else:
                ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '--', media_urls[0]]
            log_command(ffprobe_command)
            ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE)
            try:
                ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace'))
            except KeyboardInterrupt:
                logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.')
                ffprobe_process.terminate()
                return 0, 0
            width, height, widthxheight = 0, 0, 0
            for stream in dict.get(ffprobe_output, 'streams') or []:
                if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight:
                    width, height = dict.get(stream, 'width'), dict.get(stream, 'height')
                    widthxheight = width*height  # track the largest stream seen so far
            return width, height
        except Exception as e:
            log_or_raise(e, debug=debug)
            return 0, 0

    def convert_comments(cid, video_size):
        '''Convert comments to ASS subtitle format

        Arguments: cid

        Return value: comment_out -> file
        '''
        _, resp_comment = fetch_url(url_get_comment % {'cid': cid}, cookie=cookie)
        comment_in = io.StringIO(resp_comment.decode('utf-8', 'replace'))
        comment_out = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8-sig', newline='\r\n', prefix='tmp-danmaku2ass-', suffix='.ass', delete=False)
        logging.info('Invoking Danmaku2ASS, converting to %s' % comment_out.name)
        d2a_args = dict({'stage_width': video_size[0], 'stage_height': video_size[1], 'font_face': 'SimHei', 'font_size': math.ceil(video_size[1]/21.6), 'text_opacity': 0.8, 'duration_marquee': min(max(6.75*video_size[0]/video_size[1]-4, 3.0), 8.0), 'duration_still': 5.0}, **d2aflags)
        for i, j in ((('stage_width', 'stage_height', 'reserve_blank'), int), (('font_size', 'text_opacity', 'comment_duration', 'duration_still', 'duration_marquee'), float)):
            for k in i:
                if k in d2aflags:
                    d2a_args[k] = j(d2aflags[k])
        try:
            danmaku2ass.Danmaku2ASS(input_files=[comment_in], input_format='Bilibili', output_file=comment_out, **d2a_args)
        except Exception as e:
            log_or_raise(e, debug=debug)
            logging.error('Danmaku2ASS failed, comments are disabled.')
        comment_out.flush()
        comment_out.close()  # Close the temporary file early to fix an issue related to Windows NT file sharing
        return comment_out

    def launch_player(video_metadata, media_urls, comment_out, is_playlist=False, increase_fps=True):
        '''Launch MPV media player

        Arguments: video_metadata, media_urls, comment_out

        Return value: player_exit_code -> int
        '''
        mpv_version_master = tuple(int(i) if i.isdigit() else float('inf') for i in check_env.mpv_version.split('-', 1)[0].split('.'))
        mpv_version_gte_0_10 = mpv_version_master >= (0, 10)
        mpv_version_gte_0_6 = mpv_version_gte_0_10 or mpv_version_master >= (0, 6)
        mpv_version_gte_0_4 = mpv_version_gte_0_6 or mpv_version_master >= (0, 4)
        logging.debug('Compare mpv version: %s %s 0.10' % (check_env.mpv_version, '>=' if mpv_version_gte_0_10 else '<'))
        logging.debug('Compare mpv version: %s %s 0.6' % (check_env.mpv_version, '>=' if mpv_version_gte_0_6 else '<'))
        logging.debug('Compare mpv version: %s %s 0.4' % (check_env.mpv_version, '>=' if mpv_version_gte_0_4 else '<'))
        if increase_fps:  # If hardware decoding (without -copy suffix) is used, do not increase fps
            for i in mpvflags:
                i = i.split('=', 1)
                if 'vdpau' in i or 'vaapi' in i or 'vda' in i:
                    increase_fps = False
                    break
        command_line = ['mpv', '--autofit', '950x540']
        if mpv_version_gte_0_6:
            command_line += ['--cache-file', 'TMP']
        if increase_fps and mpv_version_gte_0_6:  # Drop frames at vo side but not at decoder side to prevent A/V sync issues
            command_line += ['--framedrop', 'vo']
        command_line += ['--http-header-fields', 'User-Agent: '+USER_AGENT_PLAYER.replace(',', '\\,')]
        if mpv_version_gte_0_6:
            if mpv_version_gte_0_10:
                command_line += ['--force-media-title', video_metadata.get('title', url)]
            else:
                command_line += ['--media-title', video_metadata.get('title', url)]
        if is_playlist or len(media_urls) > 1:
            command_line += ['--merge-files']
        if mpv_version_gte_0_4:
            command_line += ['--no-video-aspect', '--sub-ass', '--sub-file', comment_out.name]
        else:
            command_line += ['--no-aspect', '--ass', '--sub', comment_out.name]
        if increase_fps:
            if mpv_version_gte_0_6:
                command_line += ['--vf', 'lavfi="fps=fps=60:round=down"']
            else:  # Versions < 0.6 have an A/V sync related issue
                command_line += ['--vf', 'lavfi="fps=fps=50:round=down"']
        command_line += mpvflags
        if is_playlist:
            command_line += ['--playlist']
        else:
            command_line += ['--']
        command_line += media_urls
        log_command(command_line)
        player_process = subprocess.Popen(command_line)
        try:
            player_process.wait()
        except KeyboardInterrupt:
            logging.info('Terminating media player...')
            try:
                player_process.terminate()
                try:
                    player_process.wait(timeout=2)
                except subprocess.TimeoutExpired:
                    logging.info('Killing media player by force...')
                    player_process.kill()
            except Exception:
                pass
            raise
        return player_process.returncode

    aid, pid = parse_url(url)

    logging.info('Loading video info...')
    if pid != 'cid':
        video_metadata = fetch_video_metadata(aid, pid)
    else:
        video_metadata = {'cid': aid, 'title': url}
    logging.info('Got video cid: %s' % video_metadata['cid'])

    logging.info('Loading video content...')
    if media is None:
        media_urls = get_media_urls(video_metadata['cid'])
    else:
        media_urls = [media]
    logging.info('Got media URLs:'+''.join(('\n      %d: %s' % (i+1, j) for i, j in enumerate(media_urls))))

    logging.info('Determining video resolution...')
    video_size = get_video_size(media_urls)
    logging.info('Video resolution: %sx%s' % video_size)
    if video_size[0] > 0 and video_size[1] > 0:
        video_size = (video_size[0]*1080/video_size[1], 1080)  # Simply fix ASS resolution to 1080p
    else:
        log_or_raise(ValueError('Can not get video size. Comments may be wrongly positioned.'), debug=debug)
        video_size = (1920, 1080)

    logging.info('Loading comments...')
    if comment is None:
        comment_out = convert_comments(video_metadata['cid'], video_size)
    else:
        comment_out = open(comment, 'r')
        comment_out.close()

    logging.info('Launching media player...')
    player_exit_code = launch_player(video_metadata, media_urls, comment_out, increase_fps=not keep_fps)

    if comment is None and player_exit_code == 0:
        os.remove(comment_out.name)

    return player_exit_code
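
The subprocess.PIPE usage in this example is in get_video_size: ffprobe's JSON report is captured through stdout=subprocess.PIPE and communicate(). A standalone sketch of the same capture-and-parse pattern (assuming ffprobe is on PATH; the URL argument is hypothetical):

import json
import subprocess

def probe_resolution(media_url):
    # Capture ffprobe's JSON report on stdout; stderr stays on the terminal.
    command = ['ffprobe', '-loglevel', 'error', '-print_format', 'json',
               '-select_streams', 'v', '-show_streams', '--', media_url]
    stdout, _ = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()
    streams = json.loads(stdout.decode('utf-8', 'replace')).get('streams') or []
    # Pick the largest video stream, as get_video_size above intends to.
    best = max(streams, key=lambda s: s.get('width', 0) * s.get('height', 0),
               default=None)
    return (best['width'], best['height']) if best else (0, 0)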

Example 152

Project: pocketsphinx-python Source File: main.py
        def run(self):
            '''Is called when thread.start is run.'''
            #Create a pyaudio stream to later record from the microphone
            p = pyaudio.PyAudio()
            stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)

            count = 1                   #Used for training-file naming
            ma_colorbutton.show()

            filename = 'training'       #specify filename: training_1.wav; training_2.wav....

            if not os.path.exists(working_dir):
                os.mkdir(working_dir)

            #Go through every sentence from the text file, whose sentences are stored in the list self.sentences
            for sentence in self.sentences:
                gtk.gdk.threads_enter()
                ma_info_textbuffer.set_text("You have to read the following sentence.")
                ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "Click on the button. Start reading, if the button gets green. ")
                ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "If you are ready wait 1 second and click the button again.\n\n")
                gtk.gdk.threads_leave()
                print(sentence.strip())
                ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), sentence.strip())
                global ma_is_clicked
                ma_is_clicked = False
                
                #Wait for the initial click of the "record" button
                while not ma_is_clicked and self.running:
                    time.sleep(0.2)
                if not self.running:
                    return

                #Change the color from red to green, so the user knows when to start speaking
                map = ma_colorbutton.get_colormap()
                color = map.alloc_color("red")
                style = ma_colorbutton.get_style().copy()
                style.bg[gtk.STATE_NORMAL] = color
                style.bg[gtk.STATE_PRELIGHT] = color
                ma_colorbutton.set_style(style)
                time.sleep(1)
                map = ma_colorbutton.get_colormap()
                color = map.alloc_color("green")
                style = ma_colorbutton.get_style().copy()
                style.bg[gtk.STATE_NORMAL] = color
                style.bg[gtk.STATE_PRELIGHT] = color
                ma_colorbutton.set_style(style)


                #Create an array to store the data from the stream and write it later to a .wav file
                data_all = array('h')
                ma_is_clicked = False
                stream.start_stream()            #Start the pyaudio recording stream
                #While the model-adaption thread is running and no button is clicked: record sound.
                while self.running and not ma_is_clicked:
                    buf = stream.read(1024)           #read first chunk from the mic-stream
                    if buf:
                        data_chunk = array('h', buf)
                        percent = max(data_chunk)/1000.
                        if percent > 1.0:
                            percent = 1.0
                        #We have to do the following, because we are not in the main thread and want to
                        #modify the value of a progressbar
                        gtk.gdk.threads_enter()         
                        ma_level_progressbar.set_fraction(percent) #Set "level" of the progressbar, which indicates the input volume
                        gtk.gdk.threads_leave()
                        data_all.extend(data_chunk)
                    else:
                        break
                        
                #Stop the recording stream
                stream.stop_stream()

                #filename = self.name.split('/')[len(self.name.split('/'))-1][:-4]
                #filename = filename

                #After recording save the data to the wav-file.
                wf = wave.open(working_dir+filename+'_'+str(count)+'.wav', 'wb')
                wf.setnchannels(1)
                wf.setsampwidth(2)
                wf.setframerate(16000)
                data_all = pack('<' + ('h' * len(data_all)), *data_all)
                wf.writeframes(data_all)
                wf.close()

                #Now write the filename from above into .fileids and the sentence into .transcription.
                #These two files are used by the model adaptation we run later, after recording all sentences.
                with open(working_dir+filename+".fileids", "a") as f:
                    f.write(working_dir+filename+'_'+str(count)+'\n')
                with open(working_dir+filename+".transcription", "a") as f:
                    line = unicode(sentence).strip().upper()
                    line = "".join(c for c in line if c not in ('!', '.' ,':', ';', ',', '?'))
                    f.write('<s> '+line+' </s> ('+working_dir+filename+'_'+str(count)+')\n')

                count+=1    #Increase the counter for the filenames
                #Set the recording button to the "normal" state
                map = ma_colorbutton.get_colormap()
                color = map.alloc_color("white")
                style = ma_colorbutton.get_style().copy()
                style.bg[gtk.STATE_NORMAL] = color
                style.bg[gtk.STATE_PRELIGHT] = color
                ma_colorbutton.set_style(style)

            #Close the stream and terminate the pyaudio object. We don't need them anymore, because all sentences are already recorded
            stream.close()
            p.terminate()

            gtk.gdk.threads_enter()       #Again: needed, because we modify the GUI.
            ma_info_textbuffer.set_text("Now you have recorded all training data.\n\n")
            ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "In the next seconds i'm trying to adapt it to the new model. ")
            gtk.gdk.threads_leave()

            def run_command(cmd):
                '''Run the model-adaptation commands and display their output in real time in the GUI and console log'''
                print(' '.join(cmd))
                proc = subprocess.Popen(cmd,  stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                        universal_newlines=True,)

                for line in unbuffered(proc):
                    print(line)
                    ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), line+'\n')


            gtk.gdk.threads_enter()
            run_command(cmd = [sphinx_fe, '-argfile', hmm+'feat.params', '-samprate', '16000', '-c', working_dir+filename+'.fileids', '-di', '.',
                '-do', '.', '-ei', 'wav', '-eo', 'mfc', '-mswav', 'yes'])
            ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "\n\nNext: Convert mdef to mdef.txt")
            gtk.gdk.threads_leave()

            time.sleep(3)        #We don't need the breaks here, but it is easier to follow all steps of the adaptation
            gtk.gdk.threads_enter()
            run_command(cmd = [pocketsphinx_mdef_convert, '-text', hmm+'mdef', working_dir+'mdef.txt'])
            ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "\n\nNext: Run bw")
            gtk.gdk.threads_leave()

            time.sleep(3)
            gtk.gdk.threads_enter()
            run_command(cmd = [bw, '-hmmdir', hmm, '-moddeffn', working_dir+'mdef.txt', '-ts2cbfn', '.cont.',
                '-feat', '1s_c_d_dd', '-cmn', 'current', '-agc', 'none', '-dictfn', dic, '-ctlfn', working_dir+filename+'.fileids',
                '-lsnfn', working_dir+filename+'.transcription', '-lda', hmm+'feature_transform', '-accumdir', working_dir])
            ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "\n\nNext: Run mllr_solve")
            gtk.gdk.threads_leave()

            time.sleep(3)
            gtk.gdk.threads_enter()
            run_command(cmd = [mllr_solve, '-meanfn', hmm+'means', '-varfn', hmm+'variances', '-outmllrfn', working_dir+'mllr_matrix', '-accumdir', working_dir])
            ma_info_textbuffer.insert(ma_info_textbuffer.get_end_iter(), "\n\nReady!! Check log for errors or warnings.")
            gtk.gdk.threads_leave()

            print("MA  is over.")

Example 153

Project: Sunflower Source File: viewer.py
Function: init
	def __init__(self, path, provider, parent):
		self._window = Gtk.Window(Gtk.WindowType.TOPLEVEL)

		self.path = path
		self._provider = provider
		self._parent = parent
		self._application = self._parent._parent
		self._page_count = 0
		self._options = self._application.options.section('viewer')

		associations_manager = self._application.associations_manager
		self._mime_type = associations_manager.get_mime_type(path)

		if associations_manager.is_mime_type_unknown(self._mime_type):
			data = associations_manager.get_sample_data(path, provider)
			self._mime_type = associations_manager.get_mime_type(data=data)

		# configure window
		self._window.set_title(_('{0} - Viewer').format(os.path.basename(self.path)))
		self._window.set_size_request(800, 600)
		self._window.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
		self._window.set_resizable(True)
		self._window.set_skip_taskbar_hint(False)
		self._window.set_wmclass('Sunflower', 'Sunflower')
		self._window.set_border_width(0)

		# connect signals
		self._window.connect('destroy', self._handle_destroy)
		self._window.connect('key-press-event', self._handle_key_press)

		# create user interface according to mime type
		vbox = Gtk.VBox(homogeneous=False, spacing=0)
		self.status_bar = StatusBar()
		self.status_bar.set_border_width(2)
		self.status_bar.add_group_with_icon('mime_type', 'document-properties', self._mime_type)
		self.status_bar.show()

		self._notebook = Gtk.Notebook()
		self._notebook.set_border_width(2)

		# create extensions
		self._create_extensions()

		# create page for executables
		if self._mime_type in ('application/x-executable', 'application/x-sharedlib') \
		and executable_exists('nm'):
			# get output from command
			data = ''
			try:
				output = subprocess.Popen(
									['nm', '-al', path],
									stdout=subprocess.PIPE
								).communicate()

				data = output[0]

			except OSError as error:
				# report error to user
				raise error

			# create new page
			self._create_text_page(_('Executable'), data)

		# create text page if needed
		if associations_manager.is_mime_type_subset(self._mime_type, 'text/plain'):
			# get data from the file
			raw_file = self._provider.get_file_handle(self.path, FileMode.READ)
			data = raw_file.read()
			raw_file.close()

			# create new page
			self._create_text_page(_('Text'), data)

		# create image page if needed
		if self._mime_type.startswith('image/'):
			container = Gtk.ScrolledWindow()
			container.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
			container.set_shadow_type(Gtk.ShadowType.NONE)
			container.set_border_width(5)
			viewport = Gtk.Viewport()
			image = Gtk.Image()

			# load raw data
			raw_file = provider.get_file_handle(path, FileMode.READ)
			raw_data = raw_file.read()
			raw_file.close()

			# get pixbuf from raw data
			try:
				loader = GdkPixbuf.PixbufLoader()
				loader.write(raw_data)
				loader.close()

			except GObject.GError:
				pass

			else:
				# set image
				image.set_from_pixbuf(loader.get_pixbuf())

			viewport.add(image)
			container.add(viewport)
			self._insert_page(_('Image'), container)

		# pack user interface
		vbox.pack_start(self._notebook, True, True, 0)
		vbox.pack_start(self.status_bar, False, False, 0)

		self._window.add(vbox)

		# show all widgets if there are pages present
		if self._page_count > 0:
			self._window.show_all()

		else:
			# show information and close window
			dialog = Gtk.MessageDialog(
									self._application,
									Gtk.DialogFlags.DESTROY_WITH_PARENT,
									Gtk.MessageType.INFO,
									Gtk.ButtonsType.OK,
									_('Viewer is unable to display this file type.')
								)
			dialog.run()
			dialog.destroy()

			self._window.destroy()
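
The nm invocation above is the shortest form of the pattern: only stdout is redirected, so communicate() returns a (stdout, stderr) tuple whose second element is None. With error checking added, the same capture can be written with subprocess.check_output, which raises CalledProcessError on a non-zero exit (a sketch, assuming nm is installed):

import subprocess

# Capture nm's symbol listing; raises CalledProcessError if nm fails.
data = subprocess.check_output(['nm', '-al', '/bin/ls'])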

Example 154

Project: memsql-loader Source File: downloader.py
    def run(self):
        try:
            try:
                # This is at the top so that any exceptions that occur will
                # emit a KILL QUERY due to fifo.open()

                # if we are piping through a script, the fifo should block
                # because the downloader is polling the script's stdin instead
                # of the fifo
                blocking = self.job.spec.options.script is not None
                with self.fifo.open(blocking=blocking) as target_file:
                    # allocate an URL for the target file
                    if self.task.data['scheme'] == 's3':
                        if self.is_anonymous:
                            key_url = 'http://%(bucket)s.s3.amazonaws.com/%(path)s' % {
                                'bucket': self.key.bucket.name,
                                'path': self.key.name.encode('utf-8')
                            }
                        else:
                            key_url = self.key.generate_url(expires_in=3600)
                    elif self.task.data['scheme'] == 'hdfs':
                        host = self.job.spec.source.hdfs_host
                        port = self.job.spec.source.webhdfs_port
                        hdfs_user = self.job.spec.source.hdfs_user
                        key_name = self.key.name
                        key_url = webhdfs.get_webhdfs_url(
                            host, port, hdfs_user, 'OPEN', key_name)
                    elif self.task.data['scheme'] == 'file':
                        key_url = 'file://%(path)s' % {'path': self.key.name}
                    else:
                        assert False, 'Unsupported job with paths: %s' % [ str(p) for p in self.job.paths ]

                    self._curl = curl = pycurl.Curl()
                    curl.setopt(pycurl.URL, key_url)
                    curl.setopt(pycurl.NOPROGRESS, 0)
                    curl.setopt(pycurl.PROGRESSFUNCTION, self._progress)
                    curl.setopt(pycurl.SSL_VERIFYPEER, 0)
                    curl.setopt(pycurl.SSL_VERIFYHOST, 0)
                    curl.setopt(pycurl.CONNECTTIMEOUT, 30)

                    if self.job.spec.options.script is not None:
                        self.script_proc = subprocess.Popen(
                            ["/bin/bash", "-c", self.job.spec.options.script],
                            stdout=target_file.fileno(),
                            stdin=subprocess.PIPE)

                        # check that script hasn't errored before downloading
                        # NOTE: we wait here so that we can check if a script exits prematurely
                        # if this is the case, we fail the job without requeueing
                        time.sleep(1)
                        if self.script_proc.poll() is not None:
                            self.logger.error('Script `%s` exited prematurely with return code %d' % (self.job.spec.options.script, self.script_proc.returncode))
                            raise WorkerException('Script `%s` exited prematurely with return code %d' % (self.job.spec.options.script, self.script_proc.returncode))

                        # If we're piping data into a script and this file is
                        # a gzipped file, we'll decompress the data ourselves
                        # before piping it into the script.
                        if self.task.data['key_name'].endswith('.gz'):
                            # Set the window bits during decompression to
                            # zlib.MAX_WBITS | 32 tells the zlib library to
                            # automatically detect gzip headers.
                            self.decompress_obj = zlib.decompressobj(zlib.MAX_WBITS | 32)

                        curl.setopt(pycurl.WRITEFUNCTION, self._write_to_fifo(self.script_proc.stdin))
                    else:
                        curl.setopt(pycurl.WRITEFUNCTION, self._write_to_fifo(target_file))

                    if self.task.data['scheme'] == 'hdfs':
                        curl.setopt(pycurl.FOLLOWLOCATION, True)

                    self.logger.info('Starting download')
                    with self.task.protect():
                        self.task.start_step('download')

                    try:
                        curl.perform()
                        status_code = curl.getinfo(pycurl.HTTP_CODE)
                        # Catch HTTP client errors, e.g. 404:
                        if status_code >= 400 and status_code < 500:
                            raise WorkerException('HTTP status code %s for file %s' % (status_code, self.key.name))

                        # If we're piping data through a script, catch timeouts and return codes
                        if self.script_proc is not None:
                            self.script_proc.stdin.close()
                            for i in range(SCRIPT_EXIT_TIMEOUT):
                                if self.script_proc.poll() is not None:
                                    break

                                time.sleep(1)
                            else:
                                self.logger.error('Script `%s` failed to exit...killing' % self.job.spec.options.script)
                                self.script_proc.kill()
                                raise WorkerException('Script `%s` failed to exit after %d seconds' % (self.job.spec.options.script, SCRIPT_EXIT_TIMEOUT))

                            if self.script_proc.returncode != 0:
                                self.logger.error('Script `%s` exited with return code %d' % (self.job.spec.options.script, self.script_proc.returncode))
                                raise WorkerException('Script `%s` exited with return code %d' % (self.job.spec.options.script, self.script_proc.returncode))
                    finally:
                        with self.task.protect():
                            self.task.stop_step('download')

                            if self.script_proc is not None and self.script_proc.returncode is None:
                                try:
                                    self.script_proc.kill()
                                except OSError as e:
                                    self.logger.warn("Failed to kill script `%s`: %s" % (self.job.spec.options.script, str(e)))
            except pycurl.error as e:
                errno = e.args[0]
                if errno in (pycurl.E_WRITE_ERROR, pycurl.E_ABORTED_BY_CALLBACK):
                    if self.pycurl_callback_exception is not None:
                        raise self.pycurl_callback_exception
                    elif self._should_exit:
                        self.logger.warn('Download failed...requeueing')
                        # Caught by the outer `except Exception as e`
                        raise RequeueTask()

                # Caught by the outer `except pycurl.error as e`
                raise
        except pycurl.error as e:
            errno = e.args[0]
            self._set_error(ConnectionException('libcurl error #%d. Lookup error here: http://curl.haxx.se/libcurl/c/libcurl-errors.html' % errno))
        except IOError as e:
            # This is raised sometimes instead of a pycurl error
            self._set_error(ConnectionException('IOError: %s (%d)' % (e.args[1], e.args[0])))
        except Exception as e:
            self._set_error(e)
        except KeyboardInterrupt:
            pass
        finally:
            self.logger.info('Finished downloading')
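
Here subprocess.PIPE points the other way: downloaded bytes are pushed into the user script's stdin (stdin=subprocess.PIPE) while the script's stdout is bound to the FIFO's file descriptor. A minimal sketch of that shape, with cat standing in for the user script and an ordinary file (a hypothetical path) in place of the FIFO:

import subprocess

with open('/tmp/target.out', 'wb') as target_file:  # hypothetical stand-in for the fifo
    proc = subprocess.Popen(['/bin/bash', '-c', 'cat'],
                            stdout=target_file.fileno(),
                            stdin=subprocess.PIPE)
    proc.stdin.write(b'downloaded chunk\n')  # chunks would arrive from libcurl
    proc.stdin.close()
    proc.wait()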

Example 155

Project: Moa Source File: openLavaActor.py
@moa.actor.async
def openlavaRunner(wd, cl, conf={}, **kwargs):
    """
    Run the job using OPENLAVA

    what does this function do?
    - put env in the environment
    - Execute the commandline (in cl)
    - store stdout & stderr in log files
    - return the rc
    """

    #see if we can get a command
    command = kwargs.get('command', 'unknown')
    if command == 'unknown':
        l.critical("runner should be called with a command")
        sys.exit(-1)

    l.debug("starting openlava actor for %s" % command)

    # this is a trick to get the real path of the log dir - but not of
    # any underlying directory - in case paths are mounted differently
    # on different hosts
    outDir = os.path.abspath(os.path.join(wd, '.moa', 'log.latest'))
    outDir = outDir.rsplit('.moa', 1)[0] + '.moa' + \
        os.path.realpath(outDir).rsplit('.moa', 1)[1]

    sysConf.job.data.openlava.outDir = outDir

    if not os.path.exists(outDir):
        try:
            os.makedirs(outDir)
        except OSError:
            pass

    #expect the cl to be nothing more than a single script to execute
    outfile = os.path.join(outDir, 'stdout')
    errfile = os.path.join(outDir, 'stderr')

    sysConf.job.data.openlava.outfile = outfile
    sysConf.job.data.openlava.errfile = errfile

    bsub_cl = ['bsub']

    sc = []

    def s(*cl):
        sc.append(" ".join(map(str, cl)))

    s("#!/bin/bash")
    s("#BSUB -o %s" % outfile)
    s("#BSUB -e %s" % errfile)
    s("#BSUB -q %s" % sysConf.args.openlavaQueue)

    if '--oln' in sys.argv:
        procs = sysConf.args.openlavaProcs
    else:
        procs = sysConf.job.conf.get('threads', sysConf.args.openlavaProcs)

    s("#BSUB -C %d" % procs)

    if sysConf.args.openlavaExtra.strip():
        s("#BSUB %s" % sysConf.args.openlavaExtra)

    if '--olm' in sys.argv:
        s("#BSUB -m %s" % sysConf.args.openlavaHost)
        #bsub_cl.extend(["-m", sysConf.args.openlavaHost])

    if command == 'run':
        prep_jids = sysConf.job.data.openlava.jids.get('prepare', [])
        #hold until the 'prepare' jobs are done
        #l.critical("Prepare jids - wait for these! %s" % prep_jids)
        for j in prep_jids:
            s("#BSUB -w 'done(%d)'" % j)
            #bsub_cl.extend(["-w", "'done(%d)'" % j])

    elif command == 'finish':
        run_jids = sysConf.job.data.openlava.jids.get('run', [])
        #hold until the 'prepare' jobs are done
        for j in run_jids:
            s("#BSUB -w 'done(%d)'" % j)
            #bsub_cl.extend(["-w", "'done(%d)'" % j])

    #give it a reasonable name
    jobname = ("%s_%s" % (wd.split('/')[-1], command[0]))
    bsub_cl.extend(['-J', jobname])
    s("#BSUB -J '%s'" % jobname)

    #dump the configuration in the environment
    s("")
    s("## ensure we're in the correct directory")
    s("cd", wd)


    s("")
    s("## Defining moa specific environment variables")
    s("")

    confkeys = sorted(conf.keys())
    for k in confkeys:
        # to prevent collisions, prepend all env variables
        # with 'moa_'
        if k[0] == '_' or k[:3] == 'moa':
            outk = k
        else:
            outk = 'moa_' + k
        v = conf[k]

        #this should not happen:
        if ' ' in outk:
            continue

        if isinstance(v, list):
            s("%s='%s'" % (outk, " ".join(v)))
        elif isinstance(v, dict):
            continue
        else:
            s("%s='%s'" % (outk, v))

    s("")
    s("## Run the command")
    s("")

    s(*cl)

    if sysConf.args.openlavaDummy:
        # Dummy mode - do not execute  - just write the script.
        ii = 0
        while True:
            outFile = os.path.join(wd, 'openlava.%s.%d.bash' % (command, ii))
            if not os.path.exists(outFile):
                break
            ii += 1
        with open(outFile, 'w') as F:
            F.write("\n".join(sc))
            moa.ui.message("Created openlava submit script: %s" %
                           outFile.rsplit('/', 1)[1])

            moa.ui.message("now run:")
            moa.ui.message("   %s < %s" % ((" ".join(map(str, bsub_cl))),
                                           outFile.rsplit('/', 1)[1]))
            return 0

    tmpfile = _writeOlTmpFile(wd, sc)

    moa.ui.message("Running %s:" % " ".join(map(str, bsub_cl)))
    moa.ui.message("(copy of) the bsub script: %s" % tmpfile)
    p = sp.Popen(map(str, bsub_cl), cwd=wd, stdout=sp.PIPE, stdin=sp.PIPE)
    o, e = p.communicate("\n".join(sc))

    jid = int(o.split("<")[1].split(">")[0])

    moa.ui.message("Submitted a job to openlava with id %d" % jid)

    if not sysConf.job.data.openlava.jids.get(command):
        sysConf.job.data.openlava.jids[command] = []

    #moa.ui.message("submitted job with openlava job id %s " % jid)

    #store the job id submitted
    if not sysConf.job.data.openlava.jids.get(command):
            sysConf.job.data.openlava.jids[command] = []
    if not sysConf.job.data.openlava.get('alljids'):
            sysConf.job.data.openlava.alljids = []
    sysConf.job.data.openlava.jids[command].append(jid)
    sysConf.job.data.openlava.alljids.append(jid)
    l.debug("jids stored %s" % str(sysConf.job.data.openlava.jids))
    return p.returncode
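
The Popen call above uses two pipes at once: the generated #BSUB script is fed to bsub on stdin via communicate(), and bsub's confirmation line is captured from stdout so the job id can be parsed out. The same shape in isolation (a sketch, assuming bsub is available; the parsing mirrors the code above):

import subprocess

script = "#!/bin/bash\n#BSUB -J demo\necho hello\n"
proc = subprocess.Popen(['bsub'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        universal_newlines=True)
out, _ = proc.communicate(script)  # write the script, then read bsub's reply
# bsub answers like: Job <1234> is submitted to queue <normal>.
job_id = int(out.split('<')[1].split('>')[0])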

Example 156

Project: webassets Source File: compass.py
    def open(self, out, source_path, **kw):
        """Compass currently doesn't take data from stdin, and doesn't allow
        us accessing the result from stdout either.

        Also, there are a bunch of other issues we need to work around:

         - compass doesn't support being given an explicit output file, only a
           "--css-dir" output directory.

           We have to "guess" the filename that will be created in that
           directory.

         - The output filename used is based on the input filename, and
           simply cutting off the length of the "sass_dir" (and changing
           the file extension). That is, compass expects the input
           filename to always be inside the "sass_dir" (which defaults to
           ./src), and if this is not the case, the output filename will
           be gibberish (missing characters in front). See:
           https://github.com/chriseppstein/compass/issues/304

           We fix this by setting the proper --sass-dir option.

         - Compass insists on creating a .sass-cache folder in the
           current working directory, and unlike the sass executable,
           there doesn't seem to be a way to disable it.

           The workaround is to set the working directory to our temp
           directory, so that the cache folder will be deleted at the end.
        """

        # Create temp folder one dir below output_path so sources in
        # sourcemap are correct. This will be in the project folder,
        # and as such, while extremely unlikely, this could interfere
        # with existing files and directories.
        tempout_dir = path.normpath(
            path.join(path.dirname(kw['output_path']), '../')
        )
        tempout = tempfile.mkdtemp(dir=tempout_dir)
        # Temporarily move to "tempout", so .sass-cache will be created there
        old_wd = os.getcwd()
        os.chdir(tempout)
        try:
            # Make sure to use normpath() to not cause trouble with
            # compass' simplistic path handling, where it just assumes
            # source_path is within sassdir, and cuts off the length of
            # sassdir from the input file.
            sassdir = path.normpath(path.dirname(source_path))
            source_path = path.normpath(source_path)

            # Compass offers some helpers like image-url(), which need
            # information about the urls under which media files will be
            # available. This is hard for two reasons: First, the options in
            # question aren't supported on the command line, so we need to write
            # a temporary config file. Secondly, they assume defined and
            # separate directories for "images", "stylesheets" etc., something
            # webassets knows nothing of: we don't support the user defining
            # such directories. Because we traditionally had this
            # filter point all type-specific directories to the root media
            # directory, we will define the paths to match this. In other
            # words, in Compass, both inline-image("img/test.png) and
            # image-url("img/test.png") will find the same file, and assume it
            # to be {env.directory}/img/test.png.
            # However, this partly negates the purpose of a utility like
            # image-url() in the first place - you not having to hard code
            # the location of your images. So we allow direct modification of
            # the configuration file via the COMPASS_CONFIG setting (see
            # tickets #36 and #125).
            #
            # Note that there is also the --relative-assets option, which we
            # can't use because it calculates an actual relative path between
            # the image and the css output file, the latter being in a
            # temporary directory in our case.
            config = CompassConfig(
                project_path=self.ctx.directory,
                http_path=self.ctx.url,
                http_images_dir='',
                http_stylesheets_dir='',
                http_fonts_dir='',
                http_javascripts_dir='',
                images_dir='',
                output_style=':expanded',
            )
            # Update with the custom config dictionary, if any.
            if self.config:
                config.update(self.config)
            config_file = path.join(tempout, '.config.rb')
            f = open(config_file, 'w')
            try:
                f.write(config.to_string())
                f.flush()
            finally:
                f.close()

            command = [self.compass or 'compass', 'compile']
            for plugin in self.plugins or []:
                command.extend(('--require', plugin))
            command.extend(['--sass-dir', sassdir,
                            '--css-dir', tempout,
                            '--config', config_file,
                            '--quiet',
                            '--boring',
                            source_path])
            proc = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    # shell: necessary on windows to execute
                                    # ruby files, but doesn't work on linux.
                                    shell=(os.name == 'nt'))
            stdout, stderr = proc.communicate()

            # compass seems to always write a utf8 header? to stderr, so
            # make sure to not fail just because there's something there.
            if proc.returncode != 0:
                raise FilterError(('compass: subprocess had error: stderr=%s, '+
                                   'stdout=%s, returncode=%s') % (
                                                stderr, stdout, proc.returncode))

            guessed_outputfilename = path.splitext(path.basename(source_path))[0]
            guessed_outputfilepath = path.join(tempout, guessed_outputfilename)
            output_file = open("%s.css" % guessed_outputfilepath, encoding='utf-8')
            if config.get('sourcemap'):
                sourcemap_file = open("%s.css.map" % guessed_outputfilepath)
                sourcemap_output_filepath = path.join(
                    path.dirname(kw['output_path']),
                    path.basename(sourcemap_file.name)
                )
                if not path.exists(path.dirname(sourcemap_output_filepath)):
                    os.mkdir(path.dirname(sourcemap_output_filepath))
                sourcemap_output_file = open(sourcemap_output_filepath, 'w')
                sourcemap_output_file.write(sourcemap_file.read())
                sourcemap_file.close()
            try:
                contents = output_file.read()
                out.write(contents)
            finally:
                output_file.close()
        finally:
            # Restore previous working dir
            os.chdir(old_wd)
            # Clean up the temp dir
            shutil.rmtree(tempout)
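
The pattern here is capturing both streams with two pipes and judging success by the return code alone, because compass writes to stderr even on success. Reduced to its essentials (a sketch, not the filter's API):

import subprocess

def run_checked(command):
    # Capture both streams; trust the return code, not the presence of stderr output.
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('subprocess had error: stderr=%r, returncode=%s'
                           % (stderr, proc.returncode))
    return stdout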

Example 157

Project: ns3-mmwave Source File: shellcmd.py
Function: run
    def run(self, verbose=False):
        pipeline = list(self.pipeline)
        files_to_close = []
        piped_commands = []
        piped_commands_display = []
        BEGIN, PIPE = list(range(2))
        state = BEGIN
        cwd = '.'
        while pipeline:
            node = pipeline.pop(0)

            if isinstance(node, Chdir):
                next_op = pipeline.pop(0)
                assert isinstance(next_op, And)
                cwd = os.path.join(cwd, node.dir)
                if verbose:
                    piped_commands_display.append("cd %s &&" % node.dir)
                continue
            
            assert isinstance(node, (Command, Chdir))
            cmd = node
            if verbose:
                if cmd.env_vars:
                    env_vars_str = ' '.join(['%s=%s' % (key, val) for key, val in cmd.env_vars.items()])
                    piped_commands_display.append("%s %s" % (env_vars_str, ' '.join(cmd.argv)))
                else:
                    piped_commands_display.append(' '.join(cmd.argv))

            if state == PIPE:
                stdin = piped_commands[-1].stdout
            elif cmd.stdin is not None:
                stdin = open(cmd.stdin, "r")
                if verbose:
                    piped_commands_display.append('< %s' % cmd.stdin)
                files_to_close.append(stdin)
            else:
                stdin = None

            if cmd.stdout is None:
                stdout = None
            elif cmd.stdout is Command.PIPE:
                stdout = subprocess.PIPE
            else:
                stdout = _open_out_file(cmd.stdout)
                files_to_close.append(stdout)
                if verbose:
                    piped_commands_display.append('> %s' % cmd.stdout)

            if cmd.stderr is None:
                stderr = None
            elif cmd.stderr is Command.PIPE:
                stderr = subprocess.PIPE
            elif cmd.stderr is Command.STDOUT:
                stderr = subprocess.STDOUT
                if verbose:
                    piped_commands_display.append('2>&1')
            else:
                stderr = _open_out_file(cmd.stderr)
                files_to_close.append(stderr)
                if verbose:
                    piped_commands_display.append('2> %s' % cmd.stderr)

            if cmd.env_vars:
                env = dict(os.environ)
                env.update(cmd.env_vars)
            else:
                env = None

            if cwd == '.':
                proc_cwd = None
            else:
                proc_cwd = cwd

            debug("command: subprocess.Popen(argv=%r, stdin=%r, stdout=%r, stderr=%r, env_vars=%r, cwd=%r)"
                  % (cmd.argv, stdin, stdout, stderr, cmd.env_vars, proc_cwd))
            proc = subprocess.Popen(cmd.argv, stdin=stdin, stdout=stdout, stderr=stderr, env=env, cwd=proc_cwd)
            del stdin, stdout, stderr
            piped_commands.append(proc)

            try:
                next_node = pipeline.pop(0)
            except IndexError:
                try:
                    retval = self._exec_piped_commands(piped_commands)
                    if verbose:
                        print("%s: exit code %i" % (' '.join(piped_commands_display), retval))
                finally:
                    for f in files_to_close:
                        if f is not dev_null:
                            f.close()
                    files_to_close = []
                return retval
            else:

                if isinstance(next_node, Pipe):
                    state = PIPE
                    piped_commands_display.append('|')

                elif isinstance(next_node, Or):
                    try:
                        this_retval = self._exec_piped_commands(piped_commands)
                    finally:
                        for f in files_to_close:
                            if f is not dev_null:
                                f.close()
                        files_to_close = []
                    if this_retval == 0:
                        if verbose:
                            print("%s: exit code %i (|| is short-circuited)" % (' '.join(piped_commands_display), retval))
                        return this_retval
                    if verbose:
                        print("%s: exit code %i (|| proceeds)" % (' '.join(piped_commands_display), retval))
                    state = BEGIN
                    piped_commands = []
                    piped_commands_display = []

                elif isinstance(next_node, And):
                    try:
                        this_retval = self._exec_piped_commands(piped_commands)
                    finally:
                        for f in files_to_close:
                            if f is not dev_null:
                                f.close()
                        files_to_close = []
                    if this_retval != 0:
                        if verbose:
                            print("%s: exit code %i (&& is short-circuited)" % (' '.join(piped_commands_display), retval))
                        return this_retval
                    if verbose:
                        print("%s: exit code %i (&& proceeds)" % (' '.join(piped_commands_display), retval))
                    state = BEGIN
                    piped_commands = []
                    piped_commands_display = []
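
The shell-style pipeline above hinges on one detail: when a command's stdout is a PIPE, the resulting proc.stdout file object can be handed to the next Popen as its stdin. The canonical two-stage version (a sketch of ls -l | wc -l):

import subprocess

first = subprocess.Popen(['ls', '-l'], stdout=subprocess.PIPE)
second = subprocess.Popen(['wc', '-l'], stdin=first.stdout,
                          stdout=subprocess.PIPE)
first.stdout.close()  # let first receive SIGPIPE if second exits early
line_count = second.communicate()[0]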

Example 158

Project: HoneyConnector Source File: process.py
def launch_tor(tor_cmd = "tor", args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False):
  """
  Initializes a tor process. This blocks until initialization completes or we
  error out.

  If tor's data directory is missing or stale then bootstrapping will include
  making several requests to the directory authorities which can take a little
  while. Usually this is done in 50 seconds or so, but occasionally calls seem
  to get stuck, taking well over the default timeout.

  **To work, tor must log at NOTICE runlevel to stdout.** It does this by
  default, but if you have a 'Log' entry in your torrc then you'll also need
  'Log NOTICE stdout'.

  Note: The timeout argument does not work on Windows, and relies on the global
  state of the signal module.

  :param str tor_cmd: command for starting tor
  :param list args: additional arguments for tor
  :param str torrc_path: location of the torrc for us to use
  :param int completion_percent: percent of bootstrap completion at which
    this'll return
  :param functor init_msg_handler: optional functor that will be provided with
    tor's initialization stdout as we get it
  :param int timeout: time after which the attempt to start tor is aborted, no
    timeouts are applied if **None**
  :param bool take_ownership: asserts ownership over the tor process so it
    aborts if this python process terminates or a :class:`~stem.control.Controller`
    we establish to it disconnects

  :returns: **subprocess.Popen** instance for the tor subprocess

  :raises: **OSError** if we either fail to create the tor process or reached a
    timeout without success
  """

  if stem.util.system.is_windows():
    timeout = None

  # sanity check that we got a tor binary

  if os.path.sep in tor_cmd:
    # got a path (either relative or absolute), check what it leads to

    if os.path.isdir(tor_cmd):
      raise OSError("'%s' is a directory, not the tor executable" % tor_cmd)
    elif not os.path.isfile(tor_cmd):
      raise OSError("'%s' doesn't exist" % tor_cmd)
  elif not stem.util.system.is_available(tor_cmd):
    raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd)

  # double check that we have a torrc to work with
  if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path):
    raise OSError("torrc doesn't exist (%s)" % torrc_path)

  # starts a tor subprocess, raising an OSError if it fails
  runtime_args, temp_file = [tor_cmd], None

  if args:
    runtime_args += args

  if torrc_path:
    if torrc_path == NO_TORRC:
      temp_file = tempfile.mkstemp(prefix = "empty-torrc-", text = True)[1]
      runtime_args += ["-f", temp_file]
    else:
      runtime_args += ["-f", torrc_path]

  if take_ownership:
    runtime_args += ["__OwningControllerProcess", str(os.getpid())]

  tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)

  if timeout:
    def timeout_handler(signum, frame):
      # terminates the uninitialized tor process and raise on timeout
      if temp_file:
        try:
          os.remove(temp_file)
        except:
          pass

      # We can't kill the subprocess on python 2.5 running Windows without the
      # win32process module...
      # http://stackoverflow.com/questions/552423/use-python-2-6-subprocess-module-in-python-2-5/552510#552510

      if stem.prereq.is_python_26():
        tor_process.kill()
      elif not stem.util.system.is_windows():
        os.kill(tor_process.pid, signal.SIGTERM)

      raise OSError("reached a %i second timeout without success" % timeout)

    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)

  bootstrap_line = re.compile("Bootstrapped ([0-9]+)%: ")
  problem_line = re.compile("\[(warn|err)\] (.*)$")
  last_problem = "Timed out"

  while True:
    # Tor's stdout will be read as ASCII bytes. This is fine for python 2, but
    # in python 3 that means it'll mismatch with other operations (for instance
    # the bootstrap_line.search() call later will fail).
    #
    # It seems like python 2.x is perfectly happy for this to be unicode, so
    # normalizing to that.

    init_line = tor_process.stdout.readline().decode("utf-8", "replace").strip()

    # this will provide empty results if the process is terminated
    if not init_line:
      if timeout:
        signal.alarm(0)  # stop alarm

      # ... but best make sure
      if stem.prereq.is_python_26():
        tor_process.kill()
      elif not stem.util.system.is_windows():
        os.kill(tor_process.pid, signal.SIGTERM)

      raise OSError("Process terminated: %s" % last_problem)

    # provide the caller with the initialization message if they want it

    if init_msg_handler:
      init_msg_handler(init_line)

    # return the process if we're done with bootstrapping
    bootstrap_match = bootstrap_line.search(init_line)
    problem_match = problem_line.search(init_line)

    if bootstrap_match and int(bootstrap_match.groups()[0]) >= completion_percent:
      if timeout:
        signal.alarm(0)  # stop alarm

      if temp_file:
        try:
          os.remove(temp_file)
        except:
          pass

      return tor_process
    elif problem_match:
      runlevel, msg = problem_match.groups()

      if not "see warnings above" in msg:
        if ": " in msg:
          msg = msg.split(": ")[-1].strip()

        last_problem = msg

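The function above reduces to a reusable pattern: pipe the child's stdout, scan it line by line for a readiness message, and bound the whole wait with signal.alarm(). Below is a minimal, Unix-only sketch of that pattern; wait_for_line and its parameters are illustrative names, not part of stem's API.

import re
import signal
import subprocess

def wait_for_line(cmd, pattern, timeout=None):
    # pipe stdout (and fold stderr into it) so we can watch the child line by line
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    if timeout:
        def handler(signum, frame):
            proc.kill()
            raise OSError("reached a %i second timeout without success" % timeout)

        signal.signal(signal.SIGALRM, handler)  # SIGALRM makes this Unix-only
        signal.alarm(timeout)

    try:
        matcher = re.compile(pattern)
        for raw_line in proc.stdout:
            if matcher.search(raw_line.decode("utf-8", "replace")):
                return proc  # ready; the caller now owns the running process
        raise OSError("process exited before printing a line matching %r" % pattern)
    finally:
        if timeout:
            signal.alarm(0)  # always clear any pending alarm
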
Example 159

Project: flopy Source File: mbase.py
def run_model(exe_name, namefile, model_ws='./',
              silent=False, pause=False, report=False,
              normal_msg='normal termination',
              use_async=False):
    """
    This function will run the model using subprocess.Popen.  It can
    optionally communicate with the model's stdout asynchronously and
    report progress to the screen with timestamps.

    Parameters
    ----------
    exe_name : str
        Executable name (with path, if necessary) to run.
    namefile : str
        Namefile of the model to run. This must be the filename only,
        without the path.
    model_ws : str
        Path to the location of the namefile. (default is the
        current working directory - './')
    silent : boolean
        Suppress echoing run information to the screen (default is False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method (default is False).
    normal_msg : str
        Normal termination message used to determine if the
        run terminated normally. (default is 'normal termination')
    use_async : boolean
        Asynchronously read the model's stdout and report it with
        timestamps. Good for models that take a long time to run; not
        good for models that run very quickly.
    Returns
    -------
    (success, buff)
    success : boolean
    buff : list of lines of stdout

    """
    success = False
    buff = []

    # Check to make sure that program and namefile exist
    exe = which(exe_name)
    if exe is None:
        import platform
        if platform.system() == 'Windows':
            if not exe_name.lower().endswith('.exe'):
                exe = which(exe_name + '.exe')
    if exe is None:
        s = 'The program {} does not exist or is not executable.'.format(
            exe_name)
        raise Exception(s)
    else:
        if not silent:
            s = 'FloPy is using the following executable to run the model: {}'.format(
                exe)
            print(s)

    if not os.path.isfile(os.path.join(model_ws, namefile)):
        s = 'The namefile for this model does not exist: {}'.format(namefile)
        raise Exception(s)

    # simple little function for the thread to target
    def q_output(output, q):
        for line in iter(output.readline, b''):
            q.put(line)

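    # launch the model with its stdout piped so the loops below can parse it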
    proc = sp.Popen([exe_name, namefile],
                    stdout=sp.PIPE, cwd=model_ws)

    if not use_async:
        while True:
            line = proc.stdout.readline()
            c = line.decode('utf-8')
            if c != '':
                if normal_msg in c.lower():
                    success = True
                c = c.rstrip('\r\n')
                if not silent:
                    print('{}'.format(c))
                if report:
                    buff.append(c)
            else:
                break
        return success, buff


    #some tricks for the async stdout reading
    q = Queue.Queue()
    thread = threading.Thread(target=q_output,args=(proc.stdout,q))
    thread.daemon = True
    thread.start()

    failed_words = ["fail","error"]
    last = datetime.now()
    lastsec = 0.
    while True:
        try:
            line = q.get_nowait()
        except Queue.Empty:
            pass
        else:
            if line == '':
                break
            line = line.decode().lower().strip()
            if line != '':
                now = datetime.now()
                dt = now - last
                tsecs = dt.total_seconds() - lastsec
                line = "(elapsed:{0})-->{1}".format(tsecs,line)
                lastsec = tsecs + lastsec
                buff.append(line)
                if not silent:
                    print(line)
                for fword in failed_words:
                    if fword in line:
                        success = False
                        break
        if proc.poll() is not None:
            break
    proc.wait()
    thread.join(timeout=1)
    buff.extend(line.decode(errors="replace") for line in proc.stdout.readlines())
    proc.stdout.close()

    for line in buff:
        if normal_msg in line:
            print("success")
            success = True
            break

    if pause:
        input('Press Enter to continue...')
    return success, buff

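The asynchronous branch of run_model() is the standard thread-plus-queue workaround for the fact that readline() on a pipe blocks. Here is a stripped-down sketch of the same idea in modern Python; stream_output and the 0.1-second poll interval are illustrative choices, not flopy's API.

import queue
import subprocess
import threading

def stream_output(cmd):
    # yield the child's stdout lines as they arrive, without blocking forever
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    q = queue.Queue()

    def pump(pipe):
        # the daemon thread does the blocking readline() calls
        for line in iter(pipe.readline, b''):
            q.put(line)

    threading.Thread(target=pump, args=(proc.stdout,), daemon=True).start()

    while True:
        try:
            yield q.get(timeout=0.1).decode(errors="replace").rstrip()
        except queue.Empty:
            if proc.poll() is not None:
                break

    # drain anything the thread queued after the child exited
    while not q.empty():
        yield q.get_nowait().decode(errors="replace").rstrip()
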
Example 160

Project: otr-verwaltung Source File: Mkv.py
    def on_mkv_clicked(self, widget, data=None):
        filenames = self.gui.main_window.get_selected_filenames()

        if len(filenames) == 0:
            self.gui.message_error_box("Es muss eine Datei markiert sein.")
            return

        self.toolbutton.set_sensitive(False)
        self.gui.main_window.set_tasks_visible(True)
        self.success = 0
        self.errors = {}
                
        def mkvmerge():
            # env
            my_env = os.environ.copy()
            my_env["LANG"] = "C"
            
            for count, filename in enumerate(filenames):
                yield 0, count
                yield 3, 0
                self.progress = 0

                #analyse file
                cutter = Cut(self.app, self.gui)
                fps, dar, sar, max_frames, ac3_stream, error = cutter.analyse_mediafile(filename)
                if fps is None:
                    self.errors[filename] = error
                    continue

                # encode aac with ffmpeg
                if self.Config['EncodeAudioToAAC']:
                    # calculate the normalization volume
                    yield 5, count
                    if self.Config['NormalizeAudio'] and self.Config['EncodeAudioToAAC']:
                        vol, error = self.get_norm_volume(filename)
                    else:
                        vol = 1.0

                    # ffmpeg pass               
                    yield 1, count
                    self.progress = 0
                    ffmpegpass_file = fileoperations.make_unique_filename(os.path.splitext(filename)[0] + "_remux.mkv")

                    # convert first audio stream to aac
                    if self.Config['EncodeOnlyFirstAudioToAAC']:
                        aacaudiostreams = '-c:a:0'
                    else:
                        aacaudiostreams = '-c:a'

                    # choose the aac encoder depending on the ffmpeg build
                    ffmpeg = self.app.config.get_program('ffmpeg')
                    if 'nonfree' in ffmpeg:
                        # nonfree ffmpeg version with fdk support available
                        audiocodec = ['-c:a',  'copy',  aacaudiostreams,  'libfdk_aac',  '-flags',  '+qscale',  '-profile:a:0',  'aac_low',  '-global_quality',  '5' ,'-afterburner',  '1']
                    else:
                        # only gpl version of ffmpeg available -> use standard aac codec
                        audiocodec = ['-c:a',  'copy',  aacaudiostreams,  'aac', '-strict', '-2','-profile:a:0',  'aac_low',  '-ab' ,'192k',  '-cutoff',  '18000']

                    if self.Config['DownMixStereo'] and self.Config['EncodeAudioToAAC']:
                        audiocodec.extend(['-ac:0',  '2'])

                    if ac3_stream is None:
                        # no ac3 stream found - all streams are muxed
                        map_args = ['-map', '0']
                    else:
                        if self.Config['RemoveOtherAudioStreamsThanAC3']:
                            # mux only video and ac3 stream
                            map_args = ['-map', '0:v', '-map', ac3_stream]
                        else:
                            map_args = ['-map', '0']

                    args = [ffmpeg, "-loglevel", "info", "-y", "-drc_scale", "1.0", "-i", filename, "-vn", '-af', 'volume=volume=' + str(vol), "-vsync", "1", '-async', '1000', "-dts_delta_threshold", "100", "-vf", "fps=" + str(fps), '-threads', '0', ffmpegpass_file]
                    map_args.extend(audiocodec)
                    args[8:8] = map_args
                
                    try:
                        p = subprocess.Popen(args, stderr=subprocess.PIPE, universal_newlines=True)
                    except OSError:
                        self.errors[filename] = "FFMPEG (intern) wurde nicht gefunden!"            
                        continue

                    yield 4, 0
                    line = ""
                    infos_match = re.compile(r"time=(\d{2,}):(\d{2,}):(\d{2,}\.\d{2,})")

                    while p.poll() is None:
                        line = p.stderr.readline()
                        m = re.search(infos_match, line)
                        if m and max_frames != 0:
                            frame = (float(m.group(1))*3600 + float(m.group(2))*60 + float(m.group(3)))*fps
                            percent = float(frame / float(max_frames)) * 100
                            if percent > self.progress:
                                self.progress = percent
                                yield 4, self.progress
                
                    exit_code = p.poll()

                    if exit_code == 0:
                        pass
                    else:
                        self.errors[filename] = "Fehler beim Erzeugen der MP4 Datei durch FFMPEG"
                        if os.path.exists(ffmpegpass_file):
                            fileoperations.remove_file(ffmpegpass_file)
                        continue

                # mkvmerge pass
                yield 2, count
                self.progress = 0

                mkvpass_file = fileoperations.make_unique_filename(os.path.splitext(filename)[0] + ".mkv")

                if self.Config['EncodeAudioToAAC']:
                    args = [self.app.config.get_program('mkvmerge'), '--engage', 'no_cue_duration', '--engage',  'no_cue_relative_position', '--ui-language',  'en_US',"-o", mkvpass_file, '-A',  filename, '-D',   ffmpegpass_file]
                else:
                    if self.Config['RemoveOtherAudioStreamsThanAC3'] and ac3_stream:
                        args = [self.app.config.get_program('mkvmerge'), '--engage', 'no_cue_duration', '--engage',  'no_cue_relative_position', '--ui-language',  'en_US', "-o", mkvpass_file, '-a',  ac3_stream[2],  filename]
                    else:
                        args = [self.app.config.get_program('mkvmerge'),  '--engage', 'no_cue_duration', '--engage',  'no_cue_relative_position', '--ui-language',  'en_US', "-o", mkvpass_file, filename]

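                # mkvmerge reports percentage progress on stdout; pipe it so the loop below can parse it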
                p = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True, env=my_env)
                p.stdout.readline()

                line = ""                            
                while p.poll() is None:
                    # read progress from stdout 
                    char = p.stdout.read(1)
                    line += char
                    progress = ''
                    if char == ':':
                        if "Error" in line or "Warning" in line:                            
                            break
                    
                        while char != '%':
                            char = p.stdout.read(1)
                            progress += char
                      
                        try:
                            self.progress = int(progress.strip(' %'))
                            yield 3, self.progress
                        except ValueError:
                            pass
                
                exit_code = p.poll()

                if exit_code in (0, 1):
                    self.success += 1
                    if self.Config['EncodeAudioToAAC']:
                        fileoperations.remove_file(ffmpegpass_file)
                    if self.Config['DumpAVIs']:
                        if self.Config['DumpAVIs_delete']:
                            fileoperations.remove_file(filename)
                        else:
                            new_filename = os.path.join(self.app.config.get('general', 'folder_trash_avis'), os.path.basename(filename))
                            if os.path.exists(new_filename):
                                fileoperations.remove_file(new_filename)
                            fileoperations.move_file(filename, self.app.config.get('general', 'folder_trash_avis'))
                else:
                    error = p.stdout.readline()
                    try:
                        error = error.split(":")[1]
                    except IndexError:
                        pass
                        
                    if "unknown type" in error:
                        error = "Datei konnte nicht gelesen werden."
                    self.errors[filename] = error
                  
        def loop(state, argument):            
            if state == 0:
                self.gui.main_window.set_tasks_text("Analysiere Datei ... %s/%s" % (str(argument + 1), str(len(filenames))))
            elif state == 1:
                self.gui.main_window.set_tasks_text("Audiospur in AAC wandeln ... %s/%s" % (str(argument + 1), str(len(filenames))))
            elif state == 2:
                self.gui.main_window.set_tasks_text("MKV erstellen ...  %s/%s" % (str(argument + 1), str(len(filenames))))
            elif state == 5:
                self.gui.main_window.set_tasks_text("Normalisierungswert berechnen ... %s/%s" % (str(argument + 1), str(len(filenames))))
            else:                
                self.gui.main_window.set_tasks_progress(argument)
        
        def complete():
            if len(self.errors) == 0:
                self.gui.main_window.change_status(0, "Erfolgreich %s/%s Dateien umgewandelt." % (str(self.success), str(len(filenames))))
            else:
                self.gui.main_window.change_status(0, "Erfolgreich %s/%s Dateien umgewandelt. (Fehler: %s)" % (str(self.success), str(len(filenames)), " ".join(self.errors.values())))
            
            self.gui.main_window.set_tasks_visible(False)                
            if self.success > 0:
                self.app.show_section(self.app.section)
            self.toolbutton.set_sensitive(True)
                        
        GeneratorTask(mkvmerge, loop, complete).start() 

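One detail in the example above is easy to miss: ffmpeg writes its progress lines to stderr, not stdout, so stderr is the pipe that gets captured and parsed. Here is a minimal sketch of the same progress-scraping idea; ffmpeg_progress and the report callback are illustrative, and the regex is a tightened version of the one above.

import re
import subprocess

TIME_RE = re.compile(r"time=(\d{2,}):(\d{2}):(\d{2}\.\d+)")

def ffmpeg_progress(args, duration_secs, report=print):
    # universal_newlines=True gives us str lines and normalizes ffmpeg's \r updates
    p = subprocess.Popen(["ffmpeg"] + list(args), stderr=subprocess.PIPE,
                         universal_newlines=True)
    for line in p.stderr:
        m = TIME_RE.search(line)
        if m and duration_secs:
            h, mins, secs = (float(g) for g in m.groups())
            report(min(100.0, (h * 3600 + mins * 60 + secs) / duration_secs * 100))
    return p.wait()
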
Example 161

Project: dashman Source File: tx.py
def main():
    parser = argparse.ArgumentParser(
        description="Manipulate bitcoin (or alt coin) transactions.",
        epilog=EPILOG)

    parser.add_argument('-t', "--transaction-version", type=int,
                        help='Transaction version, either 1 (default) or 3 (not yet supported).')

    parser.add_argument('-l', "--lock-time", type=parse_locktime, help='Lock time; either a block '
                        'index, or a date/time (example: "2014-01-01T15:00:00")')

    parser.add_argument('-n', "--network", default="BTC",
                        help='Define network code (M=Bitcoin mainnet, T=Bitcoin testnet).')

    parser.add_argument('-a', "--augment", action='store_true',
                        help='augment tx by adding any missing spendable metadata by fetching'
                             ' inputs from cache and/or web services')

    parser.add_argument('-s', "--verbose-signature", action='store_true',
                        help='Display technical signature details.')

    parser.add_argument("-i", "--fetch-spendables", metavar="address", action="append",
                        help='Add all unspent spendables for the given bitcoin address. This information'
                        ' is fetched from web services.')

    parser.add_argument('-f', "--private-key-file", metavar="path-to-private-keys", action="append",
                        help='file containing WIF or BIP0032 private keys. If file name ends with .gpg, '
                        '"gpg -d" will be invoked automatically. File is read one line at a time, and if '
                        'the file contains only one WIF per line, it will also be scanned for a bitcoin '
                        'address, and any addresses found will be assumed to be public keys for the given'
                        ' private key.',
                        type=argparse.FileType('r'))

    parser.add_argument('-g', "--gpg-argument", help='argument to pass to gpg (besides -d).', default='')

    parser.add_argument("--remove-tx-in", metavar="tx_in_index_to_delete", action="append", type=int,
                        help='remove a tx_in')

    parser.add_argument("--remove-tx-out", metavar="tx_out_index_to_delete", action="append", type=int,
                        help='remove a tx_out')

    parser.add_argument('-F', "--fee", help='fee, in satoshis, to pay on transaction, or '
                        '"standard" to auto-calculate. This is only useful if the "split pool" '
                        'is used; otherwise, the fee is automatically set to the unclaimed funds.',
                        default="standard", metavar="transaction-fee", type=parse_fee)

    parser.add_argument('-C', "--cache", help='force the resultant transaction into the transaction cache.'
                        ' Mostly for testing.', action='store_true')

    parser.add_argument('-u', "--show-unspents", action='store_true',
                        help='show TxOut items for this transaction in Spendable form.')

    parser.add_argument('-b', "--bitcoind-url",
                        help='URL to bitcoind instance to validate against (http://user:pass@host:port).')

    parser.add_argument('-o', "--output-file", metavar="path-to-output-file", type=argparse.FileType('wb'),
                        help='file to write transaction to. This suppresses most other output.')

    parser.add_argument('-p', "--pay-to-script", metavar="pay-to-script", action="append",
                        help='a hex version of a script required for a pay-to-script input (a bitcoin address that starts with 3)')

    parser.add_argument('-P', "--pay-to-script-file", metavar="pay-to-script-file", nargs=1, type=argparse.FileType('r'),
                        help='a file containing hex scripts (one per line) corresponding to pay-to-script inputs')

    parser.add_argument("argument", nargs="+", help='generic argument: can be a hex transaction id '
                        '(exactly 64 characters) to be fetched from cache or a web service;'
                        ' a transaction as a hex string; a path name to a transaction to be loaded;'
                        ' a spendable 4-tuple of the form tx_id/tx_out_idx/script_hex/satoshi_count '
                        'to be added to TxIn list; an address/satoshi_count to be added to the TxOut '
                        'list; an address to be added to the TxOut list and placed in the "split'
                        ' pool".')

    args = parser.parse_args()

    # defaults

    txs = []
    spendables = []
    payables = []

    key_iters = []

    TX_ID_RE = re.compile(r"^[0-9a-fA-F]{64}$")

    # there are a few warnings we might optionally print out, but only if
    # they are relevant. We don't want to print them out multiple times, so we
    # collect them here and print them at the end if they ever kick in.

    warning_tx_cache = None
    warning_get_tx = None
    warning_spendables = None

    if args.private_key_file:
        wif_re = re.compile(r"[1-9a-km-zA-LMNP-Z]{51,111}")
        # address_re = re.compile(r"[1-9a-kmnp-zA-KMNP-Z]{27-31}")
        for f in args.private_key_file:
            if f.name.endswith(".gpg"):
                gpg_args = ["gpg", "-d"]
                if args.gpg_argument:
                    gpg_args.extend(args.gpg_argument.split())
                gpg_args.append(f.name)
                popen = subprocess.Popen(gpg_args, stdout=subprocess.PIPE)
                f = popen.stdout
            for line in f.readlines():
                # decode
                if isinstance(line, bytes):
                    line = line.decode("utf8")
                # look for WIFs
                possible_keys = wif_re.findall(line)

                def make_key(x):
                    try:
                        return Key.from_text(x)
                    except Exception:
                        return None

                keys = [make_key(x) for x in possible_keys]
                for key in keys:
                    if key:
                        key_iters.append((k.wif() for k in key.subkeys("")))

                # if len(keys) == 1 and key.hierarchical_wallet() is None:
                #    # we have exactly 1 WIF. Let's look for an address
                #   potential_addresses = address_re.findall(line)

    # update p2sh_lookup
    p2sh_lookup = {}
    if args.pay_to_script:
        for p2s in args.pay_to_script:
            try:
                script = h2b(p2s)
                p2sh_lookup[hash160(script)] = script
            except Exception:
                print("warning: error parsing pay-to-script value %s" % p2s)

    if args.pay_to_script_file:
        hex_re = re.compile(r"[0-9a-fA-F]+")
        for f in args.pay_to_script_file:
            count = 0
            for l in f:
                try:
                    m = hex_re.search(l)
                    if m:
                        p2s = m.group(0)
                        script = h2b(p2s)
                        p2sh_lookup[hash160(script)] = script
                        count += 1
                except Exception:
                    print("warning: error parsing pay-to-script file %s" % f.name)
            if count == 0:
                print("warning: no scripts found in %s" % f.name)

    # we create the tx_db lazily
    tx_db = None

    for arg in args.argument:

        # hex transaction id
        if TX_ID_RE.match(arg):
            if tx_db is None:
                warning_tx_cache = message_about_tx_cache_env()
                warning_get_tx = message_about_get_tx_env()
                tx_db = get_tx_db()
            tx = tx_db.get(h2b_rev(arg))
            if not tx:
                for m in [warning_tx_cache, warning_get_tx, warning_spendables]:
                    if m:
                        print("warning: %s" % m, file=sys.stderr)
                parser.error("can't find Tx with id %s" % arg)
            txs.append(tx)
            continue

        # hex transaction data
        try:
            tx = Tx.from_hex(arg)
            txs.append(tx)
            continue
        except Exception:
            pass

        is_valid = is_address_valid(arg, allowable_netcodes=[args.network])
        if is_valid:
            payables.append((arg, 0))
            continue

        try:
            key = Key.from_text(arg)
            # TODO: check network
            if key.wif() is None:
                payables.append((key.address(), 0))
                continue
            # TODO: support paths to subkeys
            key_iters.append((k.wif() for k in key.subkeys("")))
            continue
        except Exception:
            pass

        if os.path.exists(arg):
            try:
                with open(arg, "rb") as f:
                    if f.name.endswith("hex"):
                        f = io.BytesIO(codecs.getreader("hex_codec")(f).read())
                    tx = Tx.parse(f)
                    txs.append(tx)
                    try:
                        tx.parse_unspents(f)
                    except Exception as ex:
                        pass
                    continue
            except Exception:
                pass

        parts = arg.split("/")
        if len(parts) == 4:
            # spendable
            try:
                spendables.append(Spendable.from_text(arg))
                continue
            except Exception:
                pass

        if len(parts) == 2 and is_address_valid(parts[0], allowable_netcodes=[args.network]):
            try:
                payables.append(parts)
                continue
            except ValueError:
                pass

        parser.error("can't parse %s" % arg)

    if args.fetch_spendables:
        warning_spendables = message_about_spendables_for_address_env()
        for address in args.fetch_spendables:
            spendables.extend(spendables_for_address(address))

    for tx in txs:
        if tx.missing_unspents() and args.augment:
            if tx_db is None:
                warning_tx_cache = message_about_tx_cache_env()
                warning_get_tx = message_about_get_tx_env()
                tx_db = get_tx_db()
            tx.unspents_from_db(tx_db, ignore_missing=True)

    txs_in = []
    txs_out = []
    unspents = []
    # we use a clever trick here to keep each tx_in corresponding with its tx_out
    for tx in txs:
        smaller = min(len(tx.txs_in), len(tx.txs_out))
        txs_in.extend(tx.txs_in[:smaller])
        txs_out.extend(tx.txs_out[:smaller])
        unspents.extend(tx.unspents[:smaller])
    for tx in txs:
        smaller = min(len(tx.txs_in), len(tx.txs_out))
        txs_in.extend(tx.txs_in[smaller:])
        txs_out.extend(tx.txs_out[smaller:])
        unspents.extend(tx.unspents[smaller:])
    for spendable in spendables:
        txs_in.append(spendable.tx_in())
        unspents.append(spendable)
    for address, coin_value in payables:
        script = standard_tx_out_script(address)
        txs_out.append(TxOut(coin_value, script))

    lock_time = args.lock_time
    version = args.transaction_version

    # if no lock_time is explicitly set, inherit from the first tx or use default
    if lock_time is None:
        if txs:
            lock_time = txs[0].lock_time
        else:
            lock_time = DEFAULT_LOCK_TIME

    # if no version is explicitly set, inherit from the first tx or use default
    if version is None:
        if txs:
            version = txs[0].version
        else:
            version = DEFAULT_VERSION

    if args.remove_tx_in:
        s = set(args.remove_tx_in)
        txs_in = [tx_in for idx, tx_in in enumerate(txs_in) if idx not in s]

    if args.remove_tx_out:
        s = set(args.remove_tx_out)
        txs_out = [tx_out for idx, tx_out in enumerate(txs_out) if idx not in s]

    tx = Tx(txs_in=txs_in, txs_out=txs_out, lock_time=lock_time, version=version, unspents=unspents)

    fee = args.fee
    try:
        distribute_from_split_pool(tx, fee)
    except ValueError as ex:
        print("warning: %s" % ex.args[0], file=sys.stderr)

    unsigned_before = tx.bad_signature_count()
    if unsigned_before > 0 and key_iters:
        def wif_iter(iters):
            while len(iters) > 0:
                for idx, iter in enumerate(iters):
                    try:
                        wif = next(iter)
                        yield wif
                    except StopIteration:
                        iters = iters[:idx] + iters[idx+1:]
                        break

        print("signing...", file=sys.stderr)
        sign_tx(tx, wif_iter(key_iters), p2sh_lookup=p2sh_lookup)

    unsigned_after = tx.bad_signature_count()
    if unsigned_after > 0 and key_iters:
        print("warning: %d TxIn items still unsigned" % unsigned_after, file=sys.stderr)

    if len(tx.txs_in) == 0:
        print("warning: transaction has no inputs", file=sys.stderr)

    if len(tx.txs_out) == 0:
        print("warning: transaction has no outputs", file=sys.stderr)

    include_unspents = (unsigned_after > 0)
    tx_as_hex = tx.as_hex(include_unspents=include_unspents)

    if args.output_file:
        f = args.output_file
        if f.name.endswith(".hex"):
            f.write(tx_as_hex.encode("utf8"))
        else:
            tx.stream(f)
            if include_unspents:
                tx.stream_unspents(f)
        f.close()
    elif args.show_unspents:
        for spendable in tx.tx_outs_as_spendable():
            print(spendable.as_text())
    else:
        if not tx.missing_unspents():
            check_fees(tx)
        dump_tx(tx, args.network, args.verbose_signature)
        if include_unspents:
            print("including unspents in hex dump since transaction not fully signed")
        print(tx_as_hex)

    if args.cache:
        if tx_db is None:
            warning_tx_cache = message_about_tx_cache_env()
            warning_get_tx = message_about_get_tx_env()
            tx_db = get_tx_db()
        tx_db.put(tx)

    if args.bitcoind_url:
        if tx_db is None:
            warning_tx_cache = message_about_tx_cache_env()
            warning_get_tx = message_about_get_tx_env()
            tx_db = get_tx_db()
        validate_bitcoind(tx, tx_db, args.bitcoind_url)

    if tx.missing_unspents():
        print("\n** can't validate transaction as source transactions missing", file=sys.stderr)
    else:
        try:
            if tx_db is None:
                warning_tx_cache = message_about_tx_cache_env()
                warning_get_tx = message_about_get_tx_env()
                tx_db = get_tx_db()
            tx.validate_unspents(tx_db)
            print('all incoming transaction values validated')
        except BadSpendableError as ex:
            print("\ncuem ERROR: FEES INCORRECTLY STATED: %s" % ex.args[0], file=sys.stderr)
        except Exception as ex:
            print("\n*** can't validate source transactions as untampered: %s" %
                  ex.args[0], file=sys.stderr)

    # print warnings
    for m in [warning_tx_cache, warning_get_tx, warning_spendables]:
        if m:
            print("warning: %s" % m, file=sys.stderr)

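A pattern worth isolating from the example above: the stdout pipe of a child process is file-like, so it can be swapped in for a plain file object and the downstream loop iterates lines either way. A short sketch of that substitution follows; decrypted_lines is an illustrative helper, not part of this project.

import subprocess

def decrypted_lines(path, gpg_args=()):
    # yield text lines from path, running 'gpg -d' first when it ends in .gpg
    if path.endswith(".gpg"):
        popen = subprocess.Popen(["gpg", "-d", *gpg_args, path],
                                 stdout=subprocess.PIPE)
        f = popen.stdout  # file-like: supports line iteration just like open()
    else:
        f = open(path, "rb")
    for line in f:
        yield line.decode("utf8") if isinstance(line, bytes) else line
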
Example 162

Project: ADEL Source File: _dumpFiles.py
def get_SQLite_files(backup_dir, os_version, device_name):
    hash_value_file = backup_dir + "/hash_values.log"
    hash_value = open(hash_value_file, "a+")
    _adel_log.log("\n############  DUMP SQLite FILES  ############\n", 2)
    # Standard applications

    # Accounts database (IMSI, Account_Name, Account_Type, sha1_hash)
    try:
        accountdb = subprocess.Popen(['adb', 'pull', '/data/system/accounts.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        accountdb.wait()
        _adel_log.log("accounts.db -> " + accountdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/accounts.db").hexdigest(), 3)
        hash_value.write("accounts.db -> " + hashlib.sha256(backup_dir + "/accounts.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs:       ----> accounts database doesn't exist!", 2)
    
    # Contacts database ()
    if os_version < 2.0:
        contactsdb_name = "contacts.db"
    else:
        contactsdb_name = "contacts2.db"
    try:
        contactsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.contacts/databases/' + contactsdb_name, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        contactsdb.wait()
        _adel_log.log(contactsdb_name + " -> " + contactsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/contacts2.db").hexdigest(), 3)
        hash_value.write(contactsdb_name + " -> " + hashlib.sha256(backup_dir + "/" + contactsdb_name).hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs:       ----> contacts database doesn't exist!", 2)
    
    # MMS and SMS database ()
    try:
        smsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.telephony/databases/mmssms.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        smsdb.wait()
        _adel_log.log("mmssms.db -> " + smsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/mmssms.db").hexdigest(), 3)
        hash_value.write("mmssms.db -> " + hashlib.sha256(backup_dir + "/mmssms.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs:       ----> mms/sms database doesn't exist!", 2)
    
    # Calendar database ()
    try:
        calendardb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.calendar/databases/calendar.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        calendardb.wait()
        _adel_log.log("calendar.db -> " + calendardb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/calendar.db").hexdigest(), 3)
        hash_value.write("calendar.db -> " + hashlib.sha256(backup_dir + "/calendar.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs:       ----> calendar database doesn't exist!", 2)
    
    # Settings database ()
    try:
        settingsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.settings/databases/settings.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        settingsdb.wait()
        _adel_log.log("settings.db -> " + settingsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/settings.db").hexdigest(), 3)
        hash_value.write("settings.db -> " + hashlib.sha256(backup_dir + "/settings.db").hexdigest() + " \n")
    except:
        _adel_log.log("dumpDBs:       ----> settings database doesn't exist!", 2)
    
    # Location caches (cell & wifi)
    if os_version < 2.3:
        try:
            cachecell = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.location/files/cache.cell', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            cachecell.wait()
            _adel_log.log("chache.cell-> " + cachecell.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/chache.cell").hexdigest(), 3)
            hash_value.write("chache.cell -> " + hashlib.sha256(backup_dir + "/chache.cell").hexdigest() + " \n")
        except:
            _adel_log.log("dumpDBs:       ----> cell GPS cache doesn't exist!", 2)
        try:
            cachewifi = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.location/files/cache.wifi', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            cachewifi.wait()
            _adel_log.log("chache.wifi-> " + cachewifi.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/chache.wifi").hexdigest(), 3)
            hash_value.write("chache.wifi -> " + hashlib.sha256(backup_dir + "/chache.wifi").hexdigest() + " \n")
        except:
            _adel_log.log("dumpDBs:       ----> wifi GPS cache doesn't exist!", 2)

    # Optional applications and databases ----> analyzing is not implemented right now
    # Downloaded data and apps database ()
    try:
        downloadsdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.downloads/databases/downloads.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        downloadsdb.wait()
        _adel_log.log("downloads.db -> " + downloadsdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/downloads.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> downloads database doesn't exist!", 2)
    
    # User dictionary database ()
    try:
        userdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.userdictionary/databases/user_dict.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        userdb.wait()
        _adel_log.log("user_dict.db -> " + userdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/user_dict.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> user dict doesn't exist!", 2)    
    # Phone database ()
    try:
        phonedb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.telephony/databases/telephony.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        phonedb.wait()
        _adel_log.log("telephony.db -> " + phonedb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/telephony.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> telephony database doesn't exist!", 2)

    # Automated dictionary database ()
    try:
        autodb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.inputmethod.latin/databases/auto_dict.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        autodb.wait()
        _adel_log.log("auto_dict.db -> " + autodb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/auto_dict.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> auto dict doesn't exist!", 2)

    # Weather data database ()
    try:
        weatherdb = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.apps.genie.geniewidget/databases/weather.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        weatherdb.wait()
        _adel_log.log("weather.db -> " + weatherdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/weather.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> weather database doesn't exist!", 2)
    try:
        weatherdb = subprocess.Popen(['adb', 'pull', '/data/data/com.sec.android.widgetapp.weatherclock/databases/WeatherClock', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        weatherdb.wait()
        _adel_log.log("WeatherClock.db -> " + weatherdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/WeatherClock.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> weather widget doesn't exist!", 2)

    # Google-Mail programm database ()
    try:
        gmaildb = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.gm/databases/gmail.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        gmaildb.wait()
        _adel_log.log("gmail.db -> " + gmaildb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/gmail.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> gmail database doesn't exist!", 2)

    # Other Email Accounts than Gmail ()
    try:
        providerdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.email/databases/EmailProvider.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        providerdb.wait()
        _adel_log.log("EmailProvider.db -> " + providerdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/EmailProvider.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> EmailProvider database doesn't exist!", 2)

    # Clock and alarms database ()
    try:
        alarmdb = subprocess.Popen(['adb', 'pull', '/data/data/com.android.deskclock/databases/alarms.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        alarmdb.wait()
        _adel_log.log("alarms.db -> " + alarmdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/alarms.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> alarms database doesn't exist!", 2)

    # Twitter database ()
    try:
        for i in range(6):
            try:
                file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.twitter.android/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
                if ".db" in file_name:
                    twitter_db = '/data/data/com.twitter.android/databases/' + file_name
                    twitter_db_name = subprocess.Popen(['adb', 'pull', twitter_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    twitter_db_name.wait()
                    _adel_log.log(file_name + " -> " + twitter_db_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                else:
                    continue
            except:
                continue
    except:
        _adel_log.log("dumpDBs:       ----> twitter database doesn't exist!", 2)

    # Google-Talk database ()
    try:
        gtalkdb = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.gsf/databases/talk.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        gtalkdb.wait()
        _adel_log.log("talk.db -> " + gtalkdb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/talk.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> Google-Talk database doesn't exist!", 2)

    # Search and download the Google-Mail mail database ()
    try:
        for i in range(6):
            file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.google.android.gm/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
            if file_name.startswith('mailstore'):
                mail_db = '/data/data/com.google.android.gm/databases/' + file_name
                emaildb = subprocess.Popen(['adb', 'pull', mail_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                emaildb.wait()
                _adel_log.log(file_name + " -> " + emaildb.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                break
            else:
                continue
    except:
        _adel_log.log("dumpDBs:       ----> Google-Mail database doesn't exist!", 2)

    # Google+ database
    try:
        for i in range(6):
            try:
                file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.google.android.apps.plus/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
                if ".db" in file_name:
                    plus_db = '/data/data/com.google.android.apps.plus/databases/' + file_name
                    plus_db_name = subprocess.Popen(['adb', 'pull', plus_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    plus_db_name.wait()
                    _adel_log.log(file_name + " -> " + plus_db_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                else:
                    continue
            except:
                continue
    except:
        _adel_log.log("dumpDBs:       ----> Google+ database doesn't exist!", 2)

    # Google-Maps database
    try:
        try:
            maps_file_name = subprocess.Popen(['adb', 'pull', '/data/data/com.google.android.apps.maps/databases/da_destination_history', backup_dir + "/da_destination_history.db"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            maps_file_name.wait()
            _adel_log.log("da_destination_history -> " + maps_file_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "da_destination_history.db").hexdigest(), 3)
        except:
            _adel_log.log("dumpDBs:       ----> Google-Maps navigation history doesn't exist!", 2)
        for i in range(6):
            try:
                file_name = subprocess.Popen(['adb', 'shell', 'ls', '/data/data/com.google.android.apps.maps/databases/'], stdout=subprocess.PIPE).communicate(0)[0].split()[i]
                if ".db" in file_name:
                    maps_db = '/data/data/com.google.android.apps.maps/databases/' + file_name
                    maps_db_name = subprocess.Popen(['adb', 'pull', maps_db, backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    maps_db_name.wait()
                    _adel_log.log(file_name + " -> " + maps_db_name.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + file_name).hexdigest(), 3)
                else:
                    continue
            except:
                continue
    except:
        _adel_log.log("dumpDBs:       ----> Google-Maps database doesn't exist!", 2)

    # Facebook database
    try:
        facebook = subprocess.Popen(['adb', 'pull', '/data/data/com.facebook.katana/databases/fb.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        facebook.wait()
        _adel_log.log("fb.db -> " + facebook.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/fb.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> Facebook database doesn't exist!", 2)

    # Browser GPS database
    try:
        browserGPS = subprocess.Popen(['adb', 'pull', '/data/data/com.android.browser/app_geolocation/CachedGeoposition.db', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        browserGPS.wait()
        _adel_log.log("CachedGeoposition.db -> " + browserGPS.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/CachedGeoposition.db").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> Cached geopositions within browser don't exist!", 2)

    # Gesture Lock File
    try:
        gesture = subprocess.Popen(['adb', 'pull', '/data/system/gesture.key', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        gesture.wait()
        _adel_log.log("gesture.key -> " + gesture.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/gesture.key").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> No gesture lock found!", 2)

    # Password Lock File
    try:
        password = subprocess.Popen(['adb', 'pull', '/data/system/password.key', backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        password.wait()
        _adel_log.log("password.key -> " + password.communicate(0)[1].split("(")[1].split(")")[0] + " -> " + hashlib.sha256(backup_dir + "/password.key").hexdigest(), 3)
    except:
        _adel_log.log("dumpDBs:       ----> No password lock found!", 2)

    # Stored files (pictures, documents, etc.)
    if device_name != "local":
        # Pictures
        picture_dir = backup_dir.split("/")[0] + "/pictures/"
        os.mkdir(picture_dir)
        try:
            _adel_log.log("dumpDBs:       ----> dumping pictures (internal_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/sdcard/DCIM/Camera/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs:       ----> No pictures on the internal SD-card found!", 2)
        try:
            pictures = subprocess.Popen(['adb', 'pull', '/data/media/0/DCIM/Camera/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs:       ----> No pictures on the internal SD-card (alternate path) found!", 2)
        try:
            _adel_log.log("dumpDBs:       ----> dumping pictures (external_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/sdcard/external_sd/DCIM/Camera/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs:       ----> No pictures on the external SD-card found!", 2)
        try:
            _adel_log.log("dumpDBs:       ----> dumping screen captures (internal_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/sdcard/ScreenCapture/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs:       ----> No screen captures on the internal SD-card found!", 2)
        try:
            _adel_log.log("dumpDBs:       ----> dumping screen captures (internal_sdcard)....", 0)
            pictures = subprocess.Popen(['adb', 'pull', '/data/media/0/ScreenCapture/', picture_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            pictures.wait()
        except:
            _adel_log.log("dumpDBs:       ----> No screen captures on the internal SD-card (alternate path) found!", 2)
    hash_value.close()    

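Two caveats about the dump routine above. First, each adb pull is driven through communicate(), whose second element carries adb's transfer summary from stderr. Second, hashlib.sha256() is applied to a path string throughout, so the logged digests are hashes of filenames rather than of the pulled files' contents (on Python 3 the call would raise TypeError outright). Below is a sketch of the pull step that hashes the actual bytes; pull_and_hash is an illustrative helper, not part of ADEL.

import hashlib
import os
import subprocess

def pull_and_hash(device_path, backup_dir):
    proc = subprocess.Popen(["adb", "pull", device_path, backup_dir],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, err = proc.communicate()  # older adb builds print the transfer summary on stderr
    local = os.path.join(backup_dir, os.path.basename(device_path))
    digest = hashlib.sha256()
    with open(local, "rb") as fh:  # hash the file's bytes, not its path
        for chunk in iter(lambda: fh.read(65536), b""):
            digest.update(chunk)
    return local, digest.hexdigest(), err.decode(errors="replace").strip()
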
Example 163

Project: public-domain-season-songs Source File: run_lily_run.py
def process_file(ly, dryrun):
    filename = ly.rsplit(".", 2)[0]
    infile = os.path.join(inFolder, ly)
    inp = open(infile, 'rb')
    with open(os.path.join(FOLDER_TEMP, FILENAME_TEMP), 'wb') as outp:
        outp.write("""
    \header{{
        tagline = " " %remove the »Music engraving by LilyPond«
    }}

    \paper {{
      myStaffSize = #20
      %{{
         run
             lilypond -dshow-available-fonts blabla
         to show all fonts available in the process log.
      %}}

      #(define fonts
        (make-pango-font-tree "Linux Libertine"
                              "Linux Libertine"
                              "Linux Libertine Mono"
        (/ myStaffSize 20)))
        %system-system-spacing #'stretchability = #0
        %ragged-last-bottom = ##t
        %ragged-bottom = ##t
        %print-page-number = ##f
        #(set-paper-size "a4")
    }}
    """.format(margin=0))

        tw = inp.read()
        tw = tw.decode("utf-8")
        tw = tw.replace(u"\ufeff", "")
        inpaper = False
        name = None
        removed_lines = []
        markup = False
        markupc = 0
        song_text = []
        composer = poet = ""
        for line in tw.split("\n"):
            r = re.match(r'\W*title\W*=\W*"([^"]+)"', line)
            rcomposer = re.match(r'\W*composer\W*=\W*"([^"]+)"', line)
            rpoet = re.match(r'\W*poet\W*=\W*"([^"]+)"', line)
            komplizierter_poet = re.findall(r'"([^"]+)"', line)
            if inpaper or "\paper" in line:
                inpaper = True
                if "}" in line:
                    inpaper = False
                removed_lines.append(line)
            if markup or re.findall(r"^\s*\\markup", line):
                markup = True
                markupc += line.count("{")
                markupc -= line.count("}")
                if markupc == 0:
                    markup = False
                if r"\bold" in line:
                    num = re.findall(r'"\s*(\d+)\s*\.?\s*"', line)
                    if num:
                        song_text.append(num[0])
                    else:
                        line = re.sub(
                            '["{}]',
                            "",
                            re.sub(r"\\.*?[{ ]", "", line)
                        )
                        song_text.append(line)
                else:
                    m = re.match(r'\s*"([^"]*)"\s*', line)
                    if m:
                        song_text.append(m.groups()[0])
                    else:
                        removed_lines.append(line)
            elif r and len(r.groups()) == 1:
                name = r.groups()[0]
                removed_lines.append(line)
            elif rcomposer and len(rcomposer.groups()) == 1:
                composer = rcomposer.groups()[0]
            elif rpoet and len(rpoet.groups()) == 1:
                poet = rpoet.groups()[0]
            elif "set-global-staff-size" in line:
                removed_lines.append(line)
            elif "tagline" in line:
                removed_lines.append(line)
            elif "set-default-paper-size" in line:
                removed_lines.append(line)
            elif "version" in line:
                removed_lines.append(line)
            elif "opus" in line:
                removed_lines.append(line)
            elif r"\tempo" in line:
                removed_lines.append(line)
            elif any((x in line for x in ("top-margin", "bottom-margin", "left-margin", "right-margin", "line-width"))):
                removed_lines.append(line)
            elif "copyright" in line and "=" in line and "\"" in line:
                removed_lines.append(line)
            elif "subtitle" in line and "=" in line:
                removed_lines.append(line)
            elif "poet" in line and komplizierter_poet:
                #print komplizierter_poet
                komplizierter_poet = map(lambda x: x, komplizierter_poet)
                gr = u"\n".join(komplizierter_poet[1:])
                poet = u"{0} {1}".format(komplizierter_poet[0], gr)
                #print type(poet), poet
            else:
                outp.write(line.encode("utf-8"))
                outp.write("\n")

        inp.seek(0)
        file_content = inp.read()
        inp.close()

    to_return = {}

    ext = "eps"
    if (not dryrun):
        cl = [
            "lilypond",
            #"-V",
            "-I",
            os.path.abspath("../"),
            "--ps",
            "-d",
            "point-and-click=#f",
            "-dbackend=eps",
            "-o",
            "tmp",  # TODO: GLOBAL VARIABLE FOR THIS!
            FILENAME_TEMP
        ]
        sub = subprocess.Popen(
            cl,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=FOLDER_TEMP,
        )
        status = sub.wait()

        to_return.update({
            "lilypond": status,
            "lilypond_stdout": sub.stdout.read(),
            "lilypond_stderr": sub.stderr.read(),
        })

        shutil.copy(
            os.path.join(FOLDER_TEMP, "tmp.{}".format(ext)),
            os.path.join(outFolder, "{}.{}".format(filename, ext))
        )

    to_return.update({
        "file_content": file_content,
        "removed_lines": removed_lines,
        "data": {
            "filename": "{}.{}".format(filename, ext),
            "name": name,
            "poet": poet,
            "composer": composer,
            "text": song_text,
        },
        "lilypond": -1,
    })
    return to_return

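Note that the lilypond invocation above pipes both stdout and stderr and then calls wait() before reading either pipe. That works while the output fits in the OS pipe buffers, but a sufficiently chatty run fills a pipe, the child blocks writing, and the parent blocks in wait(). communicate() reads both pipes while it waits and is the safe pattern the subprocess documentation recommends; a self-contained sketch (the lilypond --version command is just an illustrative stand-in):

import subprocess

sub = subprocess.Popen(["lilypond", "--version"],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sub.communicate()  # drains both pipes while waiting, so no deadlock
status = sub.returncode
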
Example 164

Project: mysql-utilities Source File: import_errors.py
    def run(self):
        self.res_fname = "result.txt"

        from_conn = "--server={0}".format(
            self.build_connection_string(self.server1)
        )
        to_conn = "--server={0}".format(
            self.build_connection_string(self.server2)
        )

        _FORMATS = ("CSV", "TAB", "GRID", "VERTICAL")
        test_num = 1
        for frmt in _FORMATS:
            comment = ("Test Case {0} : Testing import with "
                       "{1} format and NAMES display").format(test_num, frmt)
            # We test DEFINITIONS and DATA only in other tests
            self.run_import_test(1, from_conn, to_conn, ['util_test'], frmt,
                                 "BOTH", comment, " --display=NAMES")
            self.drop_db(self.server2, "util_test")
            test_num += 1

        export_cmd = ("mysqldbexport.py {0} util_test --export=BOTH "
                      "--format=SQL --skip-gtid  > "
                      "{1}").format(from_conn, self.export_import_file)

        # First run the export to a file.
        comment = "Running export..."
        res = self.run_test_case(0, export_cmd, comment)
        if not res:
            raise MUTLibError("EXPORT: {0}: failed".format(comment))

        import_cmd = "mysqldbimport.py {0}".format(to_conn)

        comment = "Test case {0} - no file specified ".format(test_num)
        cmd_str = "{0} --import=BOTH --format=SQL".format(import_cmd)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        import_cmd = ("{0} {1} --import=BOTH "
                      "--format=SQL").format(import_cmd,
                                             self.export_import_file)

        test_num += 1
        comment = "Test case {0} - bad --skip values".format(test_num)
        cmd_str = "{0} --skip=events,wiki-waki,woo-woo".format(import_cmd)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - exporting data and skipping "
                   "data").format(test_num)
        cmd_str = "{0} --skip=data --import=data".format(import_cmd)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = "Test case {0} - cannot parse --server".format(test_num)
        cmd_str = ("mysqldbimport.py --server=rocks_rocks_rocks "
                   "{0}").format(self.export_import_file)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: cannot connect to "
                   "server").format(test_num)
        cmd_str = ("mysqldbimport.py --server=nope:nada@localhost:{0} "
                   "{1}").format(self.server0.port, self.export_import_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.server2.exec_query("CREATE USER 'joe'@'localhost'")

        # Watch out for Windows: it doesn't use sockets!
        joe_conn = "--server=joe@localhost:{0}".format(self.server2.port)
        if os.name == "posix" and self.server2.socket is not None:
            joe_conn = "{0}:{1}".format(joe_conn, self.server2.socket)

        test_num += 1
        comment = ("Test case {0} - error: not enough "
                   "privileges").format(test_num)
        cmd_str = "mysqldbimport.py {0} {1}".format(joe_conn,
                                                    self.export_import_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: not enough "
                   "privileges").format(test_num)
        cmd_str = ("mysqldbimport.py {0} {1} "
                   "--import=definitions").format(joe_conn,
                                                  self.export_import_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = "Test case {0} - error: bad SQL statements".format(test_num)
        bad_sql_file = os.path.normpath("./std_data/bad_sql.sql")
        cmd_str = ("mysqldbimport.py {0} {1} "
                   "--import=definitions").format(to_conn, bad_sql_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.drop_db(self.server2, "util_test")

        # Skipping create and doing the drop should be illegal.
        test_num += 1
        comment = ("Test case {0} - error: --skip=create_db & "
                   "--drop-first").format(test_num)
        cmd_str = ("{0} {1} --skip=create_db --format=sql --import=data "
                   "--drop-first ").format(import_cmd, self.export_import_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.drop_db(self.server2, "util_test")

        import_cmd = "mysqldbimport.py {0}".format(to_conn)

        test_num += 1
        comment = "Test case {0} - warning: --skip-blobs".format(test_num)
        cmd_str = ("{0} --skip-blobs --format=sql --import=definitions "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: --skip=data & "
                   "--import=data").format(test_num)
        cmd_str = ("{0} --skip=data --format=sql --import=data "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: bad object "
                   "definition").format(test_num)
        bad_csv_file = os.path.normpath("./std_data/bad_object.csv")
        cmd_str = ("{0} --format=csv --import=both "
                   "{1}").format(import_cmd, bad_csv_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        # Test database with backticks
        _FORMATS_BACKTICKS = ("CSV", "TAB")
        for frmt in _FORMATS_BACKTICKS:
            comment = ("Test Case {0} : Testing import with {1} format and "
                       "NAMES display (using backticks)").format(test_num,
                                                                 frmt)
            self.run_import_test(1, from_conn, to_conn, ['`db``:db`'],
                                 frmt, "BOTH", comment, " --display=NAMES")
            self.drop_db(self.server2, '`db``:db`')
            test_num += 1

        comment = "Test case {0} - invalid --character-set".format(test_num)
        cmd_str = ("mysqldbimport.py {0} {1} "
                   "--character-set=unsupported_charset"
                   "".format(self.export_import_file, to_conn))
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Run export to re-create the export file.
        comment = "Running export to {0}...".format(self.export_import_file)
        res = self.run_test_case(0, export_cmd, comment)
        if not res:
            raise MUTLibError("EXPORT: {0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: invalid multiprocess "
                   "value.").format(test_num)
        cmd_str = ("{0} --format=sql --import=both --multiprocess=0.5 "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: multiprocess value smaller than "
                   "zero.").format(test_num)
        cmd_str = ("{0} --format=sql --import=both --multiprocess=-1 "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: invalid max bulk insert "
                   "value.").format(test_num)
        cmd_str = ("{0} --format=sql --import=both --max-bulk-insert=2.5 "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: max bulk insert value not greater "
                   "than one.").format(test_num)
        cmd_str = ("{0} --format=sql --import=both --max-bulk-insert=1 "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.drop_db(self.server2, "util_test")

        test_num += 1
        comment = ("Test case {0} - warning: max bulk insert ignored without "
                   "bulk insert option.").format(test_num)
        cmd_str = ("{0} --format=sql --import=both --max-bulk-insert=10000 "
                   "{1}").format(import_cmd, self.export_import_file)
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: Use --drop-first to drop the "
                   "database before importing.").format(test_num)
        data_file = os.path.normpath("./std_data/basic_data.sql")
        cmd_str = ("{0} --format=sql --import=both "
                   "{1}").format(import_cmd, data_file)
        res = self.run_test_case(1, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: Is not a valid path to a file."
                   "").format(test_num)
        cmd_str = ("{0} --format=sql --import=both not_exist.sql"
                   "").format(import_cmd)
        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - error: Without permission to read a file."
                   "").format(test_num)
        cmd_str = ("{0} --format=sql --import=both {1}"
                   "").format(import_cmd, self.perms_test_file)

        # Create file without read permission.
        with open(self.perms_test_file, "w"):
            pass
        if os.name == "posix":
            os.chmod(self.perms_test_file, 0o200)
        else:
            proc = subprocess.Popen(["icacls", self.perms_test_file, "/deny",
                                     "everyone:(R)"], stdout=subprocess.PIPE)
            proc.communicate()

        res = self.run_test_case(2, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Handle message with path (replace '\' by '/').
        if os.name != "posix":
            self.replace_result("# Importing definitions and data from "
                                "std_data\\bad_object.csv",
                                "# Importing definitions and data from "
                                "std_data/bad_object.csv.\n")
            self.replace_result("# Importing definitions from "
                                "std_data\\bad_sql.sql",
                                "# Importing definitions from "
                                "std_data/bad_sql.sql.\n")
            self.replace_result("# Importing definitions and data from "
                                "std_data\\basic_data.sql.",
                                "# Importing definitions and data from "
                                "std_data/basic_data.sql.\n")

        # Mask known source and destination host name.
        self.replace_substring("on localhost", "on XXXX-XXXX")
        self.replace_substring("on [::1]", "on XXXX-XXXX")

        self.replace_substring(" (28000)", "")
        self.replace_result("ERROR: Query failed.", "ERROR: Query failed.\n")

        self.replace_substring("Error 1045 (28000):", "Error")
        self.replace_substring("Error 1045:", "Error")

        self.replace_result("mysqldbimport: error: Server connection "
                            "values invalid",
                            "mysqldbimport: error: Server connection "
                            "values invalid\n")

        return True
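
A side detail worth isolating from this test: making a file unreadable is platform-dependent, and the Windows branch is where subprocess.PIPE earns its keep, keeping the icacls chatter out of the test output. A sketch of that split (the helper name is illustrative; icacls ships with stock Windows):

import os
import subprocess

def deny_read(path):
    """Create path and remove read permission from it."""
    open(path, "w").close()  # make sure the file exists
    if os.name == "posix":
        os.chmod(path, 0o200)  # owner write-only
    else:
        proc = subprocess.Popen(
            ["icacls", path, "/deny", "everyone:(R)"],
            stdout=subprocess.PIPE)
        proc.communicate()  # swallow the icacls summary output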

Example 165

Project: CrisisMappingToolkit Source File: daily_detector.py
def main(argsIn):

    #logger = logging.getLogger() TODO: Switch to using a logger!

    try:
          usage = "usage: daily_detector.py [--help]\n  "
          parser = optparse.OptionParser(usage=usage)

          parser.add_option("--archive-results", dest="archiveResults", action="store_true", default=False,
                            help="Archive results so they can be found by the web API.")
          parser.add_option("--manual", dest="showManual", action="store_true", default=False,
                            help="Display more usage information about the tool.")
          (options, args) = parser.parse_args(argsIn)

          if options.showManual:
              print manual
              return 0

    except optparse.OptionError, msg:
        raise Usage(msg)

    print '---=== Starting daily flood detection process ===---'

    date = datetime.datetime.now()
    dateString = ('%d-%02d-%02d' % (date.year, date.month, date.day))

    # Store outputs here before they are archived
    BASE_OUTPUT_FOLDER = '/home/smcmich1/data/Floods/auto_detect'

    # Look at flood alerts this many days old
    DAY_SPAN = 7

    # Search this far around the center point in degrees
    # - If it is too large Earth Engine will time out during processing!
    REGION_SIZE = 0.5

    # How many days around each flood alert to look for images
    MAX_SEARCH_DAYS      = '7'
    MAX_CLOUD_PERCENTAGE = '0.50'
    RECORD_INPUTS        = False # Save the processing inputs?
        
    # Get a list of search regions for today
    (searchRegions, labels) = getSearchRegions(date, DAY_SPAN, REGION_SIZE)
                      
    print 'Detected ' + str(len(searchRegions)) + ' candidate flood regions.'

    dateFolder = os.path.join(BASE_OUTPUT_FOLDER, dateString)
    if not os.path.exists(dateFolder):
        os.mkdir(dateFolder)

    for (region, label) in zip(searchRegions, labels):
        print '---------------------------------------------'
        
        #if label in ['Sudan']: #DEBUG
        #    continue
        
        centerPoint = ( (region[0] + region[2])/2.0, 
                        (region[1] + region[3])/2.0 )
        
        print 'Detecting floods in '+label+': ' + str(region)
        
        outputFolder = os.path.join(dateFolder, label)
        if not os.path.exists(outputFolder):
            os.mkdir(outputFolder)
        #try:
        
        # Run this command as a subprocess so we can capture all the output for a log file
        cmd = ['python', os.path.join(os.path.dirname(os.path.realpath(__file__)),'detect_flood_cmd.py'), 
               '--search-days', MAX_SEARCH_DAYS, 
               '--max-cloud-percentage', MAX_CLOUD_PERCENTAGE]
              # '--',
        if RECORD_INPUTS:
            cmd.append('--save-inputs')
        cmd += ['--', outputFolder, dateString, 
               str(region[0]), str(region[1]), 
               str(region[2]), str(region[3])]
        print ' '.join(cmd)
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        textOutput, err = p.communicate()
        print textOutput
       
        # Log the program output to disk
        logPath = os.path.join(outputFolder, 'log.txt')
        with open(logPath, 'w') as f:
            f.write(str(cmd))
            f.write('\n============================================\n')
            f.write(textOutput)
        
                        
        # Check if we successfully generated a kml output file
        kmlPath = os.path.join(outputFolder, 'floodCoords.kml')
        if not os.path.exists(kmlPath):
            #raise Exception('DEBUG')
            continue
        
        # Read the sensors we used from the file and add them to the title
        floodInfo = None
        with open(kmlPath) as f:
            for line in f:
                if '<description>' in line:
                    floodInfo = detect_flood_cmd.parseKmlDescription(line)
                    break
        if not floodInfo:
            raise Exception('Failed to load flood information!')

        pairs = [('modis', 'M'), ('landsat', 'L'), ('sentinel-1', 'S')]
        sensorCode = '' # Will be something like "MLS"
        # Look for fields in floodInfo indicating the presence of each sensor.
        for pair in pairs:
            for s in floodInfo.keys():
                if pair[0] in s:
                    sensorCode += pair[1]
                    break
        
        # Insert the center into the kml file name    
        newKmlName = (('results_%s_%s_%05f_%05f.kml') % (label, sensorCode, centerPoint[0], centerPoint[1]))
        newKmlPath = os.path.join(outputFolder, newKmlName)
        shutil.move(kmlPath, newKmlPath)

        if options.archiveResults:
           archiveResult(newKmlPath, dateString)
        
        #raise Exception('DEBUG')
        
        #except Exception as e:
        #    print 'Failure!'
        #    print str(e)
        #    print sys.exc_info()[0]
        #    pass

    if options.archiveResults:
        print 'Finalizing archive folder...'
        updateArchiveFolder()
    print 'Done!'
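
The subprocess call in the middle of this loop is the part worth generalizing: run a worker script, capture everything it prints, and persist that output next to its results. A compact sketch of the same run-and-log step (the helper name is illustrative; unlike the code above, stderr is folded into stdout so error text lands in the log too):

import subprocess

def run_logged(cmd, log_path):
    """Run cmd, write its output to log_path, and return its exit code."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)  # merge stderr into stdout
    output, _ = proc.communicate()
    with open(log_path, "wb") as f:
        f.write((" ".join(cmd)).encode("utf-8") + b"\n")
        f.write(b"=" * 44 + b"\n")
        f.write(output)
    return proc.returncode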

Example 166

Project: pueue Source File: daemon.py
Function: main
    def main(self):
        while self.active:
            # Check if there is a running process
            if self.process is not None:
                # Poll the process to check for termination
                self.process.poll()
                if self.process.returncode is not None:
                    # If a process is terminated by `stop` or `kill`
                    # we want to queue it again instead of closing it as failed.
                    if not self.stopping:
                        # The child's streams go to the self.stdout/self.stderr
                        # files (see the Popen call below), so communicate()
                        # merely reaps the process; the real output is read
                        # back from those files.
                        self.process.communicate()
                        self.stdout.seek(0)
                        output = self.stdout.read().replace('\n', '\n    ')

                        self.stderr.seek(0)
                        error_output = self.stderr.read().replace('\n', '\n    ')

                        # Mark queue entry as finished and save returncode
                        self.queue[self.current_key]['returncode'] = self.process.returncode
                        if self.process.returncode != 0:
                            self.queue[self.current_key]['status'] = 'errored'
                        else:
                            self.queue[self.current_key]['status'] = 'done'

                        # Add outputs to log
                        self.queue[self.current_key]['stdout'] = output
                        self.queue[self.current_key]['stderr'] = error_output
                        self.queue[self.current_key]['end'] = str(datetime.now().strftime("%H:%M"))

                        # Pause Daemon, if it is configured to stop
                        if self.config['default']['stopAtError'] is True and not self.reset:
                            if self.process.returncode != 0:
                                self.paused = True

                        self.write_queue()
                        self.log()
                    else:
                        # Process finally finished.
                        # Now we can set the status to paused.
                        self.paused = True
                        self.stopping = False
                        if self.remove_current is True:
                            self.remove_current = False
                            del self.queue[self.current_key]
                        else:
                            self.queue[self.current_key]['status'] = 'queued'

                    self.process = None
                    self.current_key = None
                    self.processStatus = 'No running process'

            if self.reset:
                # Rotate log
                self.log(rotate=True)

                # Reset queue
                self.queue = {}
                self.write_queue()

                # Reset Log
                self.log()
                self.nextKey = 0
                self.reset = False

            # Start next Process
            if not self.paused and len(self.queue) > 0 and self.process is None:
                self.current_key = self.get_next_item()
                if self.current_key is not None:
                    # Get instruction for next process
                    next_item = self.queue[self.current_key]
                    #
                    self.stdout.seek(0)
                    self.stdout.truncate()
                    self.stderr.seek(0)
                    self.stderr.truncate()
                    # Spawn subprocess
                    self.process = subprocess.Popen(
                        next_item['command'],
                        shell=True,
                        stdout=self.stdout,
                        stderr=self.stderr,
                        stdin=subprocess.PIPE,
                        universal_newlines=True,
                        cwd=next_item['path']
                    )
                    self.queue[self.current_key]['status'] = 'running'
                    self.queue[self.current_key]['start'] = str(datetime.now().strftime("%H:%M"))
                    self.processStatus = 'running'

            # Create list for waitable objects
            readable, writable, errored = select.select(self.read_list, [], [], 1)
            for socket in readable:
                if socket is self.socket:
                    # Listening for clients to connect.
                    # Client sockets are added to readlist to be processed.
                    try:
                        self.clientSocket, self.clientAddress = self.socket.accept()
                        self.read_list.append(self.clientSocket)
                    except:
                        print('Daemon rejected client')
                else:
                    # Trying to receive instruction from client socket
                    try:
                        instruction = self.clientSocket.recv(1048576)
                    except EOFError:
                        print('Client died while sending message, dropping received data.')
                        instruction = -1

                    # Check for valid instruction
                    if instruction != -1:
                        # Check if received data can be unpickled.
                        # Instruction will be ignored if it can't be unpickled
                        try:
                            command = pickle.loads(instruction)
                        except EOFError:
                            print('Received message is incomplete, dropping received data.')
                            self.read_list.remove(self.clientSocket)
                            self.clientSocket.close()

                            command = {}
                            command['mode'] = ''

                        # Executing respective function depending on command mode
                        if command['mode'] == 'add':
                            self.respond_client(self.execute_add(command))

                        elif command['mode'] == 'remove':
                            self.respond_client(self.execute_remove(command))

                        elif command['mode'] == 'switch':
                            self.respond_client(self.execute_switch(command))

                        elif command['mode'] == 'send':
                            self.respond_client(self.execute_send(command))

                        elif command['mode'] == 'status':
                            self.respond_client(self.execute_status(command))

                        elif command['mode'] == 'reset':
                            self.respond_client(self.execute_reset())

                        elif command['mode'] == 'start':
                            self.respond_client(self.execute_start())

                        elif command['mode'] == 'pause':
                            self.respond_client(self.execute_pause(command))

                        elif command['mode'] == 'restart':
                            self.respond_client(self.execute_restart(command))

                        elif command['mode'] == 'stop':
                            self.respond_client(self.execute_stop(command))

                        elif command['mode'] == 'kill':
                            self.respond_client(self.execute_kill(command))

                        elif command['mode'] == 'STOPDAEMON':
                            self.respond_client({'message': 'Pueue daemon shutting down',
                                                'status': 'success'})
                            # Kill current process and set active
                            # to False to stop while loop
                            self.active = False
                            self.execute_kill({'remove': False})
                            break

        self.socket.close()
        os.remove(get_socket_path())
        sys.exit(0)
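
Note that this daemon deliberately does not use subprocess.PIPE for the child's output: a long-running job could fill a finite pipe buffer and stall, so stdout/stderr go to seekable files and only stdin is a pipe. The spawning step, reduced to a standalone sketch (the helper name is illustrative):

import subprocess
import tempfile

def spawn_job(command, path):
    """Start a long-running shell command with its output spooled to files.

    Regular files never fill up the way a pipe buffer can, so the caller
    is free to poll() the child occasionally and read the files back
    after it exits. stdin stays a pipe so input can be sent later.
    """
    stdout = tempfile.TemporaryFile(mode="w+")
    stderr = tempfile.TemporaryFile(mode="w+")
    proc = subprocess.Popen(
        command,
        shell=True,
        stdout=stdout,
        stderr=stderr,
        stdin=subprocess.PIPE,
        universal_newlines=True,
        cwd=path,
    )
    return proc, stdout, stderr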

Example 167

Project: OpenUpgrade Source File: report.py
    def _run_wkhtmltopdf(self, cr, uid, headers, footers, bodies, landscape, paperformat, spec_paperformat_args=None, save_in_attachment=None, set_viewport_size=False):
        """Execute wkhtmltopdf as a subprocess in order to convert html given in input into a pdf
        docuement.

        :param header: list of string containing the headers
        :param footer: list of string containing the footers
        :param bodies: list of string containing the reports
        :param landscape: boolean to force the pdf to be rendered under a landscape format
        :param paperformat: ir.actions.report.paperformat to generate the wkhtmltopf arguments
        :param specific_paperformat_args: dict of prioritized paperformat arguments
        :param save_in_attachment: dict of reports to save/load in/from the db
        :returns: Content of the pdf as a string
        """
        if not save_in_attachment:
            save_in_attachment = {}

        command_args = []
        if set_viewport_size:
            command_args.extend(['--viewport-size', landscape and '1024x1280' or '1280x1024'])

        # Passing the cookie to wkhtmltopdf in order to resolve internal links.
        try:
            if request:
                command_args.extend(['--cookie', 'session_id', request.session.sid])
        except AttributeError:
            pass

        # Wkhtmltopdf arguments
        command_args.extend(['--quiet'])  # Less verbose error messages
        if paperformat:
            # Convert the paperformat record into arguments
            command_args.extend(self._build_wkhtmltopdf_args(paperformat, spec_paperformat_args))

        # Force the landscape orientation if necessary
        if landscape and '--orientation' in command_args:
            command_args_copy = list(command_args)
            for index, elem in enumerate(command_args_copy):
                if elem == '--orientation':
                    del command_args[index]
                    del command_args[index]
                    command_args.extend(['--orientation', 'landscape'])
        elif landscape and '--orientation' not in command_args:
            command_args.extend(['--orientation', 'landscape'])

        # Execute WKhtmltopdf
        pdfdocuments = []
        temporary_files = []

        for index, reporthtml in enumerate(bodies):
            local_command_args = []
            pdfreport_fd, pdfreport_path = tempfile.mkstemp(suffix='.pdf', prefix='report.tmp.')
            temporary_files.append(pdfreport_path)

            # Directly load the document if we already have it
            if save_in_attachment and save_in_attachment['loaded_documents'].get(reporthtml[0]):
                with closing(os.fdopen(pdfreport_fd, 'w')) as pdfreport:
                    pdfreport.write(save_in_attachment['loaded_documents'][reporthtml[0]])
                pdfdocuments.append(pdfreport_path)
                continue
            else:
                os.close(pdfreport_fd)

            # Wkhtmltopdf handles header/footer as separate pages. Create them if necessary.
            if headers:
                head_file_fd, head_file_path = tempfile.mkstemp(suffix='.html', prefix='report.header.tmp.')
                temporary_files.append(head_file_path)
                with closing(os.fdopen(head_file_fd, 'w')) as head_file:
                    head_file.write(headers[index])
                local_command_args.extend(['--header-html', head_file_path])
            if footers:
                foot_file_fd, foot_file_path = tempfile.mkstemp(suffix='.html', prefix='report.footer.tmp.')
                temporary_files.append(foot_file_path)
                with closing(os.fdopen(foot_file_fd, 'w')) as foot_file:
                    foot_file.write(footers[index])
                local_command_args.extend(['--footer-html', foot_file_path])

            # Body stuff
            content_file_fd, content_file_path = tempfile.mkstemp(suffix='.html', prefix='report.body.tmp.')
            temporary_files.append(content_file_path)
            with closing(os.fdopen(content_file_fd, 'w')) as content_file:
                content_file.write(reporthtml[1])

            try:
                wkhtmltopdf = [_get_wkhtmltopdf_bin()] + command_args + local_command_args
                wkhtmltopdf += [content_file_path] + [pdfreport_path]
                process = subprocess.Popen(wkhtmltopdf, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = process.communicate()

                if process.returncode not in [0, 1]:
                    raise UserError(_('Wkhtmltopdf failed (error code: %s). '
                                      'Message: %s') % (str(process.returncode), err))

                # Save the pdf in attachment if marked
                if reporthtml[0] is not False and save_in_attachment.get(reporthtml[0]):
                    with open(pdfreport_path, 'rb') as pdfreport:
                        attachment = {
                            'name': save_in_attachment.get(reporthtml[0]),
                            'datas': base64.encodestring(pdfreport.read()),
                            'datas_fname': save_in_attachment.get(reporthtml[0]),
                            'res_model': save_in_attachment.get('model'),
                            'res_id': reporthtml[0],
                        }
                        try:
                            self.pool['ir.attachment'].create(cr, uid, attachment)
                        except AccessError:
                            _logger.info("Cannot save PDF report %r as attachment", attachment['name'])
                        else:
                            _logger.info('The PDF document %s is now saved in the database',
                                         attachment['name'])

                pdfdocuments.append(pdfreport_path)
            except:
                raise

        # Return the entire document
        if len(pdfdocuments) == 1:
            entire_report_path = pdfdocuments[0]
        else:
            entire_report_path = self._merge_pdf(pdfdocuments)
            temporary_files.append(entire_report_path)

        with open(entire_report_path, 'rb') as pdfdocument:
            content = pdfdocument.read()

        # Manual cleanup of the temporary files
        for temporary_file in temporary_files:
            try:
                os.unlink(temporary_file)
            except (OSError, IOError):
                _logger.error('Error when trying to remove file %s' % temporary_file)

        return content
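
One convention buried in this method deserves a note: the code accepts return codes 0 and 1 from wkhtmltopdf, apparently treating 1 as a warning-level exit that still produced a usable PDF. Distilled into a standalone sketch (the function name is illustrative; the tolerated codes are taken from the code above):

import subprocess

def html_to_pdf(html_path, pdf_path):
    """Convert one HTML file to PDF, tolerating wkhtmltopdf's exit code 1."""
    process = subprocess.Popen(
        ["wkhtmltopdf", "--quiet", html_path, pdf_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = process.communicate()
    if process.returncode not in (0, 1):
        raise RuntimeError(
            "wkhtmltopdf failed (code %s): %s" % (process.returncode, err))
    return pdf_path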

Example 168

Project: openshift-ansible Source File: openshift_cert_expiry.py
def main():
    """This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
    """

    module = AnsibleModule(
        argument_spec=dict(
            config_base=dict(
                required=False,
                default="/etc/origin",
                type='str'),
            warning_days=dict(
                required=False,
                default=30,
                type='int'),
            show_all=dict(
                required=False,
                default=False,
                type='bool')
        ),
        supports_check_mode=True,
    )

    # Basic scaffolding for OpenShift specific certs
    openshift_base_config_path = module.params['config_base']
    openshift_master_config_path = os.path.normpath(
        os.path.join(openshift_base_config_path, "master/master-config.yaml")
    )
    openshift_node_config_path = os.path.normpath(
        os.path.join(openshift_base_config_path, "node/node-config.yaml")
    )
    openshift_cert_check_paths = [
        openshift_master_config_path,
        openshift_node_config_path,
    ]

    # Paths for Kubeconfigs. Additional kubeconfigs are conditionally
    # checked later in the code
    master_kube_configs = ['admin', 'openshift-master',
                           'openshift-node', 'openshift-router',
                           'openshift-registry']

    kubeconfig_paths = []
    for m_kube_config in master_kube_configs:
        kubeconfig_paths.append(
            os.path.normpath(
                os.path.join(openshift_base_config_path, "master/%s.kubeconfig" % m_kube_config)
            )
        )

    # Validate some paths we have the ability to do ahead of time
    openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
    kubeconfig_paths = filter_paths(kubeconfig_paths)

    # etcd, where do you hide your certs? Used when parsing etcd.conf
    etcd_cert_params = [
        "ETCD_CA_FILE",
        "ETCD_CERT_FILE",
        "ETCD_PEER_CA_FILE",
        "ETCD_PEER_CERT_FILE",
    ]

    # Expiry checking stuff
    now = datetime.datetime.now()
    # todo, catch exception for invalid input and return a fail_json
    warning_days = int(module.params['warning_days'])
    expire_window = datetime.timedelta(days=warning_days)

    # Module stuff
    #
    # The results of our cert checking to return from the task call
    check_results = {}
    check_results['meta'] = {}
    check_results['meta']['warning_days'] = warning_days
    check_results['meta']['checked_at_time'] = str(now)
    check_results['meta']['warn_before_date'] = str(now + expire_window)
    check_results['meta']['show_all'] = str(module.params['show_all'])
    # All the analyzed certs accumulate here
    ocp_certs = []

    ######################################################################
    # Sure, why not? Let's enable check mode.
    if module.check_mode:
        check_results['ocp_certs'] = []
        module.exit_json(
            check_results=check_results,
            msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
            rc=0,
            changed=False
        )

    ######################################################################
    # Check for OpenShift Container Platform specific certs
    ######################################################################
    for os_cert in filter_paths(openshift_cert_check_paths):
        # Open up that config file and locate the cert and CA
        with open(os_cert, 'r') as fp:
            cert_meta = {}
            cfg = yaml.load(fp)
            # cert files are specified in parsed `fp` as relative to the path
            # of the original config file. 'master-config.yaml' with certFile
            # = 'foo.crt' implies that 'foo.crt' is in the same
            # directory. certFile = '../foo.crt' is in the parent directory.
            cfg_path = os.path.dirname(fp.name)
            cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile'])
            cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA'])

        ######################################################################
        # Load the certificate and the CA, parse their expiration dates into
        # datetime objects so we can manipulate them later
        for _, v in cert_meta.iteritems():
            with open(v, 'r') as fp:
                cert = fp.read()
                cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)

                expire_check_result = {
                    'cert_cn': cert_subject,
                    'path': fp.name,
                    'expiry': cert_expiry_date,
                    'days_remaining': time_remaining.days,
                    'health': None,
                }

                classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)

    ######################################################################
    # /Check for OpenShift Container Platform specific certs
    ######################################################################

    ######################################################################
    # Check service Kubeconfigs
    ######################################################################
    kubeconfigs = []

    # There may be additional kubeconfigs to check, but their naming
    # is less predictable than the ones we've already assembled.

    try:
        # Try to read the standard 'node-config.yaml' file to check if
        # this host is a node.
        with open(openshift_node_config_path, 'r') as fp:
            cfg = yaml.load(fp)

        # OK, the config file exists, therefore this is a
        # node. Nodes have their own kubeconfig files to
        # communicate with the master API. Let's read the relative
        # path to that file from the node config.
        node_masterKubeConfig = cfg['masterKubeConfig']
        # As before, the path to the 'masterKubeConfig' file is
        # relative to `fp`
        cfg_path = os.path.dirname(fp.name)
        node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)

        with open(node_kubeconfig, 'r') as fp:
            # Read in the nodes kubeconfig file and grab the good stuff
            cfg = yaml.load(fp)

        c = cfg['users'][0]['user']['client-certificate-data']
        (cert_subject,
         cert_expiry_date,
         time_remaining) = load_and_handle_cert(c, now, base64decode=True)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': fp.name,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
    except IOError:
        # This is not a node
        pass

    for kube in filter_paths(kubeconfig_paths):
        with open(kube, 'r') as fp:
            # TODO: Maybe consider catching exceptions here?
            cfg = yaml.load(fp)

        # Per conversation, "the kubeconfigs you care about:
        # admin, router, registry should all be single
        # value". Following that advice we only grab the data for
        # the user at index 0 in the 'users' list. There should
        # not be more than one user.
        c = cfg['users'][0]['user']['client-certificate-data']
        (cert_subject,
         cert_expiry_date,
         time_remaining) = load_and_handle_cert(c, now, base64decode=True)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': fp.name,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)

    ######################################################################
    # /Check service Kubeconfigs
    ######################################################################

    ######################################################################
    # Check etcd certs
    ######################################################################
    # Some values may be duplicated, make this a set for now so we
    # unique them all
    etcd_certs_to_check = set([])
    etcd_certs = []
    etcd_cert_params.append('dne')
    try:
        with open('/etc/etcd/etcd.conf', 'r') as fp:
            etcd_config = ConfigParser.ConfigParser()
            etcd_config.readfp(FakeSecHead(fp))

        for param in etcd_cert_params:
            try:
                etcd_certs_to_check.add(etcd_config.get('ETCD', param))
            except ConfigParser.NoOptionError:
                # That parameter does not exist, oh well...
                pass
    except IOError:
        # No etcd to see here, move along
        pass

    for etcd_cert in filter_paths(etcd_certs_to_check):
        with open(etcd_cert, 'r') as fp:
            c = fp.read()
            (cert_subject,
             cert_expiry_date,
             time_remaining) = load_and_handle_cert(c, now)

            expire_check_result = {
                'cert_cn': cert_subject,
                'path': fp.name,
                'expiry': cert_expiry_date,
                'days_remaining': time_remaining.days,
                'health': None,
            }

            classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)

    ######################################################################
    # /Check etcd certs
    ######################################################################

    ######################################################################
    # Check router/registry certs
    #
    # These are saved as secrets in etcd. That means that we can not
    # simply read a file to grab the data. Instead we're going to
    # subprocess out to the 'oc get' command. On non-masters this
    # command will fail, that is expected so we catch that exception.
    ######################################################################
    router_certs = []
    registry_certs = []

    ######################################################################
    # First the router certs
    try:
        router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(),
                                              stdout=subprocess.PIPE)
        router_ds = yaml.load(router_secrets_raw.communicate()[0])
        router_c = router_ds['data']['tls.crt']
        router_path = router_ds['metadata']['selfLink']
    except TypeError:
        # YAML couldn't load the result, this is not a master
        pass
    except OSError:
        # The OC command doesn't exist here. Move along.
        pass
    else:
        (cert_subject,
         cert_expiry_date,
         time_remaining) = load_and_handle_cert(router_c, now, base64decode=True)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': router_path,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)

    ######################################################################
    # Now for registry
    try:
        registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(),
                                                stdout=subprocess.PIPE)
        registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
        registry_c = registry_ds['data']['registry.crt']
        registry_path = registry_ds['metadata']['selfLink']
    except TypeError:
        # YAML couldn't load the result, this is not a master
        pass
    except OSError:
        # The OC command doesn't exist here. Move along.
        pass
    else:
        (cert_subject,
         cert_expiry_date,
         time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': registry_path,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)

    ######################################################################
    # /Check router/registry certs
    ######################################################################

    res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)

    msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
        count=res['total'],
        exp=res['expired'],
        warn=res['warning'],
        ok=res['ok'],
        window=int(module.params['warning_days']),
    )

    # By default we only return detailed information about expired or
    # warning certificates. If show_all is true then we will print all
    # the certificates examined.
    if not module.params['show_all']:
        check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
        check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
        check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
        check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
        check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
    else:
        check_results['ocp_certs'] = ocp_certs
        check_results['kubeconfigs'] = kubeconfigs
        check_results['etcd'] = etcd_certs
        check_results['registry'] = registry_certs
        check_results['router'] = router_certs

    # Sort the final results to report in order of ascending safety
    # time. That is to say, the certificates which will expire sooner
    # will be at the front of the list and certificates which will
    # expire later are at the end. Router and registry certs should be
    # limited to just 1 result, so don't bother sorting those.
    check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
    check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
    check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))

    # This module will never change anything, but we might want to
    # change the return code parameter if there is some catastrophic
    # error we noticed earlier
    module.exit_json(
        check_results=check_results,
        summary=res,
        msg=msg,
        rc=0,
        changed=False
    )
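
The `oc get ... -o yaml` calls above pipe only stdout, so any oc error text still lands on the terminal, and a failed query is only detected indirectly when YAML parsing yields nothing. A slightly stricter sketch of the same probe (the helper name is illustrative), capturing stderr and checking the return code before parsing:

import subprocess
import yaml

def oc_get_secret(name):
    """Return the parsed secret, or None when this host is not a master."""
    try:
        proc = subprocess.Popen(
            ["oc", "get", "secret", name, "-o", "yaml"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,  # keep error chatter out of the output
        )
    except OSError:
        return None  # the oc binary is not installed here
    out, _ = proc.communicate()
    if proc.returncode != 0:
        return None  # oc exists but the query failed
    return yaml.safe_load(out)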

Example 169

Project: octavia Source File: plug.py
    def plug_vip(self, vip, subnet_cidr, gateway,
                 mac_address, mtu=None, vrrp_ip=None, host_routes=None):
        # Validate vip and subnet_cidr, calculate broadcast address and netmask
        try:
            render_host_routes = []
            ip = ipaddress.ip_address(
                vip if six.text_type == type(vip) else six.u(vip))
            network = ipaddress.ip_network(
                subnet_cidr if six.text_type == type(subnet_cidr)
                else six.u(subnet_cidr))
            vip = ip.exploded
            broadcast = network.broadcast_address.exploded
            netmask = (network.prefixlen if ip.version == 6
                       else network.netmask.exploded)
            vrrp_version = None
            if vrrp_ip:
                vrrp_ip_obj = ipaddress.ip_address(
                    vrrp_ip if six.text_type == type(vrrp_ip)
                    else six.u(vrrp_ip)
                )
                vrrp_version = vrrp_ip_obj.version
            if host_routes:
                for hr in host_routes:
                    network = ipaddress.ip_network(
                        hr['destination'] if isinstance(
                            hr['destination'], six.text_type) else
                        six.u(hr['destination']))
                    render_host_routes.append({'network': network,
                                               'gw': hr['nexthop']})
        except ValueError:
            return flask.make_response(flask.jsonify(dict(
                message="Invalid VIP")), 400)

        # Check if the interface is already in the network namespace
        # Do not attempt to re-plug the VIP if it is already in the
        # network namespace
        if self._netns_interface_exists(mac_address):
            return flask.make_response(flask.jsonify(dict(
                message="Interface already exists")), 409)

        # This is the interface prior to moving into the netns
        default_netns_interface = self._interface_by_mac(mac_address)

        # Always put the VIP interface as eth1
        primary_interface = consts.NETNS_PRIMARY_INTERFACE
        secondary_interface = "{interface}:0".format(
            interface=primary_interface)

        # We need to setup the netns network directory so that the ifup
        # commands used here and in the startup scripts "sees" the right
        # interfaces and scripts.
        interface_file_path = util.get_network_interface_file(
            primary_interface)
        os.makedirs('/etc/netns/' + consts.AMPHORA_NAMESPACE)
        shutil.copytree(
            '/etc/network',
            '/etc/netns/{}/network'.format(consts.AMPHORA_NAMESPACE),
            symlinks=True,
            ignore=shutil.ignore_patterns('eth0*', 'openssh*'))
        name = '/etc/netns/{}/network/interfaces'.format(
            consts.AMPHORA_NAMESPACE)
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        # mode 00644
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        with os.fdopen(os.open(name, flags, mode), 'w') as int_file:
            int_file.write('auto lo\n')
            int_file.write('iface lo inet loopback\n')
            if not CONF.amphora_agent.agent_server_network_file:
                int_file.write('source /etc/netns/{}/network/'
                               'interfaces.d/*.cfg\n'.format(
                                   consts.AMPHORA_NAMESPACE))

        # write interface file

        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH

        # If we are using a consolidated interfaces file, just append
        # otherwise clear the per interface file as we are rewriting it
        # TODO(johnsom): We need a way to clean out old interfaces records
        if CONF.amphora_agent.agent_server_network_file:
            flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
        else:
            flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC

        with os.fdopen(os.open(interface_file_path, flags, mode),
                       'w') as text_file:
            text = template_vip.render(
                interface=primary_interface,
                vip=vip,
                vip_ipv6=ip.version == 6,
                broadcast=broadcast,
                netmask=netmask,
                gateway=gateway,
                mtu=mtu,
                vrrp_ip=vrrp_ip,
                vrrp_ipv6=vrrp_version == 6,
                host_routes=render_host_routes,
            )
            text_file.write(text)

        # Update the list of interfaces to add to the namespace
        # This is used in the amphora reboot case to re-establish the namespace
        self._update_plugged_interfaces_file(primary_interface, mac_address)

        # Create the namespace
        netns = pyroute2.NetNS(consts.AMPHORA_NAMESPACE, flags=os.O_CREAT)
        netns.close()

        # Load sysctl in new namespace
        sysctl = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE,
                                  [consts.SYSCTL_CMD, '--system'],
                                  stdout=subprocess.PIPE)
        sysctl.communicate()
        sysctl.wait()
        sysctl.release()

        with pyroute2.IPRoute() as ipr:
            # Move the interfaces into the namespace
            idx = ipr.link_lookup(ifname=default_netns_interface)[0]
            ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE,
                     IFLA_IFNAME=primary_interface)

        # bring interfaces up
        self._bring_if_down(primary_interface)
        self._bring_if_down(secondary_interface)
        self._bring_if_up(primary_interface, 'VIP')
        self._bring_if_up(secondary_interface, 'VIP')

        return flask.make_response(flask.jsonify(dict(
            message="OK",
            details="VIP {vip} plugged on interface {interface}".format(
                vip=vip, interface=primary_interface))), 202)
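
pyroute2.NSPopen mirrors the subprocess.Popen signature, including stdout=subprocess.PIPE, but runs the child inside a named network namespace. Without pyroute2, a similar effect is commonly achieved by shelling out to `ip netns exec`; a sketch under that assumption (requires root, an existing namespace, and the `ip` utility; the helper name and namespace are illustrative):

import subprocess

def run_in_netns(namespace, cmd):
    """Run cmd inside a network namespace and capture its output."""
    proc = subprocess.Popen(
        ["ip", "netns", "exec", namespace] + list(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = proc.communicate()
    return proc.returncode, out, err

# e.g. run_in_netns("my-netns", ["sysctl", "--system"])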

Example 170

Project: oslo.concurrency Source File: processutils.py
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param cwd:             Set the current working directory
    :type cwd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these codes.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
    :param log_errors:      Should stdout and stderr be logged on error?
                            Possible values are
                            :py:attr:`~.LogErrors.DEFAULT`,
                            :py:attr:`~.LogErrors.FINAL`, or
                            :py:attr:`~.LogErrors.ALL`. Note that the
                            values :py:attr:`~.LogErrors.FINAL` and
                            :py:attr:`~.LogErrors.ALL`
                            are **only** relevant when multiple attempts of
                            command execution are requested using the
                            ``attempts`` parameter.
    :type log_errors:       :py:class:`~.LogErrors`
    :param binary:          On Python 3, return stdout and stderr as bytes if
                            binary is True, as Unicode otherwise.
    :type binary:           boolean
    :param on_execute:      This function will be called upon process creation
                            with the object as an argument.  The purpose of this
                            is to allow the caller of `processutils.execute` to
                            track process creation asynchronously.
    :type on_execute:       function(:class:`subprocess.Popen`)
    :param on_completion:   This function will be called upon process
                            completion with the object as an argument.  The
                            purpose of this is to allow the caller of
                            `processutils.execute` to track process completion
                            asynchronously.
    :type on_completion:    function(:class:`subprocess.Popen`)
    :param preexec_fn:      This function will be called
                            in the child process just before the child
                            is executed. WARNING: On Windows, we silently
                            drop this preexec_fn as it is not supported by
                            subprocess.Popen on Windows (it raises a
                            ValueError)
    :type preexec_fn:       function()
    :param prlimit:         Set resource limits on the child process. See
                            below for a detailed description.
    :type prlimit:          :class:`ProcessLimits`
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    :raises:                :class:`OSError`

    The *prlimit* parameter can be used to set resource limits on the child
    process.  If this parameter is used, the child process will be spawned by a
    wrapper process which will set limits before spawning the command.

    .. versionchanged:: 3.4
       Added *prlimit* optional parameter.

    .. versionchanged:: 1.5
       Added *cwd* optional parameter.

    .. versionchanged:: 1.9
       Added *binary* optional parameter. On Python 3, *stdout* and *stderr*
       are now returned as Unicode strings by default, or bytes if *binary* is
       True.

    .. versionchanged:: 2.1
       Added *on_execute* and *on_completion* optional parameters.

    .. versionchanged:: 2.3
       Added *preexec_fn* optional parameter.
    """

    cwd = kwargs.pop('cwd', None)
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    if log_errors is None:
        log_errors = LogErrors.DEFAULT
    binary = kwargs.pop('binary', False)
    on_execute = kwargs.pop('on_execute', None)
    on_completion = kwargs.pop('on_completion', None)
    preexec_fn = kwargs.pop('preexec_fn', None)
    prlimit = kwargs.pop('prlimit', None)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if isinstance(log_errors, six.integer_types):
        log_errors = LogErrors(log_errors)
    if not isinstance(log_errors, LogErrors):
        raise InvalidArgumentError(_('Got invalid arg log_errors: %r') %
                                   log_errors)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        if shell:
            # root helper has to be injected into the command string
            cmd = [' '.join((root_helper, cmd[0]))] + list(cmd[1:])
        else:
            # root helper has to be tokenized into argument list
            cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(c) for c in cmd]

    if prlimit:
        if os.name == 'nt':
            LOG.log(loglevel,
                    _('Process resource limits are ignored as '
                      'this feature is not supported on Windows.'))
        else:
            args = [sys.executable, '-m', 'oslo_concurrency.prlimit']
            args.extend(prlimit.prlimit_args())
            args.append('--')
            args.extend(cmd)
            cmd = args

    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    watch = timeutils.StopWatch()
    while attempts > 0:
        attempts -= 1
        watch.restart()

        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                on_preexec_fn = None
                close_fds = False
            else:
                on_preexec_fn = functools.partial(_subprocess_setup,
                                                  preexec_fn)
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=on_preexec_fn,
                                   shell=shell,
                                   cwd=cwd,
                                   env=env_variables)

            if on_execute:
                on_execute(obj)

            try:
                result = obj.communicate(process_input)

                obj.stdin.close()  # pylint: disable=E1101
                _returncode = obj.returncode  # pylint: disable=E1101
                LOG.log(loglevel, 'CMD "%s" returned: %s in %0.3fs',
                        sanitized_cmd, _returncode, watch.elapsed())
            finally:
                if on_completion:
                    on_completion(obj)

            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                if six.PY3:
                    stdout = os.fsdecode(stdout)
                    stderr = os.fsdecode(stderr)
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            if six.PY3 and not binary and result is not None:
                (stdout, stderr) = result
                # Decode from the locale using the surrogateescape error
                # handler (decoding cannot fail)
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                return (stdout, stderr)
            else:
                return result

        except (ProcessExecutionError, OSError) as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == LOG_ALL_ERRORS or (
                    log_errors == LOG_FINAL_ERROR and not attempts):
                if isinstance(err, ProcessExecutionError):
                    format = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                    LOG.log(loglevel, format, {"desc": err.description,
                                               "cmd": err.cmd,
                                               "code": err.exit_code,
                                               "stdout": err.stdout,
                                               "stderr": err.stderr})
                else:
                    format = _('Got an OSError\ncommand: %(cmd)r\n'
                               'errno: %(errno)r')
                    LOG.log(loglevel, format, {"cmd": sanitized_cmd,
                                               "errno": err.errno})

            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            # NOTE(bnemec): termie's comment above is probably specific to the
            #               eventlet subprocess module, but since we still
            #               have to support that we're leaving the sleep.  It
            #               won't hurt anything in the stdlib case anyway.
            time.sleep(0)
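
For reference, here is a minimal usage sketch of the execute() helper documented above. It assumes oslo.concurrency is installed; the command, path, and exit codes are illustrative, but the keyword arguments are exactly the ones popped from kwargs in the function body:

from oslo_concurrency import processutils

# grep exits 1 when nothing matches, so accept both 0 and 1 as success;
# retry up to three times on ProcessExecutionError or OSError, sleeping
# briefly between attempts because delay_on_retry is set.
stdout, stderr = processutils.execute('grep', 'pattern', '/var/log/syslog',
                                      check_exit_code=[0, 1],
                                      attempts=3,
                                      delay_on_retry=True)
print(stdout)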

Example 171

Project: solum Source File: shell.py
    def _do_build(self, ctxt, build_id, git_info, name, base_image_id,
                  source_format, image_format, assembly_id, run_cmd):
        update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.BUILDING)

        app = get_app_by_assem_id(ctxt, assembly_id)
        LOG.debug("Building app %s %s" % (app.name, app.id))

        solum.TLS.trace.clear()
        solum.TLS.trace.import_context(ctxt)

        source_uri = git_info['source_url']
        commit_sha = git_info.get('commit_sha', '')
        private = git_info.get('private', False)
        ssh_key = git_info.get('private_ssh_key', '')
        # If the repo is private, make sure private ssh key is provided
        if private and not ssh_key:
            LOG.warning("Error building due to missing private ssh key."
                        " assembly ID: %s" % assembly_id)
            job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                    description='private ssh key missing',
                                    assembly_id=assembly_id)
            update_assembly_status(ctxt, assembly_id,
                                   ASSEMBLY_STATES.ERROR)
            return

        image_tag = ''
        lp_access = ''
        if base_image_id != 'auto':
            image = objects.registry.Image.get_lp_by_name_or_uuid(
                ctxt, base_image_id, include_operators_lp=True)
            if (not image or not image.project_id or not image.status or
                    not image.external_ref or not image.docker_image_name or
                    image.status.lower() != 'ready'):
                LOG.warning("Error building due to language pack not ready."
                            " assembly ID: %s" % assembly_id)
                job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                        description='language pack not ready',
                                        assembly_id=assembly_id)
                update_assembly_status(ctxt, assembly_id,
                                       ASSEMBLY_STATES.ERROR)
                return
            base_image_id = image.external_ref
            image_tag = image.docker_image_name
            lp_access = get_lp_access_method(image.project_id)

        build_cmd = self._get_build_command(ctxt, 'build', source_uri,
                                            name, base_image_id,
                                            source_format, image_format,
                                            commit_sha,
                                            lp_image_tag=image_tag)
        solum.TLS.trace.support_info(build_cmd=' '.join(build_cmd),
                                     assembly_id=assembly_id)

        user_env = {}
        try:
            user_env = self._get_environment(ctxt,
                                             git_info,
                                             assembly_id=assembly_id,
                                             run_cmd=run_cmd,
                                             lp_access=lp_access)
        except exception.SolumException as env_ex:
            LOG.exception(env_ex)
            job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                    description=str(env_ex),
                                    assembly_id=assembly_id)

        log_env = user_env.copy()
        if 'OS_AUTH_TOKEN' in log_env:
            del log_env['OS_AUTH_TOKEN']
        if 'OPER_AUTH_TOKEN' in log_env:
            del log_env['OPER_AUTH_TOKEN']
        if 'OPER_OS_STORAGE_URL' in log_env:
            del log_env['OPER_OS_STORAGE_URL']
        solum.TLS.trace.support_info(environment=log_env)

        job_update_notification(ctxt, build_id, IMAGE_STATES.BUILDING,
                                description='Starting the image build',
                                assembly_id=assembly_id)
        # TODO(datsun180b): Associate log with assembly properly
        logpath = "%s/%s-%s.log" % (user_env['SOLUM_TASK_DIR'],
                                    'build',
                                    user_env['BUILD_ID'])
        LOG.debug("Build logs for app %s stored at %s" % (app.name, logpath))
        out = None
        assem = None
        if assembly_id is not None:
            assem = get_assembly_by_id(ctxt, assembly_id)
            if assem.status == ASSEMBLY_STATES.DELETING:
                return

        try:
            out = subprocess.Popen(build_cmd,
                                   env=user_env,
                                   stdout=subprocess.PIPE).communicate()[0]
        except (OSError, ValueError) as subex:
            LOG.exception(subex)
            job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                    description=str(subex),
                                    assembly_id=assembly_id)
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
            return

        if assem is not None:
            assem.type = 'app'
            wf = objects.registry.Workflow.get_by_assembly_id(assem.id)
            upload_task_log(ctxt, logpath, assem, wf.id, 'build')

        '''
        We expect two lines in the output that look like:
        created_image_id=<location of DU>
        docker_image_name=<DU name>
        The DU location is:
        the DU's swift tempUrl if the backend is 'swift';
        the DU's UUID in glance if the backend is 'glance';
        the DU's docker registry location if the backend is 'docker_registry'
        '''
        du_image_loc = None
        docker_image_name = None
        for line in out.split('\n'):
            # Don't break out early: the last matching line
            # wins, which is the expected value
            if line.startswith('created_image_id'):
                solum.TLS.trace.support_info(build_out_line=line)
                du_image_loc = line.replace('created_image_id=', '').strip()
            elif line.startswith('docker_image_name'):
                docker_image_name = line.replace('docker_image_name=', '')

        if not du_image_loc or not docker_image_name:
            job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                    description='image not created',
                                    assembly_id=assembly_id)
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
            return
        else:
            job_update_notification(ctxt, build_id, IMAGE_STATES.READY,
                                    description='built successfully',
                                    created_image_id=du_image_loc,
                                    docker_image_name=docker_image_name,
                                    assembly_id=assembly_id)
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.BUILT)
            return (du_image_loc, docker_image_name)
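
The key=value parsing step above is easy to isolate. A minimal sketch of the same pattern, where the echoed payload is hypothetical and stands in for the build script's real output:

import subprocess

# The argument string contains a real newline, which echo prints verbatim.
out = subprocess.Popen(
    ['echo', 'created_image_id=abc123\ndocker_image_name=myapp'],
    stdout=subprocess.PIPE).communicate()[0]

results = {}
for line in out.decode('utf-8').split('\n'):
    key, sep, value = line.partition('=')
    if sep:
        # As in the example, the last occurrence of each key wins.
        results[key] = value.strip()

print(results.get('created_image_id'), results.get('docker_image_name'))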

Example 172

Project: zuul Source File: command.py
def zuul_run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
    '''
    Execute a command, returns rc, stdout, and stderr.

    :arg args: is the command to run
        * If args is a list, the command will be run with shell=False.
        * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
        * If args is a string and use_unsafe_shell=True it runs with shell=True.
    :kw check_rc: Whether to call fail_json in case of non-zero RC.
        Default False
    :kw close_fds: See documentation for subprocess.Popen(). Default True
    :kw executable: See documentation for subprocess.Popen(). Default None
    :kw data: If given, information to write to the stdin of the command
    :kw binary_data: If False, append a newline to the data.  Default False
    :kw path_prefix: If given, additional path to find the command in.
        This adds to the PATH environment variable so helper commands in
        the same directory can also be found
    :kw cwd: If given, working directory to run the command inside
    :kw use_unsafe_shell: See `args` parameter.  Default False
    :kw prompt_regex: Regex string (not a compiled regex) which can be
        used to detect prompts in the stdout which would otherwise cause
        the execution to hang (especially if no input data is specified)
    :kw environ_update: dictionary to *update* os.environ with
    '''

    shell = False
    if isinstance(args, list):
        if use_unsafe_shell:
            args = " ".join([pipes.quote(x) for x in args])
            shell = True
    elif isinstance(args, (str, unicode)) and use_unsafe_shell:
        shell = True
    elif isinstance(args, (str, unicode)):
        # On python2.6 and below, shlex has problems with text type
        # ZUUL: Hardcode python2 until we're on ansible 2.2
        if isinstance(args, unicode):
            args = args.encode('utf-8')
        args = shlex.split(args)
    else:
        msg = "Argument 'args' to run_command must be list or string"
        self.fail_json(rc=257, cmd=args, msg=msg)

    prompt_re = None
    if prompt_regex:
        try:
            prompt_re = re.compile(prompt_regex, re.MULTILINE)
        except re.error:
            self.fail_json(msg="invalid prompt regular expression given to run_command")

    # expand things like $HOME and ~
    if not shell:
        args = [ os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None ]

    rc = 0
    msg = None
    st_in = None

    # Manipulate the environ we'll send to the new process
    old_env_vals = {}
    # We can set this from both an attribute and per call
    for key, val in self.run_command_environ_update.items():
        old_env_vals[key] = os.environ.get(key, None)
        os.environ[key] = val
    if environ_update:
        for key, val in environ_update.items():
            old_env_vals[key] = os.environ.get(key, None)
            os.environ[key] = val
    if path_prefix:
        old_env_vals['PATH'] = os.environ['PATH']
        os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])

    # If using test-module and explode, the remote lib path will resemble ...
    #   /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
    # If using ansible or ansible-playbook with a remote system ...
    #   /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py

    # Clean out python paths set by ansiballz
    if 'PYTHONPATH' in os.environ:
        pypaths = os.environ['PYTHONPATH'].split(':')
        pypaths = [x for x in pypaths \
                    if not x.endswith('/ansible_modlib.zip') \
                    and not x.endswith('/debug_dir')]
        os.environ['PYTHONPATH'] = ':'.join(pypaths)
        if not os.environ['PYTHONPATH']:
            del os.environ['PYTHONPATH']

    # create a printable version of the command for use
    # in reporting later, which strips out things like
    # passwords from the args list
    to_clean_args = args
    # ZUUL: Hardcode python2 until we're on ansible 2.2
    if isinstance(args, (unicode, str)):
        to_clean_args = shlex.split(to_clean_args)

    clean_args = []
    is_passwd = False
    for arg in to_clean_args:
        if is_passwd:
            is_passwd = False
            clean_args.append('********')
            continue
        if PASSWD_ARG_RE.match(arg):
            sep_idx = arg.find('=')
            if sep_idx > -1:
                clean_args.append('%s=********' % arg[:sep_idx])
                continue
            else:
                is_passwd = True
        arg = heuristic_log_sanitize(arg, self.no_log_values)
        clean_args.append(arg)
    clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)

    if data:
        st_in = subprocess.PIPE

    # ZUUL: changed stderr to follow stdout
    kwargs = dict(
        executable=executable,
        shell=shell,
        close_fds=close_fds,
        stdin=st_in,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    if cwd and os.path.isdir(cwd):
        kwargs['cwd'] = cwd

    # store the pwd
    prev_dir = os.getcwd()

    # make sure we're in the right working directory
    if cwd and os.path.isdir(cwd):
        try:
            os.chdir(cwd)
        except (OSError, IOError):
            e = get_exception()
            self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))

    try:

        if self._debug:
            if isinstance(args, list):
                running = ' '.join(args)
            else:
                running = args
            self.log('Executing: ' + running)
        # ZUUL: Replaced the execution loop with the zuul_runner run function
        cmd = subprocess.Popen(args, **kwargs)
        t = threading.Thread(target=follow, args=(cmd.stdout,))
        t.daemon = True
        t.start()
        ret = cmd.wait()
        # Give the thread that is writing the console log up to 10 seconds
        # to catch up and exit.  If it hasn't done so by then, it is very
        # likely stuck in readline() because it spawned a child that is
        # holding stdout or stderr open.
        t.join(10)
        with Console() as console:
            if t.isAlive():
                console.addLine("[Zuul] standard output/error still open "
                                "after child exited")
            console.addLine("[Zuul] Task exit code: %s\n" % ret)

        # ZUUL: If the console log follow thread *is* stuck in readline,
        # we can't close stdout (attempting to do so raises an
        # exception), so this is disabled.
        # cmd.stdout.close()

        # ZUUL: stdout and stderr are in the console log file
        stdout = ''
        stderr = ''

        rc = cmd.returncode
    except (OSError, IOError):
        e = get_exception()
        self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
    except Exception:
        e = get_exception()
        self.fail_json(rc=257, msg=str(e), exception=traceback.format_exc(), cmd=clean_args)

    # Restore env settings
    for key, val in old_env_vals.items():
        if val is None:
            del os.environ[key]
        else:
            os.environ[key] = val

    if rc != 0 and check_rc:
        msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
        self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)

    # reset the pwd
    os.chdir(prev_dir)

    return (rc, stdout, stderr)
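
The pattern worth extracting from this example is the follower thread: stderr is merged into stdout, and a daemon thread drains the pipe so the child can never block on a full pipe buffer. A minimal Python 3 sketch under those assumptions (the command and names are illustrative, not zuul's API):

import subprocess
import threading

def follow(stream):
    # Read until EOF; the real module writes each line to a console log.
    for line in iter(stream.readline, b''):
        print(line.decode('utf-8', 'replace'), end='')

proc = subprocess.Popen(['ls', '-l'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT)
reader = threading.Thread(target=follow, args=(proc.stdout,))
reader.daemon = True
reader.start()
rc = proc.wait()
reader.join(10)  # bounded wait, as above, in case a grandchild holds the pipe
print('exit code: %s' % rc)

The bounded join matters for the same reason given in the zuul comments: if the child spawned a grandchild that keeps the pipe open, readline() never returns, and joining without a timeout would hang the caller.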

Example 173

Project: tcollector Source File: zfsiostats.py
def main():
    """zfsiostats main loop"""
    global signal_received

    collection_interval = DEFAULT_COLLECTION_INTERVAL
    report_capacity_every_x_times = DEFAULT_REPORT_CAPACITY_EVERY_X_TIMES
    report_disks_in_vdevs = DEFAULT_REPORT_DISKS_IN_VDEVS
    if zfsiostats_conf:
        config = zfsiostats_conf.get_config()
        collection_interval = config['collection_interval']
        report_capacity_every_x_times = config['report_capacity_every_x_times']
        report_disks_in_vdevs = config['report_disks_in_vdevs']

    signal.signal(signal.SIGTERM, handlesignal)
    signal.signal(signal.SIGINT, handlesignal)

    try:
        p_zpool = subprocess.Popen(
            ["zpool", "iostat", "-v", str(collection_interval)],
            stdout=subprocess.PIPE,
        )
    except OSError, e:
        if e.errno == errno.ENOENT:
            # it makes no sense to run this collector here
            sys.exit(13) # we signal tcollector to not run us
        raise

    firstloop = True
    report_capacity = (report_capacity_every_x_times - 1)
    last_leg = 0
    ltype = None
    timestamp = int(time.time())
    capacity_stats_pool = {}
    capacity_stats_device = {}
    io_stats_pool = {}
    io_stats_device = {}
    start_re = re.compile(".*capacity.*operations.*bandwidth")
    headers_re = re.compile(".*pool.*alloc.*free.*read.*write.*read.*write")
    separator_re = re.compile(".*-----.*-----.*-----")
    while signal_received is None:
        try:
            line = p_zpool.stdout.readline()
        except (IOError, OSError), e:
            if e.errno in (errno.EINTR, errno.EAGAIN):
                break
            raise

        if not line:
            # end of the program, die
            break

        if start_re.match(line):
            assert ltype in (None, T_EMPTY), \
                "expecting last state T_EMPTY or None, now got %s" % ltype
            ltype = T_START
        elif headers_re.match(line):
            assert ltype == T_START, \
                "expecting last state T_START, now got %s" % ltype
            ltype = T_HEADERS
        elif separator_re.match(line):
            assert ltype in (T_DEVICE, T_HEADERS), \
                "expecting last state T_DEVICE or T_HEADERS, now got %s" % ltype
            ltype = T_SEPARATOR
        elif len(line) < 2:
            assert ltype == T_SEPARATOR, \
                "expecting last state T_SEPARATOR, now got %s" % ltype
            ltype = T_EMPTY
        elif line.startswith("  mirror"):
            assert ltype in (T_POOL, T_DEVICE), \
                "expecting last state T_POOL or T_DEVICE, now got %s" % ltype
            ltype = T_LEG
        elif line.startswith("  "):
            assert ltype in (T_POOL, T_DEVICE, T_LEG), \
                "expecting last state T_POOL or T_DEVICE or T_LEG, now got %s" % ltype
            ltype = T_DEVICE
        else:
            # must be a pool name
            #assert ltype == T_SEPARATOR, \
            #    "expecting last state T_SEPARATOR, now got %s" % ltype
            if ltype == T_SEPARATOR:
                parentpoolname = ""
            ltype = T_POOL

        if ltype == T_START:
            for x in (
                      capacity_stats_pool, capacity_stats_device,
                      io_stats_pool, io_stats_device,
                      ):
                x.clear()
            timestamp = int(time.time())

        elif ltype == T_POOL:
            line = line.strip()
            poolname, s_df, s_io = extract_info(line,report_disks_in_vdevs)
            if parentpoolname == "":
                parentpoolname = poolname
            else:
                poolname=parentpoolname+"."+poolname
            capacity_stats_pool[poolname] = s_df
            io_stats_pool[poolname] = s_io
            # marker for leg
            last_leg = 0

        elif ltype == T_LEG:
            last_leg = last_leg + 1
            line = line.strip()
            devicename, s_df, s_io = extract_info(line,report_disks_in_vdevs)
            capacity_stats_device["%s %s%s" % (poolname, devicename, last_leg)] = s_df
            io_stats_device["%s %s%s" % (poolname, devicename, last_leg)] = s_io

        elif ltype == T_DEVICE:
            line = line.strip()
            devicename, s_df, s_io = extract_info(line,report_disks_in_vdevs)
            capacity_stats_device["%s %s" % (poolname, devicename)] = s_df
            io_stats_device["%s %s" % (poolname, devicename)] = s_io

        elif ltype == T_EMPTY:
            if report_capacity_every_x_times > 0:
                report_capacity += 1
            if report_capacity == report_capacity_every_x_times:
                report_capacity = 0
                for poolname, stats in capacity_stats_pool.items():
                    fm = "zfs.df.pool.kb.%s %d %s pool=%s"
                    for statname, statnumber in stats.items():
                        print fm % (statname, timestamp, statnumber, poolname)
                for devicename, stats in capacity_stats_device.items():
                    fm = "zfs.df.device.kb.%s %d %s device=%s pool=%s"
                    poolname, devicename = devicename.split(" ", 1)
                    for statname, statnumber in stats.items():
                        print fm % (statname, timestamp, statnumber,
                                    devicename, poolname)
            if firstloop:
                # this flag prevents printing the data in the first loop,
                # which is a since-boot summary similar to iostat
                # and is useless to us
                firstloop = False
            else:
                for poolname, stats in io_stats_pool.items():
                    fm = "zfs.io.pool.%s %d %s pool=%s"
                    for statname, statnumber in stats.items():
                        print fm % (statname, timestamp, statnumber, poolname)
                for devicename, stats in io_stats_device.items():
                    fm = "zfs.io.device.%s %d %s device=%s pool=%s"
                    poolname, devicename = devicename.split(" ", 1)
                    for statname, statnumber in stats.items():
                        print fm % (statname, timestamp, statnumber,
                                    devicename, poolname)
            sys.stdout.flush()

    if signal_received is None:
        signal_received = signal.SIGTERM
    try:
        os.kill(p_zpool.pid, signal_received)
    except Exception:
        pass
    p_zpool.wait()
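
Stripped of the zpool-specific parsing, the collector's core subprocess pattern is: start a long-lived command that reports periodically, consume its stdout line by line, and terminate it cleanly. A minimal Python 3 sketch, where 'vmstat 1' (Linux) stands in for 'zpool iostat -v <interval>':

import signal
import subprocess

proc = subprocess.Popen(['vmstat', '1'], stdout=subprocess.PIPE)
try:
    for _ in range(3):                 # consume a few report lines
        line = proc.stdout.readline()
        if not line:                   # EOF: the child exited on its own
            break
        print(line.decode('utf-8', 'replace').rstrip())
finally:
    proc.send_signal(signal.SIGTERM)   # mirror the os.kill() above
    proc.wait()                        # reap the child

The original wraps os.kill() in a bare try/except because the child may already be gone by shutdown time; Popen.send_signal() is a close equivalent, and on current Python it does nothing if the process has already completed.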