mocks.mocks.MockHDCamera

Here are examples of the Python API mocks.mocks.MockHDCamera taken from open source projects.
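In the example below, MockHDCamera stands in for the real HDCamera when a DEBUG flag is set, and the surrounding code simply calls acquire_image() on whichever camera object was constructed. The following minimal sketch is not taken from the project; it only assumes, based on that example, that the mock is constructed without arguments and that acquire_image() returns the path of the acquired png file or None on failure.

from mocks.mocks import MockHDCamera

# Hypothetical stand-alone usage: swap the mock in for the flight camera,
# mirroring "camera = HDCamera(...) if not DEBUG else MockHDCamera()" in the example below.
camera = MockHDCamera()

# acquire_image() is assumed to return the path of the acquired png file, or None on failure.
file_png = camera.acquire_image()

if file_png is not None:
    print("Acquired image: " + file_png)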

1 Example

Source: smartcam.py
with MIT License
from georgeslabreche

def run_experiment():
    """Run the experiment."""

    # WARNING:  The logger has not yet been initialized.
    #           Make sure that no logging happens until we have initialized the logger.
    #           We are doing this because if there are log files left over from previous runs then we want to tar and downlink those
    #           before we start this experiment run and in that process we don't want to include logs created during this experiment's run.
    #
    # FIXME:    Come up with a more elegant solution so that we can start logging right away.
    #           Maybe just filter out this run's log from the tarring process based on its timestamped filename.

    # At this point, only instantiate classes that:
    #   1) Are required to package files from previous runs that may have been left over due to an abrupt termination of a previous run.
    #   2) Do not produce any logs so that logs created for this experiment's run are not packaged with files left over from previous run(s).

    # The config parser.
    cfg = AppConfig()

    # The utils object.
    utils = Utils()
    
    # Instantiate a compressor object if a compression algorithm was specified and configured in the config.ini.
    raw_compressor = None

    # Raw image file compression will only be applied if we enable compressed raw downlinking.
    if cfg.downlink_compressed_raws and cfg.raw_compression_type == 'fapec' and cfg.init_compression_fapec_props() is True:

        # Instantiate a compression object that will be used to compress the raw image files.
        raw_compressor = Fapec(cfg.compression_fapec_chunk,\
            cfg.compression_fapec_threads,\
            cfg.compression_fapec_dtype,\
            cfg.compression_fapec_band,\
            cfg.compression_fapec_losses,\
            cfg.compression_fapec_meaningful_bits,\
            cfg.compression_fapec_lev)

    # Two cases can leave files that have not been downlinked and that we want to downlink now:
    #
    #   1) If the experiment was terminated in a previous run before it had a chance to exit the image acquisition loop then we might have some logs and images that weren't tarred and moved for downlink.
    #      Check if these files exist before starting the experiment and move them to the filestore's toGround folder for downlinking.
    #
    #   2) If previous runs had downlink_thumbnails set to "no" in the config.ini but now that config parameter is set to "yes".
    #      We first want to downlink the past thumbnails so that this experiment run can package its own thumbnails that do not include those from previous runs.
    #
    # IMPORTANT: Do this before we start logging for the current run or else this run's log will be included in the previous run(s)' tar and downlink.
    prev_run_tar_jpeg = False
    prev_run_tar_raws = False

    # Package thumbnails for downlinking.
    if cfg.downlink_thumbnails:
        tar_path = utils.package_files_for_downlinking("jpeg", cfg.downlink_log_if_no_images, cfg.do_clustering, START_TIME, True, False)

        if tar_path is not None:
            # Use this flag to log later so that we don't create a new log file now that will end up being packaged if we are also tarring raw image files generated in previous runs.
            prev_run_tar_jpeg = True

            # Split and move tar to filestore's toGround folder.
            utils.split_and_move_tar(tar_path, cfg.downlink_compressed_split)

    # Package compressed raws for downlinking.
    if cfg.downlink_compressed_raws and raw_compressor is not None:
        tar_path = utils.package_files_for_downlinking(cfg.raw_compression_type, cfg.downlink_log_if_no_images, cfg.do_clustering, START_TIME, True, False)

        if tar_path is not None:
            # This is not strictly necessary here since there is no more tarring of previous files after this point, but it is kept this way for consistency.
            prev_run_tar_raws = True

            # Split and move tar to filestore's toGround folder.
            utils.split_and_move_tar(tar_path, cfg.downlink_compressed_split)


    # WARNING:  Logging is only initialized here.
    #           Prior to this point attempts to log anything will result in an error.
    #           Now we can start logging for this experiment's run. Init and configure the logger.
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    logging.Formatter.converter = time.gmtime

    # Make sure that the logger object being set is the global logger variable.
    global logger
    logger = setup_logger('smartcam_logger', LOG_FILE, formatter, level=logging.INFO)


    # If files were left over from previous experiment runs then they were tarred, split, and moved for downlinking.
    # Log this operation. This creates the first log entries for this experiment's run.
    if prev_run_tar_jpeg:
        logger.info("Tarred for downlink the thumbnail and/or log files from the previous run(s).")

    if prev_run_tar_raws:
        logger.info("Tarred for downlink the compressed raw and/or log files from previous run(s).")

    # Instantiate the remaining required classes.
    camera = HDCamera(cfg.cam_gains, cfg.cam_exposure) if not DEBUG else MockHDCamera()

    img_editor = ImageEditor()
    img_classifier = ImageClassifier()
    

    # These initializations will write to the log file in case of an exception.
    # So we make sure they are initialized after we've packaged logs that may remain from previous runs.
    # If there were no files remaining from previous runs then exceptions that may be thrown here will be the first log entries for this experiment's run.
    img_metadata = None
    
    if DEBUG:
        img_metadata = MockImageMetaData(BASE_PATH, cfg.tle_path, cfg.cam_gains, cfg.cam_exposure)
    else:
        img_metadata = ImageMetaData(BASE_PATH, cfg.tle_path, cfg.cam_gains, cfg.cam_exposure)
    
    geojson_utils = GeoJsonUtils(cfg.gen_geojson)

    # Default image acquisition interval. Can be throttled when an acquired image is labeled to keep.
    image_acquisition_period = cfg.gen_interval_default

    # Image acquisition loop flag and counter to keep track.
    done = False
    counter = 0

    # Flag indicating whether or not we should skip the image acquisition and labeling process.
    # We want to skip in case some criterion is not met or in case we encounter an error.
    success = True

    # Error counter.
    # Exit image acquisition loop when the maximum error count is reached.
    # The maximum error count is set in the config.ini.
    error_count = 0

    # Image acquisition loop.
    while not done:

        # The .done file exists if a stop experiment command was issued.
        # Exit the image acquisition loop if it exists.
        if os.path.exists(STOP_FILE):
            logger.info("Stop experiment triggered: exiting the image acquisition loop and shutting down the app.")
            done = True
            break

        # Use the existence of a png file as an indicator of whether or not an image was successfully acquired.
        file_png = None

        # If the previous image acquisition loop iteration was skipped due to an error.
        if not success:

            # Increment error counter.
            error_count = error_count + 1
            
            # Reset the image acquisition period to the default value.
            # Do this in case the period was throttled in the previous iteration of the image acquisition loop.
            image_acquisition_period = cfg.gen_interval_default

            if error_count >= cfg.max_error_count:
                # Maximum error count reached. Exit image acquisition loop to terminate application.
                logger.info("Exit image acquisition loop: reached maximum error count.")
                break
       
        # Start of a new image acquisition loop iteration. Assume success.
        success = True

        # Init the keep_image flag indicating whether we keep the image.
        keep_image = False

        # Cleanup any files that may have been left over from a previous run that may have terminated ungracefully.
        # Skip image acquisition in case of file deletion failure.
        if utils.cleanup() < 0:
            success = False

        # Check if the experiment's toGround folder is below a configured quota before proceeding with image acquisition.
        try:
            toGround_size = int(subprocess.check_output(['du', '-s', TOGROUND_PATH]).decode('utf-8').split()[0])
            done = True if toGround_size >= cfg.quota_toGround else False

            if done:
                # Exit the image acquisition loop in case toGround disk size is too large.
                logger.info("Exiting: the experiment's toGround folder disk usage is greater than the configured quota: {TG} KB > {Q} KB.".format(\
                    TG=toGround_size,\
                    Q=cfg.quota_toGround))

                # Break out the image acquisition loop.
                break

        except:
            # Exit the image acquisition loop in case of exception.
            logger.exception("Exiting: failed to check disk space use of the experiment's toGround folder.")
            break

        try:
            # If the image acquisition type is AOI then only acquire an image if the spacecraft is located above an area of interest.
            # Areas of interest are defined as geographic shapes represented by polygons listed in the GeoJSON file.
            if cfg.gen_type == GEN_TYPE_AOI:

                # Assumptions for AOI image acquisition mode.
                is_daytime = False
                is_in_aoi = False
                
                try:
                    # Get the coordinates of the spacecraft's current groundtrack position.
                    coords = img_metadata.get_groundtrack_coordinates()

                    # Proceed if groundtrack coordinates successfully fetched.
                    if coords is not None:

                        # Find out if it is daytime at the point directly below the spacecraft (i.e. at the point coordinate of the spacecraft's groundtrack position).
                        is_daytime = img_metadata.is_daytime(coords['lat'], coords['lng'], coords['dt'])
                    
                        # If the groundtrack coordinates are above a point on Earth's surface where it is daylight then proceed in checking if we are above an area of interest.
                        if is_daytime:

                            # Check if the spacecraft is above an area of interest.
                            # Continue with the image acquisition if it is by setting the success flag to True.
                            is_in_aoi = geojson_utils.is_point_in_polygon(coords['lat'] / ephem.degree, coords['lng'] / ephem.degree)

                    else:
                        # Skip this image acquisition loop if ground track coordinates not fetched.
                        logger.error("Skipping image acquisition: failed to fetch groundtrack coordinates.")
                        success = False

                except:
                    # Skip this image acquisition loop if an unexpected exception occurred.
                    logger.exception("Failed to acquire image based on geographic area of interest.")
                    success = False

                # Acquire an image if the spacecraft is above an AOI during daytime.
                if success and is_daytime and is_in_aoi:
                    # Acquire the image.
                    file_png = camera.acquire_image()

                    # Check if image acquisition was OK.
                    success = True if file_png is not None else False

            else: # If the image acquisition type is polling (as opposed to AOI).

                # Acquire the image.
                file_png = camera.acquire_image()

                # Check if image acquisition was OK.
                success = True if file_png is not None else False

            # Proceed if an image was successfully acquired.
            if success and file_png is not None:
            
                # If we have successfully acquired a png file then create the jpeg thumbnail if we want to downlink thumbnails.
                if cfg.downlink_thumbnails:
                    # The thumbnail filename.
                    file_thumbnail = file_png.replace(".png", "_thumbnail.jpeg")
                    
                    # Create the thumbnail.
                    success = img_editor.create_thumbnail(file_png, file_thumbnail, cfg.jpeg_scaling, cfg.jpeg_quality)

                # Proceed if we have successfully created the thumbnail image.
                if success:

                    # Set the first image classification model to apply.
                    next_model = cfg.entry_point_model

                    # Make sure predictions_dict is always defined, even if the model pipeline loop below exits early.
                    predictions_dict = None

                    # Keep applying follow up models to the kept image as long as images are labeled to be kept and follow up models are defined.
                    while next_model is not None:

                        # Assuming the image will not be kept until we get the final result from the last model in the pipeline.
                        keep_image = False

                        # Init the model configuration properties for the current model.
                        success, model_type = cfg.init_model_props(next_model)

                        # Check that the model section exists in the configuration file before proceeding.
                        if not success:
                            logger.error("Skipping the '{M}' model: it is not defined in the config.ini file.".format(M=next_model))
                            break

                        else:
                            # Logging which model in the pipeline is being used to classify the image
                            logger.info("Labeling the image using the '{M}' model.".format(M=next_model))

                        
                        # Determine image input that will be fed into the model.
                        file_image_input = None

                        if model_type == MODEL_TYPE_TF_LITE:
                            # File name of the image file that will be used as the input image to feed the image classification model.
                            file_image_input = file_png.replace(".png", "_input.jpeg")

                            # Create the image that will be used as the input for the neural network image classification model.
                            # Downsample it from the thumbnail image that was previously created.
                            success = img_editor.create_input_image(file_thumbnail, file_image_input, cfg.input_height, cfg.input_width, cfg.jpeg_quality)

                        elif model_type == MODEL_TYPE_EXEC_BIN:
                            if cfg.input_format == "ims_rgb":
                                file_image_input = file_png.replace(".png", ".ims_rgb")

                            elif cfg.input_format == "png":
                                file_image_input = file_png

                            elif cfg.input_format == "jpeg":
                                file_image_input = file_thumbnail
                                
                            # Flag if image input was successfully set.
                            if file_image_input is not None:
                                success = True
                            
                        else:
                            success = False

                        # If the input image for the model was successfully created, proceed with running the image classification program.
                        if success:

                            # Label the image with predictions
                            predictions_dict = None

                            if model_type == MODEL_TYPE_TF_LITE:
                                predictions_dict = img_classifier.label_image_with_tf_model(\
                                    file_image_input, cfg.tflite_model, cfg.file_labels,\
                                    cfg.input_height, cfg.input_width, cfg.input_mean, cfg.input_std)

                            elif model_type == MODEL_TYPE_EXEC_BIN:
                                predictions_dict = img_classifier.label_image_with_exec_bin(\
                                    file_image_input, cfg.bin_model, cfg.write_mode, cfg.args)

                            # Break out of the loop if the image classification program returns an error.
                            if predictions_dict is None:

                                # Break out of the loop.
                                break

                            # Fetch image classification result if the image classification program doesn't return an error code.
                            elif predictions_dict:

                                # Get label with highest prediction confidence.
                                applied_label = max(predictions_dict.items(), key=operator.itemgetter(1))[0]
                                
                                # Get the confidence value of the label with the highest confidence.
                                applied_label_confidence = float(predictions_dict[applied_label])

                                # If the classification confidence is not greater than or equal to the configured threshold then discard the image.
                                if applied_label_confidence < float(cfg.confidence_threshold):
                                    logger.info("Insufficient prediction confidence level to label the image (the threshold is currently set to " + cfg.confidence_threshold + ").")

                                    # Break out of the loop if the prediction confidence is not high enough and we cannot proceed in labeling the image.
                                    break
                                
                                else:
                                    # Log highest confidence prediction.
                                    logger.info("Labeling the image as '" + applied_label + "'.")

                                    # Determine if we are keeping the image and if we are applying another classification model to it.
                                    # If next_model is not None then proceed to another iteration of this model pipeline loop.
                                    keep_image, next_model = utils.get_image_keep_status_and_next_model(applied_label, cfg.labels_keep)


                    # We have exited the model pipeline loop.
                    
                    # Collect image metadata. Even for images that will not be kept.
                    if predictions_dict is not None and cfg.collect_metadata:
                        metadata = img_metadata.collect_metadata(file_png, applied_label, applied_label_confidence, keep_image)
                        
                        # Write metadata to a CSV file.
                        if metadata is not None:
                            img_metadata.write_metadata(METADATA_CSV_FILE, metadata)

                    # Remove the image if it is not labeled for keeping.
                    if not keep_image:
                        # Log image removal.
                        logger.info("Ditching the image.")

                        # The acquired image is not of interest: fall back to the default image acquisition frequency.
                        image_acquisition_period = cfg.gen_interval_default

                        # Remove image.
                        utils.cleanup()
                    
                    # Move the image to the experiment's toGround folder if we have gone through all the
                    # models in the pipeline and still have an image that is labeled to keep for downlinking.
                    else:

                        # The current image has been classified with a label of interest.
                        # Keep the image, but only the file types configured in the config.ini file.
                        logger.info("Keeping the image.")

                        # Compress raw image if configured to do so.
                        if raw_compressor is not None:

                            # Log message to indicate compression.
                            logger.info("Compressing the raw image.")
                            
                            # Source and destination file paths for raw image file compression.
                            file_raw_image = file_png.replace(".png", ".ims_rgb")
                            file_raw_image_compressed = TOGROUND_PATH + "/" + applied_label + "/" + ntpath.basename(file_png).replace(".png", "." + cfg.raw_compression_type)

                            # Create a label directory in the experiment's toGround directory.
                            # This is where the compressed raw image file will be moved to and how we categorize images based on their predicted labels.
                            toGround_label_dir = TOGROUND_PATH + '/' + applied_label
                            if not os.path.exists(toGround_label_dir):
                                os.makedirs(toGround_label_dir)

                            # Compress the raw image file.
                            raw_compressor.compress(file_raw_image, file_raw_image_compressed)

                        # Move the images we want to keep into the experiment's toGround folder.
                        utils.move_images_for_keeping(cfg.raw_keep, cfg.png_keep, applied_label)

                        # An image of interest has been acquired: throttle image acquisition frequency.
                        image_acquisition_period = cfg.gen_interval_throttle

        except:
            # In case of exception just log the stack trace and proceed to the next image acquisition iteration.
            logger.exception("Failed to acquire and classify image.")

        # Error handling here to not risk an unlikely infinite loop.
        try:

            # Flag indicating if an AOI image was acquired. Use this flag to determine if the loop counter gets incremented or not.
            # If image acquisition is set to AOI but an image is not acquired then don't increment the counter for this iteration.
            # This is because in AOI mode the maximum counter value is the total number of images we want to acquire rather than
            # the maximum number of labelled images (as is the case for the Looping mode for image acquisition).
            if cfg.gen_type == GEN_TYPE_AOI:
                if keep_image:
                    counter = counter + 1

            else: # Increment image acquisition labeling counter for the polling mode.
                counter = counter + 1


            # Wait the configured sleep time before proceeding to the next image acquisition and labeling.
            if counter < cfg.gen_number:

                # Don't spam the log in case of a long run for image acquisition type "aoi".
                if cfg.gen_type != GEN_TYPE_AOI:
                    logger.info("Wait {T} seconds...".format(T=image_acquisition_period))

                time.sleep(image_acquisition_period)
            else:
                logger.info("Image acquisition loop completed.")
            
            # Keep looping until the target iteration count is reached.
            if counter >= cfg.gen_number:
                done = True
            else:
                done = False

        except:
            # An unlikely exception is preventing the loop counter from incrementing.
            # Log the exception and exit the loop.
            logger.exception("An unlikely failure occured while waiting for the next image acquisition.")
            done = True
            

    # We have exited the image acquisition and labeling loop.
    # This means that we have finished labeling the acquired images. 

    # Do image clustering if enabled to do so in the config file.
    # WARNING: if auto thumbnail downlink is not enabled then the collected training data will include duplicate images.
    if cfg.do_clustering:
        img_classifier.cluster_labeled_images(cfg.cluster_for_labels, cfg.cluster_k, cfg.cluster_collect_threshold, cfg.cluster_img_types)

    # Log some housekeeping data.
    # Make sure this is done before packaging files for downlinking.
    utils.log_housekeeping_data()

    # Tar the images and the log files for downlinking.

    # Package thumbnails for downlinking.
    if cfg.downlink_thumbnails:
        tar_path = utils.package_files_for_downlinking("jpeg", cfg.downlink_log_if_no_images, cfg.do_clustering, START_TIME, False, True)

        if tar_path is not None:
            utils.split_and_move_tar(tar_path, cfg.downlink_compressed_split)

    # Package compressed raws for downlinking.
    if cfg.downlink_compressed_raws and raw_compressor is not None:
        tar_path = utils.package_files_for_downlinking(cfg.raw_compression_type, cfg.downlink_log_if_no_images, cfg.do_clustering, START_TIME, False, True)

        if tar_path is not None:
            utils.split_and_move_tar(tar_path, cfg.downlink_compressed_split)

    # Clean things up.
    utils.cleanup()

    # Last operation before exiting the app: remove the hidden .stop file if it exists.
    # The .stop file is created when the stopExperiment command invokes the stop_exp1000.sh script.
    # The .stop file serves as a flag that signals the app to break out of the image acquisition loop so that the app can terminate.
    # If the .stop file is not removed then the experiment will exit the image acquisition loop as soon as it enters it.
    # Checking for the .stop file and removing it is also done when starting the app, just in case the app was ungracefully shut down
    # during its previous run.
    if os.path.exists(STOP_FILE):
        os.remove(STOP_FILE)


def setup_logger(name, log_file, formatter, level=logging.INFO):