numpy.uint8

Here are examples of the Python API numpy.uint8, taken from open source projects.

168 Examples
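
For orientation before the project code, a minimal sketch (not taken from any of the projects below) of the dtype itself: numpy.uint8 is an unsigned 8-bit integer, one byte per element, range 0-255, with modular wraparound on overflow.

import numpy as np

a = np.array([0, 127, 255], dtype=np.uint8)
print(a.dtype, a.itemsize)                             # uint8 1  (one byte per element)
print(np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)  # 0 255

# Array arithmetic wraps modulo 256 rather than saturating:
print(a + np.uint8(1))                                 # [  1 128   0]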

Example 101

Project: rasterio Source File: test_warp.py
def test_reproject_init_nodata_tofile(tmpdir):
    """Test that nodata is being initialized."""
    params = default_reproject_params()

    tiffname = str(tmpdir.join('foo.tif'))

    source1 = np.zeros((params.width, params.height), dtype=np.uint8)
    source2 = source1.copy()

    # fill both sources w/ arbitrary values
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100

    kwargs = {
        'count': 1,
        'width': params.width,
        'height': params.height,
        'dtype': np.uint8,
        'driver': 'GTiff',
        'crs': params.dst_crs,
        'transform': params.dst_transform
    }

    with rasterio.open(tiffname, 'w', **kwargs) as dst:
        reproject(
            source1,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0
        )

        # 200s should be overwritten by 100s
        reproject(
            source2,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0
        )

    with rasterio.open(tiffname) as src:
        assert src.read().max() == 100
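
The quadrant fills above depend on integer slice bounds; with floor division the same pattern works in Python 2 and 3 alike. A minimal pure-NumPy sketch of the fill, independent of rasterio:

import numpy as np

src = np.zeros((8, 8), dtype=np.uint8)
rows, cols = src.shape
src[:rows // 2, :cols // 2] = 200   # fill the upper-left quadrant
print(src.dtype, src.max())         # uint8 200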

Example 102

Project: neural-network-animation Source File: image_util.py
@deprecated('1.4.0')
def autocontrast(image, cutoff=0):
    """
    Maximize image contrast, based on histogram.  This completely
    ignores the alpha channel.
    """
    assert image.dtype == np.uint8

    output_image = np.empty((image.shape[0], image.shape[1], 3), np.uint8)

    for i in xrange(0, 3):
        plane = image[:,:,i]
        output_plane = output_image[:,:,i]
        h = np.histogram(plane, bins=256)[0]
        if cutoff:
            # cut off pixels from both ends of the histogram
            # get number of pixels
            n = 0
            for ix in xrange(256):
                n = n + h[ix]
            # remove cutoff% pixels from the low end
            cut = n * cutoff / 100
            for lo in range(256):
                if cut > h[lo]:
                    cut = cut - h[lo]
                    h[lo] = 0
                else:
                    h[lo] = h[lo] - cut
                    cut = 0
                if cut <= 0:
                    break
            # remove cutoff% samples from the hi end
            cut = n * cutoff / 100
            for hi in xrange(255, -1, -1):
                if cut > h[hi]:
                    cut = cut - h[hi]
                    h[hi] = 0
                else:
                    h[hi] = h[hi] - cut
                    cut = 0
                if cut <= 0:
                    break

        # find lowest/highest samples after preprocessing
        for lo in xrange(256):
            if h[lo]:
                break
        for hi in xrange(255, -1, -1):
            if h[hi]:
                break

        if hi <= lo:
            output_plane[:,:] = plane
        else:
            scale = 255.0 / (hi - lo)
            offset = -lo * scale
            lut = np.arange(256, dtype=np.float64)
            lut *= scale
            lut += offset
            lut = lut.clip(0, 255)
            lut = lut.astype(np.uint8)

            output_plane[:,:] = lut[plane]

    return output_image
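
The essential uint8 idiom above is the final lookup: a 256-entry table indexed by the image itself (lut[plane]) remaps every pixel in one vectorized step. A standalone sketch:

import numpy as np

plane = np.array([[0, 64], [128, 255]], dtype=np.uint8)
# Build a contrast-stretching LUT; clip keeps values in the uint8 range.
lut = np.clip(np.arange(256) * 1.5, 0, 255).astype(np.uint8)
print(lut[plane])   # every pixel value v is replaced by lut[v]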

Example 103

Project: visvis Source File: polygonalModeling.py
Function: draw
    def _Draw(self, shading, refColor, shader):
        """ The actual drawing. Used for drawing faces, lines, and shape.
        """
        
        # Need vertices
        if self._vertices is None:
            return
        
        # Prepare normals
        if shading != 'plain':            
            # Need normals
            if self._normals is None:
                processing.calculateNormals(self)
            # Do we need flat normals?
            if shading == 'flat':
                if self._flatNormals is None:
                    processing.calculateFlatNormals(self)
                normals = self._flatNormals 
            else:
                normals = self._normals
            #
            gl.glEnableClientState(gl.GL_NORMAL_ARRAY)
            gl.glNormalPointerf(normals)
        
        # Prepare vertices (in the code above the vertex array can be updated)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glVertexPointerf(self._vertices)
        
        # Prepare colormap indices, texture coords, or colors (if available)
        useTexCords = False
        SH_ALBEIDO = shaders.SH_MF_ALBEIDO_UNIT
        if self._values is not None:
            values = values2 = self._values
            if self._values2 is not None:
                values2 = self._values2
            if values.shape[1] == 1:
                # Colormap: use values2
                values = values2 
                useTexCords = True
                gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
                gl.glTexCoordPointer(1, gl.GL_FLOAT, 0, values)
                shader.SetUniform('colormap', self._colormap)
                SH_ALBEIDO = shaders.SH_MF_ALBEIDO_LUT1
            elif values.shape[1] == 2 and self._texture is not None:
                # texcoords, use original values
                useTexCords = True
                gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
                gl.glTexCoordPointerf(values)
                shader.SetUniform('texture', self._texture)
                SH_ALBEIDO = shaders.SH_MF_ALBEIDO_LUT2
            elif values.shape[1] in [3,4]:
                # Color, use values2
                values = values2 
                gl.glEnable(gl.GL_COLOR_MATERIAL)
                gl.glColorMaterial(gl.GL_FRONT_AND_BACK,
                                    gl.GL_AMBIENT_AND_DIFFUSE)
                gl.glEnableClientState(gl.GL_COLOR_ARRAY)
                gl.glColorPointerf(values)
                if values.shape[1] == 3:
                    SH_ALBEIDO = shaders.SH_MF_ALBEIDO_RGB
                else:
                    SH_ALBEIDO = shaders.SH_MF_ALBEIDO_RGBA
        
        # Prepare material (ambient and diffuse may be overridden by colors)
        if shading == 'plain':
            gl.glColor(*refColor)
        else:
            # Set glColor: unless ALBEIDO is RGB or RGBA,
            # this is used to determine the alpha value
            gl.glColor(*refColor)
            # Set material properties
            what = gl.GL_FRONT_AND_BACK
            gc = _getColor
            gl.glMaterial(what, gl.GL_AMBIENT, gc(self._ambient, refColor))
            gl.glMaterial(what, gl.GL_DIFFUSE, gc(self._diffuse, refColor))
            gl.glMaterial(what, gl.GL_SPECULAR, gc(self._specular, (1,1,1,1)))
            gl.glMaterial(what, gl.GL_SHININESS, self._shininess)
            gl.glMaterial(what, gl.GL_EMISSION, gc(self._emission, refColor))
        
        
        # Prepare lights
        if shading != 'plain':
            gl.glEnable(gl.GL_LIGHTING)
            gl.glEnable(gl.GL_NORMALIZE)  # GL_NORMALIZE or GL_RESCALE_NORMAL
            if shading == 'flat':
                gl.glShadeModel(gl.GL_FLAT)
            else:
                gl.glShadeModel(gl.GL_SMOOTH)
        
        
        # Set culling (take data aspect into account!)
        # From visvis v1.6 we use the right hand rule (CCW)
        axes = self.GetAxes()
        tmp = 1
        if axes:
            for i in axes.daspect:
                if i<0:
                    tmp *= -1
        gl.glFrontFace({1:gl.GL_CCW, -1:gl.GL_CW}[tmp])
        if self._cullFaces:
            gl.glEnable(gl.GL_CULL_FACE)
            gl.glCullFace(self._cullFaces)
        
        
        # Check number of lights
        self._EnsureRightNumberOfLights(axes, shader)
        
        # Ensure that the right albeido shader part is selected
        if shader.fragment.HasPart('albeido'):            
            shader.fragment.AddOrReplace(SH_ALBEIDO)
        
        
        if shader.isUsable and shader.hasCode and not self.useNativeShading:
            # GLSL shading
            shader.Enable()
        else:
            # Fixed pipeline
            if SH_ALBEIDO is shaders.SH_MF_ALBEIDO_LUT1:
                shader.EnableTextureOnly('colormap')
            elif SH_ALBEIDO is shaders.SH_MF_ALBEIDO_LUT2:
                shader.EnableTextureOnly('texture')
        
        # Draw
        type = {3:gl.GL_TRIANGLES, 4:gl.GL_QUADS}[self._verticesPerFace]
        if self._faces is None:
            gl.glDrawArrays(type, 0, self._vertices.shape[0])
        else:
            # Get data type
            if self._faces.dtype == np.uint8:
                face_dtype = gl.GL_UNSIGNED_BYTE
            elif self._faces.dtype == np.uint16:
                face_dtype = gl.GL_UNSIGNED_SHORT
            else:
                face_dtype = gl.GL_UNSIGNED_INT
            # Go
            N = self._faces.size
            gl.glDrawElements(type, N, face_dtype, self._faces)
        
        # Clean up
        gl.glFlush()
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glDisableClientState(gl.GL_NORMAL_ARRAY)
        gl.glDisableClientState(gl.GL_COLOR_ARRAY)
        gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
        #
        shader.Disable()
        #
        gl.glDisable(gl.GL_COLOR_MATERIAL)
        gl.glShadeModel(gl.GL_FLAT)
        #
        gl.glDisable(gl.GL_LIGHTING)
        gl.glDisable(gl.GL_NORMALIZE)
        gl.glDisable(gl.GL_CULL_FACE)
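
The dtype dispatch before glDrawElements is the standard way to tell OpenGL how wide each index is: the GL element type must match the NumPy dtype of the face array. A minimal sketch of that dispatch, using the standard GL enum values so it runs without PyOpenGL:

import numpy as np

GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, GL_UNSIGNED_INT = 0x1401, 0x1403, 0x1405

def gl_index_type(faces):
    # Map the index array's dtype to the matching GL element type.
    return {np.dtype(np.uint8): GL_UNSIGNED_BYTE,
            np.dtype(np.uint16): GL_UNSIGNED_SHORT}.get(faces.dtype, GL_UNSIGNED_INT)

faces = np.array([0, 1, 2], dtype=np.uint8)   # small meshes fit in one byte per index
print(hex(gl_index_type(faces)))              # 0x1401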

Example 104

Project: LASIF Source File: visualizations.py
    def plot_windows(self, event, iteration, distance_bins=500,
                     ax=None, show=True):
        """
        Plot all selected windows on an epicentral distance vs. duration
        plot, with color encoding the selected channels. This gives a quick
        overview of how well the windows for a certain event and iteration
        are selected.

        :param event: The event.
        :param iteration: The iteration.
        :param distance_bins: The number of bins on the epicentral
            distance axis.
        :param ax: If given, it will be plotted to this ax.
        :param show: If true, ``plt.show()`` will be called before returning.
        :return: The potentially created axes object.
        """
        from obspy.geodetics.base import locations2degrees

        event = self.comm.events.get(event)
        iteration = self.comm.iterations.get(iteration)
        pparam = iteration.get_process_params()
        window_manager = self.comm.windows.get(event, iteration)

        starttime = event["origin_time"]
        duration = (pparam["npts"] - 1) * pparam["dt"]

        # First step is to calculate all epicentral distances.
        stations = copy.deepcopy(self.comm.query.get_all_stations_for_event(
            event["event_name"]))
        for s in stations.values():
            s["epicentral_distance"] = locations2degrees(
                event["latitude"], event["longitude"], s["latitude"],
                s["longitude"])

        # Plot from 0 to however far it goes.
        min_epicentral_distance = 0
        max_epicentral_distance = math.ceil(max(
            _i["epicentral_distance"] for _i in stations.values()))
        epicentral_range = max_epicentral_distance - min_epicentral_distance

        if epicentral_range == 0:
            raise ValueError

        # Create the image that will represent the pictures in an epicentral
        # distance plot. By default everything is black.
        #
        # First dimension: Epicentral distance.
        # Second dimension: Time.
        # Third dimension: RGB tuple.
        len_time = 1000
        len_dist = distance_bins
        image = np.zeros((len_dist, len_time, 3), dtype=np.uint8)

        # Helper functions calculating the indices.
        def _time_index(value):
            frac = np.clip((value - starttime) / duration, 0, 1)
            return int(round(frac * (len_time - 1)))

        def _space_index(value):
            frac = np.clip(
                (value - min_epicentral_distance) / epicentral_range, 0, 1)
            return int(round(frac * (len_dist - 1)))

        def _color_index(channel):
            _map = {
                "Z": 2,
                "N": 1,
                "E": 0
            }
            channel = channel[-1].upper()
            if channel not in _map:
                raise ValueError
            return _map[channel]

        for channel in window_manager.list():
            station = ".".join(channel.split(".")[:2])
            for win in window_manager.get(channel):
                image[
                    _space_index(stations[station]["epicentral_distance"]),
                    _time_index(win.starttime):_time_index(win.endtime),
                    _color_index(channel)] = 255

        # From http://colorbrewer2.org/
        color_map = {
            (255, 0, 0): (228, 26, 28),  # red
            (0, 255, 0): (77, 175, 74),  # green
            (0, 0, 255): (55, 126, 184),  # blue
            (255, 0, 255): (152, 78, 163),  # purple
            (0, 255, 255): (255, 127, 0),  # orange
            (255, 255, 0): (255, 255, 51),  # yellow
            (255, 255, 255): (250, 250, 250),  # white
            (0, 0, 0): (50, 50, 50)  # More pleasant gray background
        }

        # Replace colors...fairly complex. Not sure if there is another way...
        red, green, blue = image[:, :, 0], image[:, :, 1], image[:, :, 2]
        for color, replacement in color_map.items():
            image[:, :, :][(red == color[0]) & (green == color[1]) &
                           (blue == color[2])] = replacement

        def _one(i):
            return [_i / 255.0 for _i in i]

        import matplotlib.pylab as plt
        plt.style.use("ggplot")

        artists = [
            plt.Rectangle((0, 1), 1, 1, color=_one(color_map[(0, 0, 255)])),
            plt.Rectangle((0, 1), 1, 1, color=_one(color_map[(0, 255, 0)])),
            plt.Rectangle((0, 1), 1, 1, color=_one(color_map[(255, 0, 0)])),
            plt.Rectangle((0, 1), 1, 1, color=_one(color_map[(0, 255, 255)])),
            plt.Rectangle((0, 1), 1, 1, color=_one(color_map[(255, 0, 255)])),
            plt.Rectangle((0, 1), 1, 1, color=_one(color_map[(255, 255, 0)])),
            plt.Rectangle((0, 1), 1, 1,
                          color=_one(color_map[(255, 255, 255)]))
        ]
        labels = [
            "Z",
            "N",
            "E",
            "Z + N",
            "Z + E",
            "N + E",
            "Z + N + E"
        ]

        if ax is None:
            plt.figure(figsize=(16, 9))
            ax = plt.gca()

        ax.imshow(image, aspect="auto", interpolation="nearest", vmin=0,
                  vmax=255, origin="lower")
        ax.grid()
        ax.set_title("Selected windows for iteration %s and event %s" % (
                     iteration.name, event["event_name"]))

        ax.legend(artists, labels, loc="lower right",
                  title="Selected Components")

        # Set the x-ticks.
        xticks = []
        for time in ax.get_xticks():
            # They are offset by -0.5.
            time += 0.5
            # Convert to actual time
            frac = time / float(len_time)
            time = frac * duration
            xticks.append("%.1f" % time)
        ax.set_xticklabels(xticks)
        ax.set_xlabel("Time since event in seconds")

        yticks = []
        for dist in ax.get_yticks():
            # They are offset by -0.5.
            dist += 0.5
            # Convert to actual epicentral distance.
            frac = dist / float(len_dist)
            dist = min_epicentral_distance + (frac * epicentral_range)
            yticks.append("%.1f" % dist)
        ax.set_yticklabels(yticks)
        ax.set_ylabel("Epicentral distance in degree [Binned in %i distances]"
                      % distance_bins)

        if show:
            plt.tight_layout()
            plt.show()
            plt.close()

        return ax
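
The color-replacement loop works channel-wise on a uint8 RGB image: build a boolean mask per primary color, then assign the replacement tuple through it. A standalone sketch of the same move:

import numpy as np

image = np.zeros((2, 2, 3), dtype=np.uint8)
image[0, 0] = (255, 0, 0)                    # one pure-red pixel
red, green, blue = image[..., 0], image[..., 1], image[..., 2]
mask = (red == 255) & (green == 0) & (blue == 0)
image[mask] = (228, 26, 28)                  # swap in the ColorBrewer red
print(image[0, 0])                           # [228  26  28]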

Example 105

Project: imageio Source File: avbin.py
Function: get_data
        def _get_data(self, index, out=None):
            avbin = self.format.avbinlib()
            
            # Modulo index (for looping)
            if self._meta['nframes'] and self._meta['nframes'] < float('inf'):
                if self._arg_loop:
                    index = index % self._meta['nframes']
            
            # Check index
            if index < 0:
                raise IndexError('Frame index must be >= 0')
            elif index >= self._meta['nframes']:
                raise IndexError('Reached end of video')
            elif index != self._framecounter:
                if index == 0:  # Rewind
                    self._close()
                    self._init_video()
                    return self._get_data(0)
                raise IndexError('Avbin format cannot seek')
            
            self._framecounter += 1            
            
            if out is None:
                out = self.create_empty_image()
            
            assert (out.dtype == np.uint8 and out.flags.c_contiguous and
                    out.shape == (self._height, self._width, 3))
            
            # Read from the file until the next packet of our video
            # stream is found
            while True:
                try:
                    avbin.avbin_read(self._file, ctypes.byref(self._packet))
                except RuntimeError:  # todo: I hope we can fix this ...
                    raise IndexError('Reached end of video too soon')
                if self._packet.stream_index != self._stream_index:
                    continue

                # Decode the image, storing data in the out array
                try:
                    ptr = out.ctypes.data
                except Exception:  # pragma: no cover - IS_PYPY
                    ptr = out.__array_interface__['data'][0]
                result = avbin.avbin_decode_video(self._stream, 
                                                  self._packet.data, 
                                                  self._packet.size, 
                                                  ptr)
                
                # Check for success. If not, continue reading the file stream
                # AK: disabled for now, because this will make the file
                # shorter; you're just dropping frames! We need to think
                # of a better solution ...
                if (not self._arg_skipempty) or result != -1:
                    break
            
            # Return array and dummy meta data
            return out, dict(timestamp=self._packet.timestamp)
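
Before decoding into the buffer, the reader asserts the frame is a C-contiguous uint8 array of shape (height, width, 3), because the decoder writes raw bytes through a pointer. A minimal sketch of allocating and validating such a buffer:

import numpy as np

height, width = 4, 6
out = np.empty((height, width, 3), dtype=np.uint8)   # one byte per RGB sample
assert out.dtype == np.uint8 and out.flags.c_contiguous
ptr = out.ctypes.data         # raw address a C decoder could write into
print(out.nbytes, ptr != 0)   # 72 True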

Example 106

Project: GstStabilizer Source File: cv_flow_finder.py
    def optical_flow_img(self, img0, img1, blob_buf0=None):
        # for us, blob_buf0 is in the format:
        # corners
        # for now.
        # TODO: add pyramid?

        if blob_buf0 is not None and len(blob_buf0) > self.corner_count / 2:
            corners0 = blob_buf0
        else:
            corners0 = self._features(img0)

        n_features = len(corners0)

        corners1, status, errors = cv2.calcOpticalFlowPyrLK(
                    img0, img1, corners0, None,
                    winSize=(self.win_size,) * 2,
                    maxLevel=self.pyramid_level,
                    criteria=(cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS,
                              self.max_iterations, self.epsilon)
                    )

        # These are a few workarounds because OpenCV returns things in a
        # format slightly different from what we want.
        if status.dtype == numpy.uint8:
            status.dtype = numpy.bool8
        if len(status.shape) > 1:
            assert(status.shape[1] == 1)
            status.shape = status.shape[:1]
        if len(errors.shape) > 1:
            assert(errors.shape[1] == 1)
            errors.shape = errors.shape[:1]

        corners0 = corners0[status]
        corners1 = corners1[status]

        errors = errors[status]
        print "%d features found, %d matched"  % (n_features, len(corners0)), ';',
        if len(errors):
            print "errors min/max/avg:", (min(errors),
                                          max(errors),
                                          (sum(errors)/len(errors)))

        return ((corners0, corners1), corners1)
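
Assigning to status.dtype reinterprets the uint8 buffer in place as booleans (same bytes, no copy); numpy.bool8 has since been removed, and the equivalent modern spelling is a .view. A minimal sketch:

import numpy as np

status = np.array([1, 0, 1], dtype=np.uint8)   # shape/dtype as returned by calcOpticalFlowPyrLK
flags = status.view(bool)                      # reinterpret the same bytes, no copy
print(flags)                                   # [ True False  True]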

Example 107

Project: opencv-python-blueprints Source File: calibrate.py
Function: process_frame
    def _process_frame(self, frame):
        """Processes each frame

            If recording mode is on (self.recording==True), this method will
            perform all the hard work of the camera calibration process:
            - for every frame, until enough frames have been processed:
                - find the chessboard corners
                - refine the coordinates of the detected corners
            - after enough frames have been processed:
                - estimate the intrinsic camera matrix and distortion
                  coefficients

            :param frame: current RGB video frame
            :returns: annotated video frame showing detected chessboard corners
        """
        # if we are not recording, just display the frame
        if not self.recording:
            return frame

        # else we're recording
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.uint8)

        if self.record_cnt < self.record_min_num_frames:
            # need at least some number of chessboard samples before we can
            # calculate the intrinsic matrix
            ret, corners = cv2.findChessboardCorners(img_gray,
                                                     self.chessboard_size,
                                                     None)

            if ret:
                cv2.drawChessboardCorners(frame, self.chessboard_size, corners,
                                          ret)

                # refine found corners
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                            30, 0.01)
                cv2.cornerSubPix(img_gray, corners, (9, 9), (-1, -1), criteria)

                self.obj_points.append(self.objp)
                self.img_points.append(corners)
                self.record_cnt += 1

        else:
            # we have already collected enough frames, so now we want to
            # calculate the intrinsic camera matrix (K) and the distortion
            # vector (dist)
            print "Calibrating..."
            ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(self.obj_points,
                                                             self.img_points,
                                                             (self.imgHeight,
                                                              self.imgWidth),
                                                             None, None)
            print "K=", K
            print "dist=", dist

            # double-check reconstruction error (should be as close to zero as
            # possible)
            mean_error = 0
            for i in xrange(len(self.obj_points)):
                img_points2, _ = cv2.projectPoints(self.obj_points[i],
                                                   rvecs[i], tvecs[i], K, dist)
                error = cv2.norm(self.img_points[i], img_points2,
                                 cv2.NORM_L2)/len(img_points2)
                mean_error += error

            print "mean error=", mean_error

            self.recording = False
            self._reset_recording()
            self.button_calibrate.Enable()

        return frame
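
The astype(np.uint8) cast above is harmless because cvtColor already produces 8-bit values, but in general astype does not saturate: out-of-range floats give platform-dependent results, so clip first when narrowing. A sketch:

import numpy as np

f = np.array([-10.0, 42.0, 300.0])
safe = np.clip(f, 0, 255).astype(np.uint8)   # saturate, then narrow
print(safe)                                  # [  0  42 255]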

Example 108

Project: reseg Source File: camvid.py
def load_dataset_camvid(path, load_greylevel_mask=False, classes='subset_11',
                        resize_images=False,
                        resize_size=-1,
                        use_standard_split=True,
                        save=False,
                        color_space='RGB'):
    # WORKING: but image Seq05VD_f02610_L.png has some problems; some pixels
    # have other values, so they are treated as Void

    img_train_path = os.path.join(path, 'imgs', 'train')
    img_test_path = os.path.join(path, 'imgs', 'test')
    img_val_path = os.path.join(path, 'imgs', 'val')

    gt_train_path = os.path.join(path, 'gt', 'train')
    gt_test_path = os.path.join(path, 'gt', 'test')
    gt_val_path = os.path.join(path, 'gt', 'val')

    camvid_all_colors = OrderedDict([
        ("Animal", np.array([[64, 128, 64]], dtype=np.uint8)),
        ("Archway", np.array([[192, 0, 128]], dtype=np.uint8)),
        ("Bicyclist", np.array([[0, 128, 192]], dtype=np.uint8)),
        ("Bridge", np.array([[0, 128, 64]], dtype=np.uint8)),
        ("Building", np.array([[128, 0, 0]], dtype=np.uint8)),
        ("Car", np.array([[64, 0, 128]], dtype=np.uint8)),
        ("CartLuggagePram", np.array([[64, 0, 192]], dtype=np.uint8)),
        ("Child", np.array([[192, 128, 64]], dtype=np.uint8)),
        ("Column_Pole", np.array([[192, 192, 128]], dtype=np.uint8)),
        ("Fence", np.array([[64, 64, 128]], dtype=np.uint8)),
        ("LaneMkgsDriv", np.array([[128, 0, 192]], dtype=np.uint8)),
        ("LaneMkgsNonDriv", np.array([[192, 0, 64]], dtype=np.uint8)),
        ("Misc_Text", np.array([[128, 128, 64]], dtype=np.uint8)),
        ("MotorcycleScooter", np.array([[192, 0, 192]], dtype=np.uint8)),
        ("OtherMoving", np.array([[128, 64, 64]], dtype=np.uint8)),
        ("ParkingBlock", np.array([[64, 192, 128]], dtype=np.uint8)),
        ("Pedestrian", np.array([[64, 64, 0]], dtype=np.uint8)),
        ("Road", np.array([[128, 64, 128]], dtype=np.uint8)),
        ("RoadShoulder", np.array([[128, 128, 192]], dtype=np.uint8)),
        ("Sidewalk", np.array([[0, 0, 192]], dtype=np.uint8)),
        ("SignSymbol", np.array([[192, 128, 128]], dtype=np.uint8)),
        ("Sky", np.array([[128, 128, 128]], dtype=np.uint8)),
        ("SUVPickupTruck", np.array([[64, 128, 192]], dtype=np.uint8)),
        ("TrafficCone", np.array([[0, 0, 64]], dtype=np.uint8)),
        ("TrafficLight", np.array([[0, 64, 64]], dtype=np.uint8)),
        ("Train", np.array([[192, 64, 128]], dtype=np.uint8)),
        ("Tree", np.array([[128, 128, 0]], dtype=np.uint8)),
        ("Truck_Bus", np.array([[192, 128, 192]], dtype=np.uint8)),
        ("Tunnel", np.array([[64, 0, 64]], dtype=np.uint8)),
        ("VegetationMisc", np.array([[192, 192, 0]], dtype=np.uint8)),
        ("Wall", np.array([[64, 192, 0]], dtype=np.uint8)),
        ("Void", np.array([[0, 0, 0]], dtype=np.uint8))
    ])

    camvid_11_colors = OrderedDict([
        ("Sky", np.array([[128, 128, 128]], dtype=np.uint8)),
        ("Building", np.array([[128, 0, 0],   # Building
                               [64, 192, 0],  # Wall
                               [0, 128, 64]   # Bridge
                               ], dtype=np.uint8)),
        ("Column_Pole", np.array([[192, 192, 128]], dtype=np.uint8)),
        ("Road", np.array([[128, 64, 128],  # Road
                           [128, 0, 192],   # LaneMkgsDriv
                           [192, 0, 64],    # LaneMkgsNonDriv
                           [128, 128, 192]  # RoadShoulder
                           ], dtype=np.uint8)),
        ("Sidewalk", np.array([[0, 0, 192],    # Sidewalk
                               [64, 192, 128]  # ParkingBlock
                               ], dtype=np.uint8)),
        ("Tree", np.array([[128, 128, 0],  # Tree
                           [192, 192, 0]   # VegetationMisc
                           ], dtype=np.uint8)),
        ("SignSymbol", np.array([[192, 128, 128],  # SignSymbol
                                 # [128, 128, 64],   # Misc_Text
                                 [0, 64, 64],      # TrafficLight
                                 [0, 0, 64]        # TrafficCone
                                 ], dtype=np.uint8)),
        ("Fence", np.array([[64, 64, 128]], dtype=np.uint8)),
        ("Car", np.array([[64, 0, 128],     # Car
                          [192, 128, 192],  # Truck_Bus
                          [64, 128, 192],   # SUVPickupTruck
                          [128, 64, 64],    # OtherMoving
                          [64, 0, 192],     # CartLuggagePram
                          ], dtype=np.uint8)),
        ("Pedestrian", np.array([[64, 64, 0],    # Pedestrian
                                 [192, 128, 64]  # Child
                                 ], dtype=np.uint8)),
        ("Bicyclist", np.array([[0, 128, 192],  # Bicyclist
                                [192, 0, 192],  # MotorcycleScooter
                                ], dtype=np.uint8)),
        ("Void", np.array([[0, 0, 0]], dtype=np.uint8))
    ])  # consider as void all the other classes

    camvid_colors = camvid_11_colors if classes == 'subset_11' else \
        camvid_all_colors

    print "Processing Camvid train dataset..."
    img_train, mask_train, filenames_train = load_images(
        img_train_path, gt_train_path, camvid_colors, load_greylevel_mask,
        resize_images, resize_size, save, color_space)

    print "Processing Camvid test dataset..."
    img_test, mask_test, filenames_test = load_images(
        img_test_path, gt_test_path, camvid_colors, load_greylevel_mask,
        resize_images, resize_size, save, color_space)
    print "Processing Camvid validation dataset..."
    img_val, mask_val, filenames_val = load_images(
        img_val_path, gt_val_path, camvid_colors, load_greylevel_mask,
        resize_images, resize_size, save, color_space)

    return (img_train, mask_train, filenames_train,
            img_test, mask_test, filenames_test,
            img_val, mask_val, filenames_val)
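
Each table entry maps a class name to one or more RGB rows, each a (k, 3) uint8 array. A hedged sketch (the label-assignment loop here is hypothetical, not the reseg load_images code) of how such a table can turn an RGB ground-truth mask into integer labels:

import numpy as np
from collections import OrderedDict

colors = OrderedDict([
    ("Sky", np.array([[128, 128, 128]], dtype=np.uint8)),
    ("Building", np.array([[128, 0, 0], [64, 192, 0]], dtype=np.uint8)),  # Building + Wall
])
mask = np.array([[[128, 128, 128], [64, 192, 0]]], dtype=np.uint8)  # a 1x2 RGB mask
labels = np.zeros(mask.shape[:2], dtype=np.uint8)
for label, (name, rows) in enumerate(colors.items()):
    for rgb in rows:
        labels[(mask == rgb).all(axis=-1)] = label
print(labels)   # [[0 1]]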

Example 109

Project: pyrobotlab Source File: faceidentification.py
Function: read_images
def read_images(path, sz=(256,256)):
    """Reads the images in a given folder, resizes images on the fly if size is given.

    Args:
        path: Path to a folder with subfolders representing the subjects (persons).
        sz: A tuple with the target size; images are resized to it on the fly.

    Returns:
        A list [X, y, Z, folder_names]

            X: The images, a Python list of numpy arrays.
            y: The corresponding labels (the unique number of the subject, person) in a Python list.
            Z: The masked images (im & facefilter), a Python list of numpy arrays.
            folder_names: The list of all known names in the database
    """
    c = 0
    X,y,Z = [], [], []
    folder_names = []   # This will be the list of all known names in the database
    
    #
    # Files are in separate directories.  The directory holds the "name", each
    # of the images in the file are the samples for that name
    #
    for dirname, dirnames, filenames in os.walk(path):
        for subdirname in dirnames:
            folder_names.append(subdirname)
            subject_path = os.path.join(dirname, subdirname)
            number_for_this_subject = len(os.listdir(subject_path))
            last_number = None
            count = 1.0
            saved = 0

            for filename in os.listdir(subject_path):
                try:
                    #
                    # Limit the number of images per person to at most
                    # current_max_subjects; if there are more, take an
                    # evenly spaced sample of them.
                    #
                    if int(count*current_max_subjects/number_for_this_subject) != last_number:
                        #
                        # Get the image file
                        #
                        im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)
                    
                        #
                        # For some reason, windows sticks an indexing file into each directory
                        #
                        if filename != "Thumbs.db":
                            # resize to given size (if given)
                            if (sz is not None):
                                im = cv2.resize(im, sz)

                            im_mask = im & facefilter
                            X.append(np.asarray(im, dtype=np.uint8))
                            Z.append(np.asarray(im_mask, dtype=np.uint8))
                            y.append(c)           
                            saved += 1                 
                            
                except IOError, (errno, strerror):
                    print "I/O error({0}): {1}".format(errno, strerror)

                except:
                    #
                    # Ignore unreadable files
                    #
                    print "Unknown file error:", sys.exc_info()[0], im, sz
                    pass  
                last_number = int(count*current_max_subjects/number_for_this_subject)
                count += 1
                
            if debug_on:
                print saved, "images imported for subject[", c, "]: ", subdirname
            c += 1  
    return [X,y,Z, folder_names]
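
The masking step im & facefilter is a bitwise AND of two uint8 images: a 0 byte in the filter blacks the pixel out, a 255 byte passes it through unchanged. A minimal sketch (the facefilter array here is a hypothetical stand-in):

import numpy as np

im = np.full((2, 2), 200, dtype=np.uint8)
facefilter = np.array([[255, 0], [255, 0]], dtype=np.uint8)  # keep only the left column
print(im & facefilter)   # [[200   0]
                         #  [200   0]]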

Example 110

Project: shadertoy-render Source File: shadertoy-render.py
Function: init
    def __init__(self,
                 glsl,
                 filename,
                 interactive=True,
                 output_size=None,
                 render_size=None,
                 position=None,
                 start_time=0.0,
                 interval='auto',
                 duration=None,
                 always_on_top=False,
                 paused=False,
                 output=None,
                 progress_file=None,
                 ffmpeg_pipe=None):

        app.Canvas.__init__(self,
                            keys='interactive' if interactive else None,
                            size=render_size if render_size else output_size,
                            position=None,
                            title=filename,
                            always_on_top=always_on_top,
                            show=False,
                            resizable=ffmpeg_pipe is None)

        self._filename = filename
        self._interactive = interactive
        self._output_size = output_size
        self._render_size = render_size if render_size else output_size
        self._output = output
        self._profile = False
        self._paused = paused
        self._timer = None
        self._start_time = start_time
        self._interval = interval
        self._ffmpeg_pipe = ffmpeg_pipe

        # Determine number of frames to render

        if duration:
            assert interval != 'auto'
            self._render_frame_count = math.ceil(duration / interval) + 1
        elif not interactive:
            self._render_frame_count = 1
        else:
            self._render_frame_count = None

        self._render_frame_index = 0

        clock = time.clock()
        self._clock_time_zero = clock - start_time
        self._clock_time_start = clock

        if position is not None:
            self.position = position

        # Initialize with a "known good" shader program, so that we can set all
        # the inputs once against it.

        self.program = gloo.Program(vertex, fragment_template % error_shader)
        self.program["position"] = [(-1, -1), (-1, 1), (1, 1), (-1, -1), (1, 1), (1, -1)]
        self.program['iMouse'] = 0.0, 0.0, 0.0, 0.0
        self.program['iSampleRate'] = 44100.0

        for i in range(4):
            self.program['iChannelTime[%d]' % i] = 0.0
        self.program['iGlobalTime'] = start_time

        self.program['iOffset'] = 0.0, 0.0

        self.activate_zoom()
        self.set_channel_input(noise(resolution=256, nchannels=3), i=0)
        self.set_channel_input(noise(resolution=256, nchannels=1), i=1)

        self.set_shader(glsl)

        if interactive:
            if not paused:
                self.ensure_timer()
            self.show()
        else:
            self._tile_index = 0
            self._tile_count = ((output_size[0] + render_size[0] - 1) // render_size[0]) * \
                               ((output_size[1] + render_size[1] - 1) // render_size[1])
            self._tile_coord = [0, 0]
            self._progress_file = progress_file

            # Note that gloo.Texture2D and gloo.RenderBuffer use the numpy convention for dimensions ('shape'),
            # i.e., HxW

            self._rendertex = gloo.Texture2D(shape=render_size[::-1] + (4,))
            self._fbo = gloo.FrameBuffer(self._rendertex, gloo.RenderBuffer(shape=render_size[::-1]))

            # Allocate buffer to hold final image

            self._img = numpy.zeros(shape=self._output_size[::-1] + (4,), dtype=numpy.uint8)

            # Write progress file now so we'll know right away if there are any problems writing to it

            if self._progress_file:
                self.write_img(self._img, self._progress_file)

            self.program['iResolution'] = self._output_size + (0.,)
            self.ensure_timer()
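
The final image buffer is allocated as height x width x 4 uint8, following the NumPy shape convention mentioned in the comment, with one byte per RGBA channel. A sketch of the allocation:

import numpy as np

output_size = (1920, 1080)    # (width, height), as the renderer stores it
img = np.zeros(shape=output_size[::-1] + (4,), dtype=np.uint8)
print(img.shape, img.nbytes)  # (1080, 1920, 4) 8294400 (about 8 MB)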

Example 111

Project: Theano-Lights Source File: toolbox.py
Function: mnist
def mnist(path='', distort=0,shuffle=False,nvalidation=10000):
	if distort!=0:
		ninst = 60000*(1 + distort)
		ntrain = ninst
		fd = open(os.path.join(path,'train-images-idx3-ubyte_distorted'))
	else:
		ninst = 60000
		try:
			fd = open(os.path.join(path,'train-images.idx3-ubyte'))
		except:
			fd = open(os.path.join(path,'train-images-idx3-ubyte'))
	loaded = np.fromfile(file=fd,dtype=np.uint8)
	trX = loaded[16:ninst*784+16].reshape((ninst,28*28)).astype(float)

	if distort!=0:
		fd = open(os.path.join(path,'train-labels-idx1-ubyte_distorted'))
	else:
		try:
			fd = open(os.path.join(path,'train-labels.idx1-ubyte'))
		except:
			fd = open(os.path.join(path,'train-labels-idx1-ubyte'))
	
	loaded = np.fromfile(file=fd,dtype=np.uint8)
	trY = loaded[8:ninst+8].reshape((ninst))

	try:
		fd = open(os.path.join(path,'t10k-images.idx3-ubyte'))
	except:
		fd = open(os.path.join(path,'t10k-images-idx3-ubyte'))

	loaded = np.fromfile(file=fd,dtype=np.uint8)
	teX = loaded[16:].reshape((10000,28*28)).astype(float)

	try:
		fd = open(os.path.join(path,'t10k-labels.idx1-ubyte'))
	except:
		fd = open(os.path.join(path,'t10k-labels-idx1-ubyte'))

	loaded = np.fromfile(file=fd,dtype=np.uint8)
	teY = loaded[8:].reshape((10000))

	trX /= 255.
	teX /= 255.
    
	ntrain = ninst-nvalidation*(1 + distort)
	vaX = trX[ntrain:ninst]
	vaY = trY[ntrain:ninst]
	trX = trX[0:ntrain]
	trY = trY[0:ntrain]

	if shuffle:
		idx = np.random.permutation(ntrain)
		trX_n = trX
		trY_n = trY
		for i in range(ntrain):
			trX[i] = trX_n[idx[i]]
			trY[i] = trY_n[idx[i]]
		trX_n = None
		trY_n = None

	trY = one_hot(trY, 10)
	vaY = one_hot(vaY, 10)
	teY = one_hot(teY, 10)

	data = {}
	data['P'] = len(trX)
	data['n_x'] = int(trX.shape[1])
	data['n_y'] = int(trY.shape[1])
	data['shape_x'] = (28,28)

	data['tr_X'] = trX.astype('float32'); 
	data['va_X'] = vaX.astype('float32'); 
	data['te_X'] = teX.astype('float32'); 
	data['tr_Y'] = trY.astype('float32'); 
	data['va_Y'] = vaY.astype('float32'); 
	data['te_Y'] = teY.astype('float32');

	return data
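
The loader treats each IDX file as a flat uint8 byte stream: read everything, skip the header (16 bytes for image files, 8 for label files), then reshape. A standalone sketch against an in-memory buffer rather than the MNIST files:

import numpy as np

# Fake a tiny IDX-style payload: 16 header bytes, then 2 "images" of 4 pixels each.
raw = bytes(16) + bytes([0, 255, 128, 64, 1, 2, 3, 4])
loaded = np.frombuffer(raw, dtype=np.uint8)         # np.fromfile's in-memory sibling
images = loaded[16:].reshape((2, 4)).astype(float) / 255.
print(images.shape, images.max())                   # (2, 4) 1.0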

Example 112

Project: hedge Source File: diff_shared_segmat.py
    @memoize_method
    def get_kernel(self, diff_op_cls, elgroup, for_benchmark=False):
        from cgen import \
                Pointer, POD, Value, ArrayOf, \
                Module, FunctionDeclaration, FunctionBody, Block, \
                Line, Define, Include, \
                Initializer, If, For, Statement, Assign

        from cgen import dtype_to_ctype
        from cgen.cuda import CudaShared, CudaGlobal

        discr = self.discr
        d = discr.dimensions
        dims = range(d)
        given = self.plan.given

        par = self.plan.parallelism

        diffmat_data = self.gpu_diffmats(diff_op_cls, elgroup)
        elgroup, = discr.element_groups

        float_type = given.float_type

        f_decl = CudaGlobal(FunctionDeclaration(Value("void", "apply_diff_mat"),
            [Pointer(POD(numpy.uint8, "gmem_diff_rst_mat")),
                #Pointer(POD(float_type, "debugbuf")),
                ] + [Pointer(POD(float_type, "drst%d_global" % i)) for i in dims]
            ))

        rst_channels = given.devdata.make_valid_tex_channel_count(d)
        cmod = Module([
                Include("pycuda-helpers.hpp"),
                Line(),
                Value("texture<fp_tex_%s, 1, cudaReadModeElementType>"
                    % dtype_to_ctype(float_type),
                    "field_tex"),
                Line(),
                Define("DIMENSIONS", discr.dimensions),
                Define("DOFS_PER_EL", given.dofs_per_el()),
                Line(),
                Define("SEGMENT_DOF", "threadIdx.x"),
                Define("PAR_MB_NR", "threadIdx.y"),
                Line(),
                Define("MB_SEGMENT", "blockIdx.x"),
                Define("MACROBLOCK_NR", "blockIdx.y"),
                Line(),
                Define("DOFS_PER_SEGMENT", self.plan.segment_size),
                Define("SEGMENTS_PER_MB", self.plan.segments_per_microblock()),
                Define("ALIGNED_DOFS_PER_MB", given.microblock.aligned_floats),
                Define("ELS_PER_MB", given.microblock.elements),
                Line(),
                Define("PAR_MB_COUNT", par.parallel),
                Define("INLINE_MB_COUNT", par.inline),
                Define("SEQ_MB_COUNT", par.serial),
                Line(),
                Define("THREAD_NUM", "(SEGMENT_DOF+PAR_MB_NR*DOFS_PER_SEGMENT)"),
                Define("COALESCING_THREAD_COUNT", "(PAR_MB_COUNT*DOFS_PER_SEGMENT)"),
                Line(),
                Define("MB_DOF_BASE", "(MB_SEGMENT*DOFS_PER_SEGMENT)"),
                Define("MB_DOF", "(MB_DOF_BASE+SEGMENT_DOF)"),
                Define("GLOBAL_MB_NR_BASE",
                    "(MACROBLOCK_NR*PAR_MB_COUNT*INLINE_MB_COUNT*SEQ_MB_COUNT)"),
                Define("GLOBAL_MB_NR",
                    "(GLOBAL_MB_NR_BASE"
                    "+ (seq_mb_number*PAR_MB_COUNT + PAR_MB_NR)*INLINE_MB_COUNT)"),
                Define("GLOBAL_MB_DOF_BASE", "(GLOBAL_MB_NR*ALIGNED_DOFS_PER_MB)"),
                Line(),
                Define("DIFFMAT_SEGMENT_FLOATS", diffmat_data.block_floats),
                Define("DIFFMAT_SEGMENT_BYTES", "(DIFFMAT_SEGMENT_FLOATS*%d)"
                     % given.float_size()),
                Define("DIFFMAT_COLUMNS", diffmat_data.matrix_columns),
                Line(),
                CudaShared(ArrayOf(POD(float_type, "smem_diff_rst_mat"),
                    "DIFFMAT_COLUMNS*DOFS_PER_SEGMENT")),
                Line(),
                ])

        S = Statement
        f_body = Block()

        f_body.extend_log_block("calculate responsibility data", [
            Initializer(POD(numpy.uint16, "mb_el"),
                "MB_DOF/DOFS_PER_EL"),
            ])

        from hedge.backends.cuda.tools import get_load_code
        f_body.extend(
            get_load_code(
                dest="smem_diff_rst_mat",
                base="gmem_diff_rst_mat + MB_SEGMENT*DIFFMAT_SEGMENT_BYTES",
                bytes="DIFFMAT_SEGMENT_BYTES",
                descr="load diff mat segment")
            +[S("__syncthreads()"), Line()])

        # ---------------------------------------------------------------------
        def get_scalar_diff_code():
            code = []
            for inl in range(par.inline):
                for axis in dims:
                    code.append(
                        Initializer(POD(float_type, "d%drst%d" % (inl, axis)), 0))

            code.append(Line())

            def get_mat_entry(row, col, axis):
                return ("smem_diff_rst_mat["
                        "%(row)s*DIFFMAT_COLUMNS + %(axis)s*DOFS_PER_EL"
                        " + %(col)s"
                        "]" % {"row":row, "col":col, "axis":axis}
                        )

            tex_channels = ["x", "y", "z", "w"]
            from hedge.backends.cuda.tools import unroll
            code.extend(
                    [POD(float_type, "field_value%d" % inl)
                        for inl in range(par.inline)]
                    +[Line()]
                    +unroll(lambda j: [
                        Assign("field_value%d" % inl,
                            "fp_tex1Dfetch(field_tex, GLOBAL_MB_DOF_BASE + %d*ALIGNED_DOFS_PER_MB "
                            "+ mb_el*DOFS_PER_EL + %s)" % (inl, j)
                            )
                        for inl in range(par.inline)]
                        +[Line()]
                        +[S("d%drst%d += %s * field_value%d"
                            % (inl, axis, get_mat_entry("SEGMENT_DOF", j, axis), inl))
                        for axis in dims
                        for inl in range(par.inline)]
                        +[Line()],
                        given.dofs_per_el(), self.plan.max_unroll)
                    )

            store_code = Block()
            for inl in range(par.inline):
                for rst_axis in dims:
                    store_code.append(Assign(
                        "drst%d_global[GLOBAL_MB_DOF_BASE"
                        " + %d*ALIGNED_DOFS_PER_MB + MB_DOF]" % (rst_axis, inl),
                        "d%drst%d" % (inl, rst_axis),
                        ))

            code.append(If("MB_DOF < DOFS_PER_EL*ELS_PER_MB", store_code))

            return code

        f_body.extend([
            For("unsigned short seq_mb_number = 0",
                "seq_mb_number < SEQ_MB_COUNT",
                "++seq_mb_number",
                Block(get_scalar_diff_code()))
            ])

        # finish off ----------------------------------------------------------
        cmod.append(FunctionBody(f_decl, f_body))

        if not for_benchmark and "cuda_dump_kernels" in discr.debug:
            from hedge.tools import open_unique_debug_file
            open_unique_debug_file("diff", ".cu").write(str(cmod))

        mod = SourceModule(cmod,
                keep="cuda_keep_kernels" in discr.debug,
                #options=["--maxrregcount=10"]
                )

        field_texref = mod.get_texref("field_tex")

        func = mod.get_function("apply_diff_mat")
        func.prepare(
                discr.dimensions*[float_type] + ["P"],
                block=(self.plan.segment_size, par.parallel, 1),
                texrefs=[field_texref])

        if "cuda_diff" in discr.debug:
            print "diff: lmem=%d smem=%d regs=%d" % (
                    func.local_size_bytes,
                    func.shared_size_bytes,
                    func.num_regs)

        return func, field_texref
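
Here numpy.uint8 appears only as the pointee type of gmem_diff_rst_mat: the differentiation matrices travel to the GPU as an opaque byte buffer. On the host side, .view(np.uint8) performs the same reinterpretation without copying. A sketch (output shown for a little-endian machine):

import numpy as np

mat = np.ones(4, dtype=np.float32)
raw = mat.view(np.uint8)       # same memory, seen as raw bytes
print(raw.shape, raw[:4])      # (16,) [  0   0 128  63]  -- the bytes of 1.0f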

Example 113

Project: appengine-opencv-sudoku-python Source File: sudoku_image_parser.py
    def _get_puzzle(self):
        """Get the numbers in the puzzle in a 9x9 array.

        Returns:
            A numpy.ndarray filled with the numbers of the puzzle.
        """

        # a 9x9 matrix to store our sudoku puzzle
        sudoku_matrix = np.zeros((NUM_ROWS, NUM_ROWS), np.uint8)

        contours, image_copy = self._get_major_contours(
                self.resized_largest_square,
                sigma1=3,
                threshold_type=cv2.THRESH_BINARY_INV,
                dilate=False)

        # Erode and dilate the image to further amplify features.
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        erode = cv2.erode(image_copy, kernel)
        dilate = cv2.dilate(erode, kernel)

        for contour in contours:
            area = cv2.contourArea(contour)

            # if 100 < area < 800:
            if 50 < area < 800:
                (bx, by, bw, bh) = cv2.boundingRect(contour)
                # if (100 < bw*bh < 1200) and (10 < bw < 40) and (25 < bh < 45):
                # aju
                if (100 < bw*bh < 1200) and (5 < bw < 40) and (10 < bh < 45):
                    # Get the region of interest, which contains the number.
                    roi = dilate[by:by + bh, bx:bx + bw]
                    small_roi = cv2.resize(roi, (10, 10))
                    feature = small_roi.reshape((1, 100)).astype(np.float32)

                    # Use the model to find the most likely number.
                    ret, results, neigh, dist = self.model.find_nearest(
                            feature, k=1)
                    integer = int(results.ravel()[0])

                    # gridx and gridy are indices of row and column in Sudoku
                    gridy = (bx + bw/2) / (SUDOKU_RESIZE / NUM_ROWS)
                    gridx = (by + bh/2) / (SUDOKU_RESIZE / NUM_ROWS)
                    sudoku_matrix.itemset((gridx, gridy), integer)

        return sudoku_matrix
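
The puzzle grid is a 9x9 uint8 matrix filled via itemset; itemset was removed in NumPy 2.0, and plain indexed assignment is the drop-in replacement. A sketch:

import numpy as np

NUM_ROWS = 9
sudoku_matrix = np.zeros((NUM_ROWS, NUM_ROWS), np.uint8)
sudoku_matrix[3, 5] = 7   # same effect as sudoku_matrix.itemset((3, 5), 7)
print(sudoku_matrix.sum(), sudoku_matrix.dtype)   # 7 uint8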

Example 114

Project: landlab Source File: rock_weathering.py
Function: main
def main():
    
    # INITIALIZE

    # User-defined parameters
    nr = 200  # number of rows in grid
    nc = 200  # number of columns in grid
    plot_interval = 0.05   # time interval for plotting (unscaled)
    run_duration = 5.0   # duration of run (unscaled)
    report_interval = 10.0  # report interval, in real-time seconds
    frac_spacing = 10  # average fracture spacing, nodes
    outfilename = 'wx' # name for netCDF files
    
    # Remember the clock time, and calculate when we next want to report
    # progress.
    current_real_time = time.time()
    next_report = current_real_time + report_interval
    
    # Counter for output files
    time_slice = 0

    # Create grid
    mg = RasterModelGrid(nr, nc, 1.0)
    
    # Make the boundaries be walls
    mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
    
    # Set up the states and pair transitions.
    ns_dict = { 0 : 'rock', 1 : 'saprolite' }
    xn_list = setup_transition_list()

    # Create the node-state array and attach it to the grid.
    # (Note use of numpy's uint8 data type. This saves memory AND allows us
    # to write output to a netCDF3 file; netCDF3 does not handle the default
    # 64-bit integer type)
    node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=np.uint8)
    
    node_state_grid[:] = make_frac_grid(frac_spacing, model_grid=mg)    
    
    # Create the CA model
    ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid)

    # Set up the color map
    rock_color = (0.8, 0.8, 0.8)
    sap_color = (0.4, 0.2, 0)
    clist = [rock_color, sap_color]
    my_cmap = matplotlib.colors.ListedColormap(clist)
    
    # Create a CAPlotter object for handling screen display
    ca_plotter = CAPlotter(ca, cmap=my_cmap)
    
    # Plot the initial grid
    ca_plotter.update_plot()
    
    # Output the initial grid to file
    write_netcdf((outfilename+str(time_slice)+'.nc'), mg, 
                 #format='NETCDF3_64BIT',
                 names='node_state_map')

    # RUN
    current_time = 0.0
    while current_time < run_duration:
        
        # Once in a while, print out simulation and real time to let the user
        # know that the sim is running ok
        current_real_time = time.time()
        if current_real_time >= next_report:
            print('Current sim time', current_time, '(',
                  100 * current_time/run_duration, '%)')
            next_report = current_real_time + report_interval
        
        # Run the model forward in time until the next output step
        ca.run(current_time+plot_interval, ca.node_state, 
               plot_each_transition=False)
        current_time += plot_interval
        
        # Plot the current grid
        ca_plotter.update_plot()
        
        # Output the current grid to a netCDF file
        time_slice += 1
        write_netcdf((outfilename+str(time_slice)+'.nc'), mg, 
                     #format='NETCDF3_64BIT',
                     names='node_state_map')        
        

    # FINALIZE

    # Plot
    ca_plotter.finalize()
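
The comment spells out the dtype choice: one byte per node state instead of the default 64-bit integer, which both saves memory and stays within netCDF3's type set. The size difference is easy to check:

import numpy as np

n = 200 * 200   # nodes in the 200x200 grid
print(np.zeros(n, dtype=np.uint8).nbytes)   # 40000 bytes
print(np.zeros(n, dtype=np.int64).nbytes)   # 320000 bytes, 8x larger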

Example 115

Project: instakit Source File: colortype.py
Function: colortype
def ColorType(name, *args, **kwargs):
    global color_types
    dtype = numpy.dtype(kwargs.pop('dtype', numpy.uint8))
    if name not in color_types[dtype.name]:
        channels = split_abbreviations(name)
        
        class Color(namedtuple(name, channels)):
            
            def __repr__(self):
                return "%s(dtype=%s, %s)" % (
                    name, self.__class__.dtype.name,
                    ', '.join(['%s=%s' % (i[0], i[1]) \
                        for i in self._asdict().items()]))
            
            def __hex__(self):
                return '0x' + "%x" * len(self) % self
            
            def __int__(self):
                return int(self.__hex__(), 16)
            
            def __long__(self):
                return long(self.__hex__(), 16)
            
            def __hash__(self):
                return self.__long__()
            
            def __eq__(self, other):
                if not len(other) == len(self):
                    return False
                return all([self[i] == other[i] for i in xrange(len(self))])
            
            def __unicode__(self):
                return unicode(repr(self))
            
            def composite(self):
                return numpy.dtype([
                    (k, self.__class__.dtype) for k, v in self._asdict().items()])
            
        Color.__name__ = "%s<%s>" % (name, dtype.name)
        Color.dtype = dtype
        color_types[dtype.name][name] = Color
    return color_types[dtype.name][name]
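
ColorType defaults its channel dtype to numpy.uint8, and composite() assembles a structured dtype with one such field per channel. A standalone sketch of that structured-dtype construction:

import numpy as np

rgb = np.dtype([(k, np.uint8) for k in ("R", "G", "B")])
pixel = np.zeros(1, dtype=rgb)   # one packed 3-byte record
pixel["R"] = 228
print(rgb.itemsize, pixel[0])    # 3 (228, 0, 0)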

Example 116

Project: chumpy Source File: api_compatibility.py
def main():
    
    #sample_array
    
    ###############################
    hd2('Array Creation Routines')
    
    hd3('Ones and zeros')

    r('empty', {'shape': (2,4,2)}, {'dtype': np.uint8, 'order': 'C'})
    r('empty_like', {'prototype': np.empty((2,4,2))}, {'dtype': np.float64, 'order': 'C'})
    r('eye', {'N': 10}, {'M': 5, 'k': 0, 'dtype': np.float64})
    r('identity', {'n': 10}, {'dtype': np.float64})
    r('ones', {'shape': (2,4,2)}, {'dtype': np.uint8, 'order': 'C'})
    r('ones_like', {'a': np.empty((2,4,2))}, {'dtype': np.float64, 'order': 'C'})
    r('zeros', {'shape': (2,4,2)}, {'dtype': np.uint8, 'order': 'C'})
    r('zeros_like', {'a': np.empty((2,4,2))}, {'dtype': np.float64, 'order': 'C'})
    
    hd3('From existing data')
    r('array', {'object': [1,2,3]}, {'dtype': np.float64, 'order': 'C', 'subok': False, 'ndmin': 2})
    r('asarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('asanyarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('ascontiguousarray', {'a': np.array([1,2,3])}, {'dtype': np.float64})
    r('asmatrix', {'data': np.array([1,2,3])}, {'dtype': np.float64})
    r('copy', (np.array([1,2,3]),), {})
    r('frombuffer', {'buffer': np.array([1,2,3])}, {})
    m('fromfile')
    r('fromfunction', {'function': lambda i, j: i + j, 'shape': (3, 3)}, {'dtype': np.float64})
    # function, shape, **kwargs
    # lambda i, j: i + j, (3, 3), dtype=int
    r('fromiter', {'iter': [1,2,3,4], 'dtype': np.float64}, {'count': 2})
    r('fromstring', {'string': '\x01\x02', 'dtype': np.uint8}, {})
    r('loadtxt', {'fname': StringIO("0 1\n2 3")}, {})

    hd3('Creating record arrays (wont be implemented)')
    hd3('Creating character arrays (wont be implemented)')

    hd3('Numerical ranges')
    r('arange', {'start': 0, 'stop': 10}, {'step': 2, 'dtype': np.float64})
    r('linspace', {'start': 0, 'stop': 10}, {'num': 2, 'endpoint': 10, 'retstep': 1})
    r('logspace', {'start': 0, 'stop': 10}, {'num': 2, 'endpoint': 10, 'base': 1})
    r('meshgrid', ([1,2,3], [4,5,6]), {})
    m('mgrid')
    m('ogrid')
    
    hd3('Building matrices')
    r('diag', {'v': np.arange(9).reshape((3,3))}, {'k': 0})
    r('diagflat', {'v': [[1,2], [3,4]]}, {})
    r('tri', {'N': 3}, {'M': 5, 'k': 2, 'dtype': np.float64})
    r('tril', {'m': [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]}, {'k': -1})
    r('triu', {'m': [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]}, {'k': -1})
    r('vander', {'x': np.array([1, 2, 3, 5])}, {'N': 3})
    
    ###############################
    hd2('Array manipulation routines')
    
    hd3('Basic operations')
    r('copyto', {'dst': np.eye(3), 'src': np.eye(3)}, {})
    
    hd3('Changing array shape')
    r('reshape', {'a': np.eye(3), 'newshape': (9,)}, {'order' : 'C'})
    r('ravel', {'a': np.eye(3)}, {'order' : 'C'})
    m('flat')
    m('flatten')
    
    hd3('Transpose-like operations')
    r('rollaxis', {'a': np.ones((3,4,5,6)), 'axis': 3}, {'start': 0})
    r('swapaxes', {'a': np.array([[1,2,3]]), 'axis1': 0, 'axis2': 1}, {})
    r('transpose', {'a': np.arange(4).reshape((2,2))}, {'axes': (1,0)})
    
    hd3('Changing number of dimensions')
    r('atleast_1d', (np.eye(3),), {})
    r('atleast_2d', (np.eye(3),), {})
    r('atleast_3d', (np.eye(3),), {})
    m('broadcast')
    m('broadcast_arrays')
    r('expand_dims', (np.array([1,2]),2), {})
    r('squeeze', {'a': (np.array([[[1,2,3]]]))}, {})
    
    hd3('Changing kind of array')
    r('asarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('asanyarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('asmatrix', {'data': np.array([1,2,3])}, {})
    r('asfarray', {'a': np.array([1,2,3])}, {})
    r('asfortranarray', {'a': np.array([1,2,3])}, {})
    r('asscalar', {'a': np.array([24])}, {})
    r('require', {'a': np.array([24])}, {})
    
    hd3('Joining arrays')
    m('column_stack')
    r('concatenate', ((np.eye(3), np.eye(3)),1), {})
    r('dstack', ((np.eye(3), np.eye(3)),), {})
    r('hstack', ((np.eye(3), np.eye(3)),), {})
    r('vstack', ((np.eye(3), np.eye(3)),), {})

    hd3('Splitting arrays')
    m('array_split')
    m('dsplit')
    m('hsplit')
    m('split')
    m('vsplit')

    hd3('Tiling arrays')
    r('tile', (np.array([0, 1, 2]),2), {})
    r('repeat', (np.array([[1,2],[3,4]]), 3), {'axis': 1})

    hd3('Adding and removing elements')
    m('delete')
    m('insert')
    m('append')
    m('resize')
    m('trim_zeros')
    m('unique')
    
    hd3('Rearranging elements')
    r('fliplr', (np.eye(3),), {})
    r('flipud', (np.eye(3),), {})
    r('reshape', {'a': np.eye(3), 'newshape': (9,)}, {'order' : 'C'})
    r('roll', (np.arange(10), 2), {})
    r('rot90', (np.arange(4).reshape((2,2)),), {})
        
    ###############################
    hd2('Linear algebra (numpy.linalg)')
    
    extra_args = {'nplib': numpy.linalg, 'chlib': ch.linalg}
    
    hd3('Matrix and dot products')
    r('dot', {'a': np.eye(3), 'b': np.eye(3)}, {})
    r('dot', {'a': np.eye(3).ravel(), 'b': np.eye(3).ravel()}, {})
    r('vdot', (np.eye(3).ravel(), np.eye(3).ravel()), {})
    r('inner', (np.eye(3).ravel(), np.eye(3).ravel()), {})
    r('outer', (np.eye(3).ravel(), np.eye(3).ravel()), {})
    r('tensordot', {'a': np.eye(3), 'b': np.eye(3)}, {})
    m('einsum')
    r('matrix_power', {'M': np.eye(3), 'n': 2}, {}, **extra_args)
    r('kron', {'a': np.eye(3), 'b': np.eye(3)}, {})
        
    hd3('Decompositions')
    r('cholesky', {'a': np.eye(3)}, {}, **extra_args)
    r('qr', {'a': np.eye(3)}, {}, **extra_args)
    r('svd', (np.eye(3),), {}, **extra_args)
    
    hd3('Matrix eigenvalues')
    r('eig', (np.eye(3),), {}, **extra_args)
    r('eigh', (np.eye(3),), {}, **extra_args)
    r('eigvals', (np.eye(3),), {}, **extra_args)
    r('eigvalsh', (np.eye(3),), {}, **extra_args)
    
    hd3('Norms and other numbers')
    r('norm', (np.eye(3),), {}, **extra_args)
    r('cond', (np.eye(3),), {}, **extra_args)
    r('det', (np.eye(3),), {}, **extra_args)
    r('slogdet', (np.eye(3),), {}, **extra_args)
    r('trace', (np.eye(3),), {})
    
    hd3('Solving equations and inverting matrices')
    r('solve', (np.eye(3),np.ones(3)), {}, **extra_args)
    r('tensorsolve', (np.eye(3),np.ones(3)), {}, **extra_args)
    r('lstsq', (np.eye(3),np.ones(3)), {}, **extra_args)
    r('inv', (np.eye(3),), {}, **extra_args)
    r('pinv', (np.eye(3),), {}, **extra_args)
    r('tensorinv', (np.eye(4*6).reshape((4,6,8,3)),), {'ind': 2}, **extra_args)
    
    hd2('Mathematical functions')

    hd3('Trigonometric functions')
    r('sin', (np.arange(3),), {})
    r('cos', (np.arange(3),), {})
    r('tan', (np.arange(3),), {})
    r('arcsin', (np.arange(3)/3.,), {})
    r('arccos', (np.arange(3)/3.,), {})
    r('arctan', (np.arange(3)/3.,), {})
    r('hypot', (np.arange(3),np.arange(3)), {})
    r('arctan2', (np.arange(3),np.arange(3)), {})
    r('degrees', (np.arange(3),), {})
    r('radians', (np.arange(3),), {})
    r('unwrap', (np.arange(3),), {})
    r('deg2rad', (np.arange(3),), {})
    r('rad2deg', (np.arange(3),), {})
    
    hd3('Hyperbolic functions')
    r('sinh', (np.arange(3),), {})
    r('cosh', (np.arange(3),), {})
    r('tanh', (np.arange(3),), {})
    r('arcsinh', (np.arange(3)/9.,), {})
    r('arccosh', (1 + np.arange(3)/9.,), {})  # arccosh domain is [1, inf)
    r('arctanh', (np.arange(3)/9.,), {})
    
    hd3('Rounding')
    r('around', (np.arange(3),), {})
    r('round_', (np.arange(3),), {})
    r('rint', (np.arange(3),), {})
    r('fix', (np.arange(3),), {})
    r('floor', (np.arange(3),), {})
    r('ceil', (np.arange(3),), {})
    r('trunc', (np.arange(3),), {})
    
    hd3('Sums, products, differences')
    r('prod', (np.arange(3),), {})
    r('sum', (np.arange(3),), {})
    r('nansum', (np.arange(3),), {})
    r('cumprod', (np.arange(3),), {})
    r('cumsum', (np.arange(3),), {})
    r('diff', (np.arange(3),), {})
    r('ediff1d', (np.arange(3),), {})
    r('gradient', (np.arange(3),), {})
    r('cross', (np.arange(3), np.arange(3)), {})
    r('trapz', (np.arange(3),), {})
    
    hd3('Exponents and logarithms')
    r('exp', (np.arange(3),), {})
    r('expm1', (np.arange(3),), {})
    r('exp2', (np.arange(3),), {})
    r('log', (np.arange(3),), {})
    r('log10', (np.arange(3),), {})
    r('log2', (np.arange(3),), {})
    r('log1p', (np.arange(3),), {})
    r('logaddexp', (np.arange(3), np.arange(3)), {})
    r('logaddexp2', (np.arange(3), np.arange(3)), {})
    
    hd3('Other special functions')
    r('i0', (np.arange(3),), {})
    r('sinc', (np.arange(3),), {})
    
    hd3('Floating point routines')
    r('signbit', (np.arange(3),), {})
    r('copysign', (np.arange(3), np.arange(3)), {})
    r('frexp', (np.arange(3),), {})
    r('ldexp', (np.arange(3), np.arange(3)), {})
    
    hd3('Arithmetic operations')
    r('add', (np.arange(3), np.arange(3)), {})
    r('reciprocal', (np.arange(3),), {})
    r('negative', (np.arange(3),), {})
    r('multiply', (np.arange(3), np.arange(3)), {})
    r('divide', (np.arange(3), np.arange(3)), {})
    r('power', (np.arange(3), np.arange(3)), {})
    r('subtract', (np.arange(3), np.arange(3)), {})
    r('true_divide', (np.arange(3), np.arange(3)), {})
    r('floor_divide', (np.arange(3), np.arange(3)), {})
    r('fmod', (np.arange(3), np.arange(3)), {})
    r('mod', (np.arange(3), np.arange(3)), {})
    r('modf', (np.arange(3),), {})
    r('remainder', (np.arange(3), np.arange(3)), {})
    
    hd3('Handling complex numbers')
    m('angle')
    m('real')
    m('imag')
    m('conj')
    
    hd3('Miscellaneous')
    r('convolve', (np.arange(3), np.arange(3)), {})
    r('clip', (np.arange(3), 0, 2), {})
    r('sqrt', (np.arange(3),), {})
    r('square', (np.arange(3),), {})
    r('absolute', (np.arange(3),), {})
    r('fabs', (np.arange(3),), {})
    r('sign', (np.arange(3),), {})
    r('maximum', (np.arange(3), np.arange(3)), {})
    r('minimum', (np.arange(3), np.arange(3)), {})
    r('fmax', (np.arange(3), np.arange(3)), {})
    r('fmin', (np.arange(3), np.arange(3)), {})
    r('nan_to_num', (np.arange(3),), {})
    r('real_if_close', (np.arange(3),), {})
    r('interp', (2.5, [1,2,3], [3,2,0]), {})
    
    extra_args = {'nplib': numpy.random, 'chlib': ch.random}
    
    hd2('Random sampling (numpy.random)')
    hd3('Simple random data')
    r('rand', (3,), {}, **extra_args)
    r('randn', (3,), {}, **extra_args)
    r('randint', (3,), {}, **extra_args)
    r('random_integers', (3,), {}, **extra_args)
    r('random_sample', (3,), {}, **extra_args)
    r('random', (3,), {}, **extra_args)
    r('ranf', (3,), {}, **extra_args)
    r('sample', (3,), {}, **extra_args)
    r('choice', (np.ones(3),), {}, **extra_args)
    r('bytes', (3,), {}, **extra_args)
    
    hd3('Permutations')
    r('shuffle', (np.ones(3),), {}, **extra_args)
    r('permutation', (3,), {}, **extra_args)
    
    hd3('Distributions (these all pass)')
    r('beta', (.5, .5), {}, **extra_args)
    r('binomial', (.5, .5), {}, **extra_args)
    r('chisquare', (.5,), {}, **extra_args)
    r('dirichlet', ((10, 5, 3), 20,), {}, **extra_args)
    r('exponential', [], {}, **extra_args)
    r('f', [1,48,1000], {}, **extra_args)
    r('gamma', [.5], {}, **extra_args)
    make_row('...AND 28 OTHERS...', 'passed', 'passed', 'lightgreen', 'lightgreen')
    
    
    hd3('Random generator')
    r('seed', [], {}, **extra_args)
    r('get_state', [], {}, **extra_args)
    r('set_state', [np.random.get_state()], {}, **extra_args)
    
    ####################################
    hd2('Statistics')
    hd3('Order statistics')
    r('amin', (np.eye(3),),{})
    r('amax', (np.eye(3),),{})
    r('nanmin', (np.eye(3),),{})
    r('nanmax', (np.eye(3),),{})
    r('ptp', (np.eye(3),),{})
    r('percentile', (np.eye(3),50),{})

    hd3('Averages and variance')
    r('median', (np.eye(3),),{})
    r('average', (np.eye(3),),{})
    r('mean', (np.eye(3),),{})
    r('std', (np.eye(3),),{})
    r('var', (np.eye(3),),{})
    r('nanmean', (np.eye(3),),{})
    r('nanstd', (np.eye(3),),{})
    r('nanvar', (np.eye(3),),{})
    

    hd3('Correlating')
    r('corrcoef', (np.eye(3),),{})
    r('correlate', ([1, 2, 3], [0, 1, 0.5]),{})
    r('cov', (np.eye(3),),{})
    
    hd3('Histograms')
    r('histogram', (np.eye(3),),{})
    r('histogram2d', (np.eye(3).ravel(),np.eye(3).ravel()),{})
    r('histogramdd', (np.eye(3).ravel(),),{})
    r('bincount', (np.asarray(np.eye(3).ravel(), np.uint32),),{})
    r('digitize', (np.array([0.2, 6.4, 3.0, 1.6]), np.array([0.0, 1.0, 2.5, 4.0, 10.0])),{})
    
    ####################################
    hd2('Sorting, searching, and counting')
    
    hd3('Sorting')
    r('sort', (np.array([1,3,1,2.]),), {})
    m('lexsort')
    m('argsort')
    m('msort')
    m('sort_complex')
    m('partition')
    m('argpartition')
    
# sort(a[, axis, kind, order])    Return a sorted copy of an array.
# lexsort(keys[, axis])    Perform an indirect sort using a sequence of keys.
# argsort(a[, axis, kind, order])    Returns the indices that would sort an array.
# ndarray.sort([axis, kind, order])    Sort an array, in-place.
# msort(a)    Return a copy of an array sorted along the first axis.
# sort_complex(a)    Sort a complex array using the real part first, then the imaginary part.
# partition(a, kth[, axis, kind, order])    Return a partitioned copy of an array.
# argpartition(a, kth[, axis, kind, order])    Perform an indirect partition along the given axis using the algorithm specified by the kind keyword.
    
    a5 = np.arange(5)

    hd3('Searching')
    r('argmax', (a5,), {})
    r('nanargmax', (a5,), {})
    r('argmin', (a5,), {})
    r('nanargmin', (a5,), {})
    r('argwhere', (a5,), {})
    r('nonzero', (a5,), {})
    r('flatnonzero', (a5,), {})
    r('where', (a5>1,), {})
    r('searchsorted', (a5,a5), {})
    r('extract', (lambda x : x > 1, a5), {})

# argmax(a[, axis])    Indices of the maximum values along an axis.
# nanargmax(a[, axis])    Return the indices of the maximum values in the specified axis ignoring
# argmin(a[, axis])    Return the indices of the minimum values along an axis.
# nanargmin(a[, axis])    Return the indices of the minimum values in the specified axis ignoring
# argwhere(a)    Find the indices of array elements that are non-zero, grouped by element.
# nonzero(a)    Return the indices of the elements that are non-zero.
# flatnonzero(a)    Return indices that are non-zero in the flattened version of a.
# where(condition, [x, y])    Return elements, either from x or y, depending on condition.
# searchsorted(a, v[, side, sorter])    Find indices where elements should be inserted to maintain order.
# extract(condition, arr)    Return the elements of an array that satisfy some condition.    
    
    hd3('Counting')
    r('count_nonzero', (a5,), {})
    #count_nonzero(a)	Counts the number of non-zero values in the array a.
    
    

# histogram(a[, bins, range, normed, weights, ...])    Compute the histogram of a set of data.
# histogram2d(x, y[, bins, range, normed, weights])    Compute the bi-dimensional histogram of two data samples.
# histogramdd(sample[, bins, range, normed, ...])    Compute the multidimensional histogram of some data.
# bincount(x[, weights, minlength])    Count number of occurrences of each value in array of non-negative ints.
# digitize(x, bins[, right])    Return the indices of the bins to which each value in input array belongs.    

        
    global src
    src = '<html><body><table border=1>' + src + '</table></body></html>'    
    open(join(split(__file__)[0], 'api_compatibility.html'), 'w').write(src)
    
    print 'passed %d, not passed %d' % (num_passed, num_not_passed)
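
The harness above funnels every routine through a helper r() that calls the same function in NumPy and in chumpy and records pass/fail per row (m() only lists a name). As a rough, hypothetical re-creation of that pattern, here is a minimal sketch of such a check; the check_api name, and using plain NumPy on both sides so the sketch runs standalone, are this sketch's own assumptions:

import numpy as np

def check_api(name, args, kwargs, nplib=np, chlib=np):
    # Hypothetical stand-in for r(): run the same routine in two
    # libraries and report whether the results agree. A dict in the
    # args slot is treated as keyword arguments, matching the calls above.
    if isinstance(args, dict):
        kwargs = dict(args, **kwargs)
        args = ()
    try:
        expected = np.asarray(getattr(nplib, name)(*args, **kwargs))
        actual = np.asarray(getattr(chlib, name)(*args, **kwargs))
        ok = np.allclose(expected, actual)
    except Exception:
        ok = False
    print('%-12s %s' % (name, 'passed' if ok else 'not passed'))

check_api('zeros', {'shape': (2, 4, 2)}, {'dtype': np.uint8})
check_api('arange', {'start': 0, 'stop': 10}, {'step': 2})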

Example 117

Project: IkaLog Source File: player_name.py
def normalize_player_name(img_name, debug=False):
    img_name_w_norm = np.zeros((15, 250), dtype=np.uint8)
    img_name_w = matcher.MM_WHITE(sat=(0, 96), visibility=(48, 255))(img_name)

    img_name_x_hist = np.extract(
        np.sum(img_name_w, axis=0) > 128,
        np.arange(img_name_w.shape[1]),
    )

    img_name_y_hist = np.extract(
        np.sum(img_name_w, axis=1) > 128,
        np.arange(img_name_w.shape[0]),
    )

    if (len(img_name_x_hist) == 0) or (len(img_name_y_hist) == 0):
        # In some cases, we can't find any pixels.
        return img_name_w_norm

    img_name_left = np.min(img_name_x_hist)
    img_name_right = np.max(img_name_x_hist)

    img_name_top = np.min(img_name_y_hist)
    img_name_bottom = np.max(img_name_y_hist)

    # Cropping error? should be handled gracefully.
    if not (img_name_left < img_name_right):
        return None

    if not (img_name_top < img_name_bottom):
        return None

    img_name_w = img_name_w[
        img_name_top:img_name_bottom, img_name_left:img_name_right]

    img_name_w_norm[:, 0: img_name_w.shape[1]] = cv2.resize(
        img_name_w, (img_name_w.shape[1], 15))

    if debug:
        print(img_name_w_norm.shape)
        cv2.imshow('name', img_name_w_norm)
        cv2.waitKey(1)

    return img_name_w_norm
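
The normalization above hinges on a bounding-box trick: project the binarized name image onto each axis with np.sum, keep the indices whose projection clears a threshold, and crop to the min/max of what survives. A self-contained sketch of just that step (the toy mask and blob position are made up):

import numpy as np

mask = np.zeros((15, 250), dtype=np.uint8)
mask[4:11, 30:90] = 255  # a fake "name" blob

# Column/row indices whose projections exceed the same 128 threshold.
xs = np.extract(np.sum(mask, axis=0) > 128, np.arange(mask.shape[1]))
ys = np.extract(np.sum(mask, axis=1) > 128, np.arange(mask.shape[0]))

if len(xs) and len(ys):
    crop = mask[ys.min():ys.max(), xs.min():xs.max()]
    print(crop.shape)  # (6, 59): a tight box around the blob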

Example 118

Project: image_space Source File: imagefeatures_rest.py
    @access.public
    def getImageFeatures(self, params):
        try:
            import cv2
            import numpy as np
            cv2_available = True
        except ImportError:
            cv2_available = False

        # Disabling opencv for now
        cv2_available = False

        if 'url' in params:
            data = requests.get(params['url'], verify=False).content
        else:
            data = str(cherrypy.request.body.read())

        # Run Tika once
        parsed = parser.from_buffer(data)
        tika = {}
        for (k, v) in parsed["metadata"].iteritems():
            k = k.lower().replace(':', '_').replace(' ', '_').replace('-', '_')
            tika[k] = v[0] if type(v) is list and len(v) else v
        tika['content'] = parsed["content"]

        if cv2_available:
            file_bytes = np.asarray(bytearray(data), dtype=np.uint8)
            image = cv2.imdecode(file_bytes, flags=cv2.CV_LOAD_IMAGE_UNCHANGED)

            if image is not None:
                if len(image.shape) < 3 or image.shape[2] == 1:
                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

                v = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
                v = v.flatten()
                hist = v / sum(v)
                tika['histogram'] = hist.tolist()

        tika['sha1sum_s_md'] = hashlib.sha1(bytearray(data)).hexdigest()

        return tika
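
When OpenCV is available, the handler reduces the image to an 8x8x8 RGB histogram and L1-normalizes it, so images of different sizes become comparable 512-dimensional vectors. The same computation can be sketched in plain NumPy (the random image is a stand-in for the decoded upload):

import numpy as np

image = np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)

# Bucket every RGB triple into an 8x8x8 grid, then L1-normalise,
# mirroring the cv2.calcHist + `v / sum(v)` steps above.
hist, _ = np.histogramdd(image.reshape(-1, 3), bins=(8, 8, 8),
                         range=((0, 256),) * 3)
hist = hist.ravel()
hist /= hist.sum()
print(hist.shape, hist.sum())  # (512,) 1.0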

Example 119

Project: tractor Source File: hennawi.py
def redqsos():
    # W4 detections without SDSS matches.
    T = fits_table('w4targets.fits')
    ps = None
    #ps = PlotSequence('redqso')

    arr = os.environ.get('PBS_ARRAYID')
    tag = '-b'
    if arr is not None:
        arr = int(arr)
        #chunk = 100
        chunk = 50
        T = T[arr * chunk: (arr+1) * chunk]
        print('Cut to chunk', (arr * chunk))
        tag = '-%03i' % arr
    
    sdss = DR9()
    sdss.useLocalTree()
    sdss.saveUnzippedFiles('data/unzip')

    mp = multiproc(1)

    #lvl = logging.DEBUG
    lvl = logging.INFO
    logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)

    newcols = {}
    origcols = T.get_columns()
    
    T.done = np.zeros(len(T), np.uint8)

    version = get_svn_version()
    print('SVN version info:', version)

    hdr = fitsio.FITSHDR()
    hdr.add_record(dict(name='PHOT_VER', value=version['Revision'],
                        comment='SVN revision'))
    hdr.add_record(dict(name='PHOT_URL', value=version['URL'], comment='SVN URL'))
    hdr.add_record(dict(name='PHOT_DAT', value=datetime.datetime.now().isoformat(),
                        comment='forced phot run time'))

    for i,(ra,dec) in enumerate(zip(T.ra, T.dec)):
        print()
        print(i)
        print('RA,Dec', ra, dec)
        r0,r1 = ra,ra
        d0,d1 = dec,dec

        #margin = 0.003
        # ~9 SDSS pixel half-box
        margin = 0.001
        
        dr = margin / np.cos(np.deg2rad((d0+d1)/2.))
        rlo = r0 - dr
        rhi = r1 + dr
        dlo = d0 - margin
        dhi = d1 + margin

        sky = True
        #sky = False

        t = T[np.array([i])]
        sdss_forced_phot(r0,r1,d0,d1, rlo, rhi, dlo, dhi, t, ps,
                         sdss=sdss, fitsky=sky)
        #print 'Columns:', t.get_columns()
        #t.about()
        
        for key in t.get_columns():
            if key in origcols:
                continue
            val = t.get(key)
            if not key in newcols:
                newcols[key] = np.zeros(len(T), val.dtype)
                T.set(key, newcols[key])
            print('set', key, i, '=', val[0])
            newcols[key][i] = val[0]
        T.done[i] = 1

        #if i and i % 100 == 0:
        if False:
            fn = 'wisew4phot-interim%s.fits' % tag
            T.writeto(fn, header=hdr)
            print('Wrote', fn)
            
    T.writeto('wisew4phot%s.fits' % tag, header=hdr)

Example 120

Project: pymba Source File: test_cameras.py
def test_cameras():
    # start Vimba
    with Vimba() as vimba:
        # get system object
        system = vimba.getSystem()

        # list available cameras (after enabling discovery for GigE cameras)
        if system.GeVTLIsPresent:
            system.runFeatureCommand("GeVDiscoveryAllOnce")
            time.sleep(0.2)
        cameraIds = vimba.getCameraIds()
        for cameraId in cameraIds:
            print 'Camera ID:', cameraId

        # get and open a camera
        camera0 = vimba.getCamera(cameraIds[0])
        camera0.openCamera()

        # list camera features
        cameraFeatureNames = camera0.getFeatureNames()
        for name in cameraFeatureNames:
            print 'Camera feature:', name

        # get the value of a feature
        print camera0.AcquisitionMode

        # set the value of a feature
        camera0.AcquisitionMode = 'SingleFrame'

        # create new frames for the camera
        frame0 = camera0.getFrame()  # creates a frame
        frame1 = camera0.getFrame()  # creates a second frame

        # announce frame
        frame0.announceFrame()

        # capture a camera image
        camera0.startCapture()
        frame0.queueFrameCapture()
        camera0.runFeatureCommand('AcquisitionStart')
        camera0.runFeatureCommand('AcquisitionStop')
        frame0.waitFrameCapture()

        # get image data...
        imgData = frame0.getBufferByteData()

        # ...or use NumPy for fast image display (for use with OpenCV, etc)
        import numpy as np

        moreUsefulImgData = np.ndarray(buffer=frame0.getBufferByteData(),
                                       dtype=np.uint8,
                                       shape=(frame0.height,
                                              frame0.width,
                                              1))

        # clean up after capture
        camera0.endCapture()
        camera0.revokeAllFrames()

        # close camera
        camera0.closeCamera()
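
The np.ndarray(buffer=...) call above is the key step: it views the frame's raw bytes as an image array without copying. A minimal sketch of the same wrapping, with a fake byte string standing in for frame0.getBufferByteData() and made-up dimensions:

import numpy as np

height, width = 4, 6
raw = bytes(bytearray(range(height * width)))  # fake frame buffer

# Zero-copy view of the buffer as a height x width x 1 uint8 image.
img = np.ndarray(buffer=raw, dtype=np.uint8, shape=(height, width, 1))
print(img[3, 5, 0])  # 23, the last byte of the buffer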

Example 121

Project: volumina Source File: brushingmodel.py
    def endDrawing(self, pos):
        has_moved = self._hasMoved # _hasMoved will change after calling moveTo
        if has_moved:
            self.moveTo(pos)
        else:
            assert(self.pos == pos)
            self.moveTo(QPointF(pos.x()+0.0001, pos.y()+0.0001)) # move a little

        # Qt seems to use strange rules for determining which pixels to set when rendering a brush stroke to a QImage.
        # We seem to get better results if we do the following:
        # 1) Slightly offset the source window because apparently there is a small shift in the data
        # 2) Render the scene to an image that is MUCH larger than the scene resolution (4x by 4x)
        # 3) Downsample each 4x4 patch from the large image back to a single pixel in the final image,
        #     applying some threshold to determine if the final pixel is on or off. 

        tempi = QImage(QSize(4*self.bb.width(), 4*self.bb.height()), QImage.Format_ARGB32_Premultiplied) #TODO: format
        tempi.fill(0)
        painter = QPainter(tempi)
        # Offset the source window.  At first I thought the right offset was 0.5, because 
        #  that would seem to make sure points are rounded to pixel CENTERS, but 
        #  experimentation indicates that 0.25 is slightly better for some reason...
        source_rect = QRectF( QPointF(self.bb.x()+0.25, self.bb.y()+0.25), 
                              QSizeF(self.bb.width(), self.bb.height()) )
        target_rect = QRectF( QPointF(0,0),
                             QSizeF(4*self.bb.width(), 4*self.bb.height()) )
        self.scene.render(painter, target=target_rect, source=source_rect)
        painter.end()

        # Now downsample: convert each 4x4 patch into a single pixel by summing and dividing
        ndarr = qimage2ndarray.rgb_view(tempi)[:,:,0].astype(int)
        ndarr = ndarr.reshape( (ndarr.shape[0],) + (ndarr.shape[1]//4,) + (4,) )
        ndarr = ndarr.sum(axis=-1)
        ndarr = ndarr.transpose()
        ndarr = ndarr.reshape( (ndarr.shape[0],) + (ndarr.shape[1]//4,) + (4,) )
        ndarr = ndarr.sum(axis=-1)
        ndarr = ndarr.transpose()
        ndarr //= 4*4

        downsample_threshold = (7./16)*255
        labels = numpy.where(ndarr>=downsample_threshold, numpy.uint8(self.drawnNumber), numpy.uint8(0))
        labels = labels.swapaxes(0,1)
        assert labels.shape[0] == self.bb.width()
        assert labels.shape[1] == self.bb.height()

        ##
        ## ensure that at least one pixel is labeled when the brush size is 1
        ##
        ## this happens when the user just clicked without moving
        ## in that case the lineitem will be so tiny, that it won't be rendered
        ## into a single pixel by the code above
        if not has_moved and self.brushSize <= 1 and numpy.count_nonzero(labels) == 0:
            labels[labels.shape[0]//2, labels.shape[1]//2] = self.drawnNumber

        self.brushStrokeAvailable.emit(QPointF(self.bb.x(), self.bb.y()), labels)
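
The downsampling in endDrawing is hard to follow from the reshape/transpose chain alone. The idea: sum each 4x4 patch of the oversampled rendering, divide by 16, and switch the output pixel on only where the patch average clears the 7/16 threshold. A compact sketch of that step on a tiny made-up canvas:

import numpy as np

big = np.zeros((8, 12), dtype=int)  # 4x-oversampled rendering of a 2x3 target
big[0:4, 0:4] = 255                 # fully painted patch
big[4:8, 4:8] = 255                 # another fully painted patch
big[0:2, 8:10] = 255                # only 4 of 16 cells painted

# Sum each 4x4 block, average, then threshold at (7/16)*255 as above.
small = big.reshape(2, 4, 3, 4).sum(axis=(1, 3)) // 16
labels = np.where(small >= (7. / 16) * 255, np.uint8(1), np.uint8(0))
print(labels)
# [[1 0 0]
#  [0 1 0]]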

Example 122

Project: volumina Source File: imagesources.py
    def toImage( self ):
        t = time.time()
        
        tWAIT = time.time()
        self._arrayreq.wait()
        tWAIT = 1000.0*(time.time()-tWAIT)
        
        tAR = time.time()
        a = self._arrayreq.getResult()
        tAR = 1000.0*(time.time()-tAR)
        
        assert a.ndim == 2

        if self._normalize and self._normalize[0] < self._normalize[1]:
            nmin, nmax = self._normalize
            if nmin:
                a = a - nmin
            scale = (len(self._colorTable)-1) / float(nmax - nmin + 1e-35) #if max==min
            if scale != 1.0:
                a = a * scale
            if len(self._colorTable) <= 2**8:
                a = np.asanyarray( a, dtype=np.uint8 )
            elif len(self._colorTable) <= 2**16:
                a = np.asanyarray( a, dtype=np.uint16 )
            elif len(self._colorTable) <= 2**32:
                a = np.asanyarray( a, dtype=np.uint32 )

        # Use vigra if possible (much faster)
        tImg = None
        if _has_vigra and hasattr(vigra.colors, 'applyColortable'):
            tImg = time.time()
            img = QImage(a.shape[1], a.shape[0], QImage.Format_ARGB32)
            if not issubclass( a.dtype.type, np.integer ):
                raise NotImplementedError()
                #FIXME: maybe this should be done in a better way using an operator before the colortable request which properly handles 
                #this problem 
                warnings.warn("Data for colortable layers cannot be float, casting",RuntimeWarning)
                a = np.asanyarray(a, dtype=np.uint32)

            # If we have a masked array with a non-trivial mask, ensure that mask is made transparent.
            _colorTable = self._colorTable
            if np.ma.is_masked(a):
                # Add transparent color at the beginning of the colortable as needed.
                if (_colorTable[0, 3] != 0):
                    # If label 0 is unused, it can be transparent. Otherwise, the transparent color must be inserted.
                    if (a.min() == 0):
                        # If it will overflow simply promote the type. Unless we have reached the max VIGRA type.
                        if (a.max() == np.iinfo(a.dtype).max):
                            a_new_dtype = np.min_scalar_type(np.iinfo(a.dtype).max + 1)
                            if a_new_dtype <= np.dtype(np.uint32):
                                 a = np.asanyarray(a, dtype=a_new_dtype)
                            else:
                                assert (np.iinfo(a.dtype).max >= len(_colorTable)), \
                                       "This is a very large colortable. If it is indeed needed, add a transparent" + \
                                       " color at the beginning of the colortable for displaying masked arrays."

                                # Try to wrap the max value to a smaller value of the same color.
                                a[a == np.iinfo(a.dtype).max] %= len(_colorTable)

                        # Insert space for transparent color and shift labels up.
                        _colorTable = np.insert(_colorTable, 0, 0, axis=0)
                        a[:] = a+1
                    else:
                        # Make sure the first color is transparent.
                        _colorTable = _colorTable.copy()
                        _colorTable[0] = 0

                # Make masked values transparent.
                a = np.ma.filled(a, 0)

            if a.dtype in (np.uint64, np.int64):
                # FIXME: applyColortable() doesn't support 64-bit, so just truncate
                a = a.astype(np.uint32)

            vigra.colors.applyColortable(a.astype(np.uint32), _colorTable, byte_view(img))
            tImg = 1000.0*(time.time()-tImg)

        # Without vigra, do it the slow way 
        else:
            raise NotImplementedError()
            if _has_vigra:
                # If this warning is annoying you, try this:
                # warnings.filterwarnings("once")
                warnings.warn("Using slow colortable images.  Upgrade to VIGRA > 1.9 to use faster implementation.")

            #make sure that a has values in range [0, colortable_length)
            a = np.remainder(a, len(self._colorTable))
            #apply colortable
            colortable = np.roll(np.fliplr(self._colorTable), -1, 1) # self._colorTable is BGRA, but array2qimage wants RGBA
            img = colortable[a]
            img = array2qimage(img)
            
        if self.logger.isEnabledFor(logging.DEBUG):
            tTOT = 1000.0*(time.time()-t)
            self.logger.debug("toImage (%dx%d) took %f msec. (array req: %f, wait: %f, img: %f)" % (img.width(), img.height(), tTOT, tAR, tWAIT, tImg))

        return img 
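
The dtype cascade in toImage exists because an index into the colortable only needs enough bits to address every entry. np.min_scalar_type collapses that if/elif chain, as this small sketch shows:

import numpy as np

# Smallest unsigned dtype able to index a table of the given length.
for table_len in (16, 300, 70000):
    print(table_len, '->', np.min_scalar_type(table_len - 1))
# 16 -> uint8, 300 -> uint16, 70000 -> uint32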

Example 123

Project: EyeTab Source File: eyelid_locator.py
def find_upper_eyelid(eye_img, debug_index):

    u_2_win_rats_w = [0.0, 1.0, 0.0]              # Margins around ROI windows
    u_2_win_rats_h = [0.0, 0.5, 0.5]

    # FIXME - using r channel?
    img_blue = cv2.split(eye_img)[2]
    img_h, img_w = eye_img.shape[:2]  # numpy shape is (rows, cols) = (h, w)
    
    # Indexes to extract window sub-images
    w_y1, w_y2 = int(img_h * u_2_win_rats_h[0]), int(img_h * sum(u_2_win_rats_h[:2]))
    w_x1, w_x2 = int(img_w * u_2_win_rats_w[0]), int(img_w * sum(u_2_win_rats_w[:2]))

    # Split image into two halves
    window_img = img_blue[w_y1:w_y2, w_x1:w_x2]
    
    # Supress eyelashes
    morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    window_img = cv2.morphologyEx(window_img, cv2.MORPH_CLOSE, morph_kernel)
 
    # Filter right half with inverse kernel of left half to ignore iris/sclera boundary    
    filter_img_win = cv2.filter2D(window_img, -1, __gabor_kern_horiz)
    
    # Copy windows back into correct places in full filter image
    filter_img = np.zeros(eye_img.shape[:2], dtype=np.uint8)
    filter_img[w_y1:w_y2, w_x1:w_x2] = filter_img_win
    
    # Mask with circles
    cv2.circle(filter_img, (3*filter_img.shape[1]/7, filter_img.shape[0]/2), filter_img.shape[1]/4, 0,-1)
    cv2.circle(filter_img, (4*filter_img.shape[1]/7, filter_img.shape[0]/2), filter_img.shape[1]/4, 0,-1)
    
    ys = np.argmax(filter_img, axis=0)
    xs = np.arange(filter_img.shape[1])[ys > 0]
    ys = (ys)[ys > 0]

    u_lid_pts = []
    
    for i, x in enumerate(xs):
        col = filter_img.T[x]
        start_ind, end_ind = ys[i] + 5, min(ys[i] + 100, len(col) - 2)
        col_window = col[start_ind:end_ind]
        max_col = np.max(col)
        max_win = np.max(col_window)
        if max_col - max_win < 50 :
            new_y = np.argmax(col_window) + ys[i] + 5
            u_lid_pts.append((x, new_y))
        else:
            u_lid_pts.append((x, ys[i]))
    
    # Only RANSAC fit eyelid if there are enough points
    if len(u_lid_pts) < __min_num_pts_u * 2:
        eyelid_upper_parabola = None
        u_lid_pts = []
    else:
        u_lid_pts_l = [(x,y) for (x,y) in u_lid_pts if x < filter_img.shape[1]/2]
        u_lid_pts_r = [(x,y) for (x,y) in u_lid_pts if x > filter_img.shape[1]/2]
        
        # Fit eye_img coord points of sclera-segs to degree 2 polynomial
        # a(x^2) + b(x) + c
        eyelid_upper_parabola = ransac_parabola(u_lid_pts_l, u_lid_pts_r,
                                                ransac_iters_max=5,
                                                refine_iters_max=2,
                                                max_err=4)
    if eyelid_upper_parabola is not None:
        a, b, c = eyelid_upper_parabola
        c = c - __parabola_y_offset
        eyelid_upper_parabola = a, b, c

    # --------------------- Debug Drawing ---------------------
    if debug_index:
        debug_img = eye_img.copy()
        
        if eyelid_upper_parabola is not None:
            lid_xs = np.arange(21) * img_w / 20
            lid_ys = a * lid_xs ** 2 + b * lid_xs + c
            lid_pts = np.dstack([lid_xs, lid_ys]).astype(int)
            cv2.polylines(debug_img, lid_pts, False, (0, 255, 0), 1)
        
        draw_points(debug_img, u_lid_pts, (0, 0, 255), 1,2)
        filter_img = cv2.cvtColor(filter_img, cv2.COLOR_GRAY2BGR)
        draw_points(filter_img, u_lid_pts, (0, 0, 255), 1,2)
        
        stacked_windows = stack_imgs_vertical([window_img, filter_img])
        stacked_imgs = stack_imgs_horizontal([stacked_windows, debug_img])
        __debug_imgs_upper[debug_index] = stacked_imgs
    
        if debug_index > 2:
            cv2.imshow(__winname + repr(debug_index) + "u", stacked_imgs);
    # --------------------- Debug Drawing ---------------------

    return eyelid_upper_parabola
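
The eyelid model is a plain parabola a*x^2 + b*x + c; ransac_parabola is the project's outlier-robust fitter. For intuition, an ordinary least-squares fit of the same model with np.polyfit (no outlier rejection; the points are synthetic):

import numpy as np

xs = np.array([10., 20., 30., 40., 50.])
ys = 0.01 * xs ** 2 - 0.8 * xs + 25.0  # noiseless synthetic eyelid points

a, b, c = np.polyfit(xs, ys, 2)  # highest-degree coefficient first
print(round(a, 3), round(b, 3), round(c, 3))  # 0.01 -0.8 25.0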

Example 124

Project: imageio Source File: test_ffmpeg.py
def test_writer_pixelformat_size_verbose(tmpdir):
    # Check that video pixel format and size get written as expected.
    need_internet()
    # Make sure verbose option works and that default pixelformat is yuv420p
    tmpf = tmpdir.join('test.mp4', fps=30)
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level='debug')
    nframes = 4  # Number of frames in video
    for i in range(nframes):
        # Use size divisible by 16 or it gets changed.
        W.append_data(np.zeros((64, 64, 3), np.uint8))
    W.close()

    # Check that video is correct size & default output video pixel format
    # is correct
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "64x64" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header

    # Now check that macroblock size gets turned off if requested
    W = imageio.get_writer(str(tmpf), macro_block_size=None,
                           ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((100, 106, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "106x100" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header

    # Now double check values different than default work
    W = imageio.get_writer(str(tmpf), macro_block_size=4,
                           ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((64, 65, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "68x64" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header

    # Now check that the macroblock works as expected for the default of 16
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((111, 140, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    # Check for warning message with macroblock
    assert "144x112" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header
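
The sizes asserted above all follow one rule: ffmpeg pads each dimension up to the next multiple of the macroblock size (16 by default, or whatever macro_block_size was passed). A one-liner reproduces every expected header value in this test:

import numpy as np

def round_up(n, block=16):
    # Next multiple of the macroblock size, as ffmpeg pads frame dimensions.
    return int(np.ceil(n / float(block)) * block)

print(round_up(140), round_up(111))  # 144 112, matching "144x112"
print(round_up(65, block=4))         # 68, matching the macro_block_size=4 case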

Example 125

Project: pycortex Source File: quickflat.py
Function: make
def make(braindata, height=1024, recache=False, **kwargs):
    mask, extents = get_flatmask(braindata.subject, height=height, recache=recache)
    
    if not hasattr(braindata, "xfmname"):
        pixmap = get_flatcache(braindata.subject,
                               None,
                               height=height,
                               recache=recache,
                               **kwargs)
        data = braindata.vertices
        if isinstance(braindata, dataset.Vertex2D):
            data = braindata.raw.vertices
        else:
            data = braindata.vertices
    else:
        pixmap = get_flatcache(braindata.subject,
                               braindata.xfmname,
                               height=height,
                               recache=recache,
                               **kwargs)
        if isinstance(braindata, dataset.Volume2D):
            data = braindata.raw.volume
        else:
            data = braindata.volume

    if data.shape[0] > 1:
        raise ValueError("Cannot flatten movie views")

    if data.dtype == np.uint8:
        img = np.zeros(mask.shape+(4,), dtype=np.uint8)
        img[mask] = pixmap * data.reshape(-1, 4)
        return img.transpose(1,0,2)[::-1], extents
    else:
        badmask = np.array(pixmap.sum(1) > 0).ravel()
        img = (np.nan*np.ones(mask.shape)).astype(braindata.data.dtype)
        mimg = (np.nan*np.ones(badmask.shape)).astype(braindata.data.dtype)
        mimg[badmask] = (pixmap*data.ravel())[badmask].astype(mimg.dtype)
        img[mask] = mimg

        return img.T[::-1], extents
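
The uint8 branch above scatters per-vertex RGBA values onto a 2D canvas through a boolean mask. A toy version of that scatter-assign (the mask and the flat red color are made up):

import numpy as np

mask = np.zeros((4, 5), dtype=bool)
mask[1:3, 1:4] = True  # 6 pixels receive data
colors = np.tile(np.array([255, 0, 0, 255], np.uint8), (mask.sum(), 1))

img = np.zeros(mask.shape + (4,), dtype=np.uint8)
img[mask] = colors  # boolean-mask scatter, one RGBA row per masked pixel
print(img[1, 1], img[0, 0])  # [255 0 0 255] [0 0 0 0]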

Example 126

Project: vsm Source File: ldacgsseq.py
Function: init
    def __init__(self, corpus=None, context_type=None,
                 K=20, V=0, alpha=[], beta=[], seed=None):
        """
        Initialize LdaCgsSeq.

        :param corpus: Source of observed data.
        :type corpus: `Corpus`

        :param context_type: Name of tokenization stored in `corpus` whose tokens
            will be treated as documents.
        :type context_type: string, optional

        :param K: Number of topics. Default is `20`.
        :type K: int, optional

        :param alpha: Document priors. Default is a flat prior of 0.01
            for all topics.
        :type alpha: list, optional

        :param beta: Topic priors. Default is 0.01 for all words.
        :type beta: list, optional

        :param seed: Seed for numpy's RandomState. Default is `None`.
        :type seed: int, optional
        """

        self.context_type = context_type
        self.K = K

        if corpus:
            self.V = corpus.words.size
            self.indices = corpus.view_contexts(self.context_type, as_indices=True)
            self.corpus = corpus.corpus
            self.dtype = corpus.corpus.dtype
        else:
            self.V = V
            self.indices = []
            self.corpus = []
            self.dtype = None

        if self.K < 2 ** 8:
            self.Ktype = np.uint8
        elif self.K < 2 ** 16:
            self.Ktype = np.uint16
        else:
            raise RuntimeError("More than 65536 topics are not supported")

        self.indices = np.array(self.indices, dtype='i')
        self.Z = np.zeros_like(self.corpus, dtype=self.Ktype)

        priors = init_priors(self.V, self.K, beta, alpha)
        self.beta, self.alpha = priors
        self.beta = self.beta.astype(np.float32)
        self.alpha = self.alpha.astype(np.float32)

        self.word_top = (np.zeros((self.V, self.K), dtype=np.float32)
                         + self.beta)
        if self.V==0:
            self.inv_top_sums = np.inf
        else:
            self.inv_top_sums = 1. / self.word_top.sum(0)
        self.top_doc = (np.zeros((self.K, len(self.indices)),
                                 dtype=np.float32) + self.alpha)

        self.iteration = 0
        self.log_probs = []

        if seed is None:
            maxint = np.iinfo(np.int32).max
            self.seed = np.random.randint(0, maxint)
        else:
            self.seed = seed
        self._mtrand_state = np.random.RandomState(self.seed).get_state()
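
The uint8/uint16 choice for Ktype is a memory optimization: the topic-assignment array Z holds one entry per corpus token, so a byte per entry matters at scale. A sketch of the payoff with a made-up corpus size:

import numpy as np

n_tokens, K = 1000000, 20
ktype = np.uint8 if K < 2 ** 8 else np.uint16  # K >= 2**16 raises, as above
Z = np.zeros(n_tokens, dtype=ktype)
print(Z.nbytes)  # 1000000 bytes, versus 8000000 for a default int64 array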

Example 127

Project: hedge Source File: el_local_shared_segmat.py
    @memoize_method
    def get_kernel(self, with_scaling, for_benchmark=False):
        from cgen import \
                Pointer, POD, Value, ArrayOf, \
                Module, FunctionDeclaration, FunctionBody, Block, \
                Line, Define, Include, \
                Initializer, If, For, Statement, Assign, \
                ArrayInitializer

        from cgen import dtype_to_ctype
        from cgen.cuda import CudaShared, CudaConstant, CudaGlobal

        discr = self.discr
        d = discr.dimensions
        dims = range(d)
        given = self.plan.given

        float_type = given.float_type

        f_decl = CudaGlobal(FunctionDeclaration(Value("void", "apply_el_local_mat_smem_mat"),
            [
                Pointer(POD(float_type, "out_vector")),
                Pointer(POD(numpy.uint8, "gmem_matrix")),
                Pointer(POD(float_type, "debugbuf")),
                POD(numpy.uint32, "microblock_count"),
                ]
            ))

        cmod = Module([
                Include("pycuda-helpers.hpp"),
                Line(),
                Value("texture<fp_tex_%s, 1, cudaReadModeElementType>"
                    % dtype_to_ctype(float_type),
                    "in_vector_tex"),
                ])
        if with_scaling:
            cmod.append(
                Value("texture<fp_tex_%s, 1, cudaReadModeElementType>"
                    % dtype_to_ctype(float_type),
                    "scaling_tex"),
                )

        par = self.plan.parallelism

        cmod.extend([
                Line(),
                Define("DIMENSIONS", discr.dimensions),
                Define("DOFS_PER_EL", given.dofs_per_el()),
                Define("PREIMAGE_DOFS_PER_EL", self.plan.preimage_dofs_per_el),
                Line(),
                Define("SEGMENT_DOF", "threadIdx.x"),
                Define("PAR_MB_NR", "threadIdx.y"),
                Line(),
                Define("MB_SEGMENT", "blockIdx.x"),
                Define("MACROBLOCK_NR", "blockIdx.y"),
                Line(),
                Define("DOFS_PER_SEGMENT", self.plan.segment_size),
                Define("SEGMENTS_PER_MB", self.plan.segments_per_microblock()),
                Define("ALIGNED_DOFS_PER_MB", given.microblock.aligned_floats),
                Define("ALIGNED_PREIMAGE_DOFS_PER_MB",
                    self.plan.aligned_preimage_dofs_per_microblock),
                Define("MB_EL_COUNT", given.microblock.elements),
                Line(),
                Define("PAR_MB_COUNT", par.parallel),
                Define("INLINE_MB_COUNT", par.inline),
                Define("SEQ_MB_COUNT", par.serial),
                Line(),
                Define("THREAD_NUM", "(SEGMENT_DOF+PAR_MB_NR*DOFS_PER_SEGMENT)"),
                Define("COALESCING_THREAD_COUNT", "(PAR_MB_COUNT*DOFS_PER_SEGMENT)"),
                Line(),
                Define("MB_DOF_BASE", "(MB_SEGMENT*DOFS_PER_SEGMENT)"),
                Define("MB_DOF", "(MB_DOF_BASE+SEGMENT_DOF)"),
                Define("GLOBAL_MB_NR_BASE",
                    "(MACROBLOCK_NR*PAR_MB_COUNT*INLINE_MB_COUNT*SEQ_MB_COUNT)"),
                Define("GLOBAL_MB_NR",
                    "(GLOBAL_MB_NR_BASE"
                    "+ (seq_mb_number*PAR_MB_COUNT + PAR_MB_NR)*INLINE_MB_COUNT)"),
                Define("GLOBAL_MB_DOF_BASE", "(GLOBAL_MB_NR*ALIGNED_DOFS_PER_MB)"),
                Define("GLOBAL_MB_PREIMG_DOF_BASE", "(GLOBAL_MB_NR*ALIGNED_PREIMAGE_DOFS_PER_MB)"),
                Line(),
                Define("MATRIX_COLUMNS", self.plan.gpu_matrix_columns()),
                Define("MATRIX_SEGMENT_FLOATS", self.plan.gpu_matrix_block_floats()),
                Define("MATRIX_SEGMENT_BYTES",
                    "(MATRIX_SEGMENT_FLOATS*%d)" % given.float_size()),

                Line(),
                CudaShared(ArrayOf(POD(float_type, "smem_matrix"),
                    "MATRIX_SEGMENT_FLOATS")),
                CudaShared(
                    ArrayOf(
                        ArrayOf(
                            ArrayOf(
                                POD(float_type, "dof_buffer"),
                                "PAR_MB_COUNT"),
                            "INLINE_MB_COUNT"),
                        "DOFS_PER_SEGMENT"),
                    ),
                CudaShared(POD(numpy.uint16, "segment_start_el")),
                CudaShared(POD(numpy.uint16, "segment_stop_el")),
                CudaShared(POD(numpy.uint16, "segment_el_count")),
                Line(),
                ArrayInitializer(
                        CudaConstant(
                            ArrayOf(
                                POD(numpy.uint32, "segment_start_el_lookup"),
                            "SEGMENTS_PER_MB")),
                        [(chk*self.plan.segment_size)//given.dofs_per_el()
                            for chk in range(self.plan.segments_per_microblock())]
                        ),
                ArrayInitializer(
                        CudaConstant(
                            ArrayOf(
                                POD(numpy.uint32, "segment_stop_el_lookup"),
                            "SEGMENTS_PER_MB")),
                        [min(given.microblock.elements,
                            (chk*self.plan.segment_size+self.plan.segment_size-1)
                                //given.dofs_per_el()+1)
                            for chk in range(self.plan.segments_per_microblock())]
                        ),
                ])

        S = Statement
        f_body = Block()

        f_body.extend_log_block("calculate this dof's element", [
            Initializer(POD(numpy.uint8, "mb_el"),
                "MB_DOF/DOFS_PER_EL") ])

        if self.plan.use_prefetch_branch:
            f_body.extend_log_block("calculate segment responsibility data", [
                If("THREAD_NUM==0",
                    Block([
                        Assign("segment_start_el", "segment_start_el_lookup[MB_SEGMENT]"),
                        Assign("segment_stop_el", "segment_stop_el_lookup[MB_SEGMENT]"),
                        Assign("segment_el_count", "segment_stop_el-segment_start_el"),
                        ])
                    ),
                S("__syncthreads()")
                ])

        from hedge.backends.cuda.tools import get_load_code
        f_body.extend(
            get_load_code(
                dest="smem_matrix",
                base=("gmem_matrix + MB_SEGMENT*MATRIX_SEGMENT_BYTES"),
                bytes="MATRIX_SEGMENT_BYTES",
                descr="load matrix segment")
            +[S("__syncthreads()")]
            )

        # ---------------------------------------------------------------------
        def get_batched_fetch_mat_mul_code(el_fetch_count):
            result = []
            dofs = range(self.plan.preimage_dofs_per_el)

            for load_segment_start in range(0, self.plan.preimage_dofs_per_el,
                    self.plan.segment_size):
                result.extend(
                        [S("__syncthreads()")]
                        +[Assign(
                            "dof_buffer[PAR_MB_NR][%d][SEGMENT_DOF]" % inl,
                            "fp_tex1Dfetch(in_vector_tex, "
                            "GLOBAL_MB_PREIMG_DOF_BASE"
                            " + %d*ALIGNED_PREIMAGE_DOFS_PER_MB"
                            " + (segment_start_el)*PREIMAGE_DOFS_PER_EL + %d + SEGMENT_DOF)"
                            % (inl, load_segment_start)
                            )
                        for inl in range(par.inline)]
                        +[S("__syncthreads()"),
                        Line(),
                        ])

                for dof in dofs[load_segment_start:load_segment_start+self.plan.segment_size]:
                    for inl in range(par.inline):
                        result.append(
                                S("result%d += "
                                    "smem_matrix[SEGMENT_DOF*MATRIX_COLUMNS + %d]"
                                    "*"
                                    "dof_buffer[PAR_MB_NR][%d][%d]"
                                    % (inl, dof, inl, dof-load_segment_start))
                                )
                result.append(Line())
            return result

        from hedge.backends.cuda.tools import unroll
        def get_direct_tex_mat_mul_code():
            return (
                    [POD(float_type, "fof%d" % inl) for inl in range(par.inline)]
                    + [POD(float_type, "lm"), Line()]
                    + unroll(
                        lambda j: [
                        Assign("fof%d" % inl,
                            "fp_tex1Dfetch(in_vector_tex, "
                            "GLOBAL_MB_PREIMG_DOF_BASE"
                            " + %(inl)d * ALIGNED_PREIMAGE_DOFS_PER_MB"
                            " + mb_el*PREIMAGE_DOFS_PER_EL+%(j)s)"
                            % {"j":j, "inl":inl, "row": "SEGMENT_DOF"},)
                        for inl in range(par.inline)
                        ]+[
                        Assign("lm",
                            "smem_matrix["
                            "%(row)s*MATRIX_COLUMNS + %(j)s]"
                            % {"j":j, "row": "SEGMENT_DOF"},
                            )
                        ]+[
                        S("result%(inl)d += fof%(inl)d*lm" % {"inl":inl})
                        for inl in range(par.inline)
                        ],
                        total_number=self.plan.preimage_dofs_per_el,
                        max_unroll=self.plan.max_unroll)
                    + [Line()])

        def get_mat_mul_code(el_fetch_count):
            if el_fetch_count == 1:
                return get_batched_fetch_mat_mul_code(el_fetch_count)
            else:
                return get_direct_tex_mat_mul_code()

        def mat_mul_outer_loop(fetch_count):
            if with_scaling:
                inv_jac_multiplier = ("fp_tex1Dfetch(scaling_tex,"
                        "(GLOBAL_MB_NR + %(inl)d)*MB_EL_COUNT + mb_el)")
            else:
                inv_jac_multiplier = "1"

            write_condition = "MB_DOF < DOFS_PER_EL*MB_EL_COUNT"
            if self.with_index_check:
                write_condition += " && GLOBAL_MB_NR < microblock_count"
            return For("unsigned short seq_mb_number = 0",
                "seq_mb_number < SEQ_MB_COUNT",
                "++seq_mb_number",
                Block([
                    Initializer(POD(float_type, "result%d" % inl), 0)
                    for inl in range(par.inline)
                    ]+[Line()]
                    +get_mat_mul_code(fetch_count)
                    +[
                    If(write_condition,
                        Block([
                            Assign(
                                "out_vector[GLOBAL_MB_DOF_BASE"
                                " + %d*ALIGNED_DOFS_PER_MB"
                                " + MB_DOF]" % inl,
                                "result%d * %s" % (inl, (inv_jac_multiplier % {"inl":inl}))
                                )
                            for inl in range(par.inline)
                            ])
                        )
                    ])
                )

        if self.plan.use_prefetch_branch:
            from cgen import make_multiple_ifs
            f_body.append(make_multiple_ifs([
                    ("segment_el_count == %d" % fetch_count,
                        mat_mul_outer_loop(fetch_count))
                    for fetch_count in
                    range(1, self.plan.max_elements_touched_by_segment()+1)]
                    ))
        else:
            f_body.append(mat_mul_outer_loop(0))

        # finish off ----------------------------------------------------------
        cmod.append(FunctionBody(f_decl, f_body))

        if not for_benchmark and "cuda_dump_kernels" in discr.debug:
            from hedge.tools import open_unique_debug_file
            open_unique_debug_file(self.plan.debug_name, ".cu").write(str(cmod))

        mod = SourceModule(cmod,
                keep="cuda_keep_kernels" in discr.debug,
                #options=["--maxrregcount=12"]
                )

        func = mod.get_function("apply_el_local_mat_smem_mat")

        if self.plan.debug_name in discr.debug:
            print "%s: lmem=%d smem=%d regs=%d" % (
                    self.plan.debug_name,
                    func.local_size_bytes,
                    func.shared_size_bytes,
                    func.num_regs)

        in_vector_texref = mod.get_texref("in_vector_tex")
        texrefs = [in_vector_texref]

        if with_scaling:
            scaling_texref = mod.get_texref("scaling_tex")
            texrefs.append(scaling_texref)
        else:
            scaling_texref = None

        func.prepare(
                "PPPI",
                block=(self.plan.segment_size, self.plan.parallelism.parallel, 1),
                texrefs=texrefs)

        return func, in_vector_texref, scaling_texref

Example 128

Project: glumpy Source File: program.py
Function: draw
    def draw(self, mode = gl.GL_TRIANGLES, indices=None): #first=0, count=None):
        """ Draw using the specified mode & indices.

        :param gl.GLEnum mode: 
          One of
            * GL_POINTS
            * GL_LINES
            * GL_LINE_STRIP
            * GL_LINE_LOOP,
            * GL_TRIANGLES
            * GL_TRIANGLE_STRIP
            * GL_TRIANGLE_FAN

        :param IndexBuffer|None indices:
            Vertex indices to be drawn. If none given, everything is drawn.
        """

        self.activate()
        attributes = self._attributes.values()

        # Get buffer size first attribute
        # We need more tests here
        #  - do we have at least 1 attribute ?
        #  - does all attributes report same count ?
        # count = (count or attributes[0].size) - first

        if isinstance(indices, IndexBuffer):
            indices.activate()
            gltypes = { np.dtype(np.uint8) : gl.GL_UNSIGNED_BYTE,
                        np.dtype(np.uint16): gl.GL_UNSIGNED_SHORT,
                        np.dtype(np.uint32): gl.GL_UNSIGNED_INT }
            gl.glDrawElements(mode, indices.size, gltypes[indices.dtype], None)
            indices.deactivate()
        else:
            first = 0
            # count = (self._count or attributes[0].size) - first
            count = len(tuple(attributes)[0])
            gl.glDrawArrays(mode, first, count)

        gl.glBindBuffer( gl.GL_ARRAY_BUFFER, 0 )
        self.deactivate()
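
The gltypes lookup in draw is how the index buffer's NumPy dtype selects the matching glDrawElements index type. The dispatch itself can be sketched without a GL context (string names stand in for the gl.GL_UNSIGNED_* constants):

import numpy as np

gltypes = {np.dtype(np.uint8): 'GL_UNSIGNED_BYTE',
           np.dtype(np.uint16): 'GL_UNSIGNED_SHORT',
           np.dtype(np.uint32): 'GL_UNSIGNED_INT'}

indices = np.array([0, 1, 2, 2, 3, 0], dtype=np.uint16)
print(gltypes[indices.dtype])  # GL_UNSIGNED_SHORT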

Example 129

Project: chaptcha Source File: chaptcha.py
Function: vis
def vis(fpath):
    def to_rgb(img):
        return cv2.merge([img] * 3)

    BOX_W = CAPTCHA_WIDTH // NUM_CHARS
    PAD_W = (BOX_W - CH_WIDTH) // 2
    PAD_H = (CAPTCHA_HEIGHT - CH_HEIGHT) // 2
    EXTRA_PAD_W = (CAPTCHA_WIDTH % NUM_CHARS) // 2
    HIGH_COLOR = (0, 255, 0)

    # Real result used for OCR.
    orig = get_image(fpath)
    try:
        ch_imgs = segment(orig)
    except Exception:
        traceback.print_exc()
        ch_imgs = [np.zeros((CH_HEIGHT, CH_WIDTH), dtype=np.uint8)] * NUM_CHARS

    # Visualizations.
    denoised = _denoise(orig)
    with_lines = to_rgb(denoised.copy())
    for line in _get_lines(denoised):
        x1, y1, x2, y2 = line
        cv2.line(with_lines, (x1, y1), (x2, y2), HIGH_COLOR, LINE_THICK)
    processed = _preprocess(orig)
    # cv2.imwrite('vis.png', processed)
    with_rects = [np.pad(a, ((PAD_H,), (PAD_W,)), _CONSTANT)
                  for a in ch_imgs]
    with_rects = np.concatenate(with_rects, axis=1)
    with_rects = np.pad(with_rects, ((0,), (EXTRA_PAD_W,)), _CONSTANT)
    with_rects = to_rgb(with_rects)
    for i in range(NUM_CHARS):
        x1 = i * BOX_W + PAD_W + EXTRA_PAD_W - 1
        x2 = x1 + CH_WIDTH + 1
        y1 = PAD_H - 1
        y2 = y1 + CH_HEIGHT + 1
        cv2.rectangle(with_rects, (x1, y1), (x2, y2), HIGH_COLOR, 1)

    res = np.concatenate((
        to_rgb(orig),
        to_rgb(denoised),
        with_lines,
        to_rgb(processed),
        with_rects))
    cv2.imshow('opencv-result', res)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Example 130

Project: PyCV-time Source File: fmatch.py
def test_feature_matching_realtime(detector=cv2.ORB()):
    from cam import MyCam
    """
    Press 's' to take a picture or 'l' to load one and start real-time
    """

    MIN_MATCH_COUNT = 10

    cam = MyCam()
    cam.size = (640, 480)
    img1 = cv2.imread('box.png', 0)

    cv2.imshow('source', img1)
    while True:
        
        img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
        k = cv2.waitKey(5)
        if k == ord('s'):
            img1 = img2.copy()
            cv2.imwrite('campic.png', img1)
        elif k == 27:
            break
        
        
        
        # find the keypoints and descriptors with ORB
        # (cv2.waitKey returns -1 when no key was pressed, so only
        #  recompute the source descriptors after a keypress)
        if k != -1:
            cv2.destroyWindow('preview')
            kp1, des1 = detector.detectAndCompute(img1,None)

        kp2, des2 = detector.detectAndCompute(img2,None)
        
        
        # If nothing match then continue
        if des2 is None:
            img3 = draw_match(img1,kp1,img2,kp2,[])
            continue
        
        des1 = des1.astype(np.uint8, copy=False)    # Fix the data type
        des2 = des2.astype(np.uint8, copy=False)
        
        
        # Now match describers
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        # matches = bf.match(des1,des2)
        
        matches = bf.knnMatch(des1,des2, k=2)
        
        # m = matches[0][0]
        # p1, p2 = np.float32(kp1[m.queryIdx].pt), np.float32(kp2[m.trainIdx].pt)
        # print m.distance, p1, p2
        
        # Apply ratio test
        good = []
        try:
            for m,n in matches:
                if m.distance < 0.7*n.distance:
                    good.append(m)
        except ValueError:
            good = []
        
        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            
            h,w = img1.shape
            pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
            dst = cv2.perspectiveTransform(pts,M)
            
            cv2.polylines(img2,[np.int32(dst)], True, (0,0,255) ,3)

        else:
            # print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
            matchesMask = None
            good = []
        
        img3 = draw_match(img1,kp1,img2,kp2,good, matchesMask=matchesMask)
        
        
        cv2.imshow('matches', img3)
        
    print 'press any key to continue'

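Note that cv2.BFMatcher with cv2.NORM_HAMMING expects binary uint8 descriptors, which is why the example casts des1 and des2 before matching. A condensed standalone sketch of the same cast-and-ratio-test step (img_a and img_b are hypothetical grayscale images, not part of the project):

orb = cv2.ORB()
kp_a, des_a = orb.detectAndCompute(img_a, None)
kp_b, des_b = orb.detectAndCompute(img_b, None)
des_a = des_a.astype(np.uint8, copy=False)   # Hamming distance needs uint8
des_b = des_b.astype(np.uint8, copy=False)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
good = [m for m, n in matcher.knnMatch(des_a, des_b, k=2)
        if m.distance < 0.7 * n.distance]    # Lowe's ratio test
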
Example 131

Project: lantz Source File: tasks.py
    @Action(units=(None, 'seconds', None), values=(None, None, _GROUP_BY))
    def read(self, samples_per_channel=None, timeout=10.0, group_by='scan'):
        """
        Reads multiple samples from each digital line in a task. Each
        line in a channel gets one byte per sample.

        :param samples_per_channel: int or None

          The number of samples, per channel, to
          read. The default value of -1 (DAQmx_Val_Auto) reads all
          available samples. If readArray does not contain enough
          space, this function returns as many samples as fit in
          readArray.

          NI-DAQmx determines how many samples to read based on
          whether the task acquires samples continuously or acquires a
          finite number of samples.

          If the task acquires samples continuously and you set this
          parameter to -1, this function reads all the samples
          currently available in the buffer.

          If the task acquires a finite number of samples and you set
          this parameter to -1, the function waits for the task to
          acquire all requested samples, then reads those samples. If
          you set the Read All Available Data property to TRUE, the
          function reads the samples currently available in the buffer
          and does not wait for the task to acquire all requested
          samples.

        :param timeout: float

          The amount of time, in seconds, to wait for the function to
          read the sample(s). The default value is 10.0 seconds. To
          specify an infinite wait, pass -1
          (DAQmx_Val_WaitInfinitely). This function returns an error
          if the timeout elapses.

          A value of 0 indicates to try once to read the requested
          samples. If all the requested samples are read, the function
          is successful. Otherwise, the function returns a timeout
          error and returns the samples that were actually read.

        :param group_by: {'channel', 'scan'}

          Specifies whether or not the samples are interleaved:

            'channel' - Group by channel (non-interleaved).

            'scan' - Group by scan number (interleaved).

        Returns
        -------

          data : array

            The array to read samples into. Each `bytes_per_sample`
            corresponds to one sample per channel, with each element
            in that grouping corresponding to a line in that channel,
            up to the number of lines contained in the channel.

          bytes_per_sample : int

            The number of elements in returned `data` that constitutes
            a sample per channel. For each sample per channel,
            `bytes_per_sample` is the number of bytes that channel
            consists of.

        """

        if samples_per_channel in (None, -1):
            samples_per_channel = self.samples_per_channel_available()

        if self.one_channel_for_all_lines:
            nof_lines = []
            for channel in self.names_of_channels():
                nof_lines.append(self.number_of_lines(channel))
            c = int(max(nof_lines))
            dtype = getattr(np, 'uint%s' % (8 * c))
        else:
            c = 1
            dtype = np.uint8

        number_of_channels = self.number_of_channels()

        if group_by == Constants.Val_GroupByScanNumber:
            data = np.zeros((samples_per_channel, number_of_channels), dtype=dtype)
        else:
            data = np.zeros((number_of_channels, samples_per_channel), dtype=dtype)

        err, count, bps = self.lib.ReadDigitalLines(samples_per_channel, float64(timeout),
              group_by, data.ctypes.data, uInt32(data.size * c),
              RetValue('i32'), RetValue('i32'),
              None
        )
        if count < samples_per_channel:
            if group_by == 'scan':
                return data[:count], bps
            else:
                return data[:,:count], bps
        return data, bps

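For reference, the two group_by layouts above differ only in which axis indexes channels; a minimal sketch of the arrays the read method allocates (the channel and sample counts here are made up):

import numpy as np

number_of_channels, samples_per_channel = 4, 100   # hypothetical task
# 'scan' (interleaved): axis 0 indexes scans, axis 1 indexes channels
by_scan = np.zeros((samples_per_channel, number_of_channels), dtype=np.uint8)
# 'channel' (non-interleaved): axis 0 indexes channels
by_channel = np.zeros((number_of_channels, samples_per_channel), dtype=np.uint8)
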
Example 132

Project: dash-hack Source File: find_obj.py
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1+w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        cv2.polylines(vis, [corners], True, (255, 255, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1 = np.int32([kpp[0].pt for kpp in kp_pairs])
    p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)

    green = (0, 255, 0)
    red = (0, 0, 255)
    white = (255, 255, 255)
    kp_color = (51, 103, 236)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv2.circle(vis, (x1, y1), 2, col, -1)
            cv2.circle(vis, (x2, y2), 2, col, -1)
        else:
            col = red
            r = 2
            thickness = 3
            cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
            cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
            cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
    vis0 = vis.copy()
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)

    cv2.imshow(win, vis)
    def onmouse(event, x, y, flags, param):
        cur_vis = vis
        if flags & cv2.EVENT_FLAG_LBUTTON:
            cur_vis = vis0.copy()
            r = 8
            m = (anorm(p1 - (x, y)) < r) | (anorm(p2 - (x, y)) < r)
            idxs = np.where(m)[0]
            kp1s, kp2s = [], []
            for i in idxs:
                 (x1, y1), (x2, y2) = p1[i], p2[i]
                 col = (red, green)[status[i]]
                 cv2.line(cur_vis, (x1, y1), (x2, y2), col)
                 kp1, kp2 = kp_pairs[i]
                 kp1s.append(kp1)
                 kp2s.append(kp2)
            cur_vis = cv2.drawKeypoints(cur_vis, kp1s, flags=4, color=kp_color)
            cur_vis[:,w1:] = cv2.drawKeypoints(cur_vis[:,w1:], kp2s, flags=4, color=kp_color)

        cv2.imshow(win, cur_vis)
    cv2.setMouseCallback(win, onmouse)
    return vis

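The helper anorm used in the mouse callback is imported from elsewhere in the project; a sketch consistent with how it is used in onmouse (a Euclidean norm over the last axis), offered here only as an assumption:

import numpy as np

def anorm(a):
    # norm over the last axis, so anorm(p1 - (x, y)) gives the
    # distance from every keypoint to the mouse position
    return np.sqrt((np.asarray(a) ** 2).sum(-1))
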
Example 133

Project: pylearn2 Source File: tl_challenge.py
    def __init__(self, which_set, center=False, custom_path=None):
        assert which_set in ['train', 'test', 'unlabeled', 'custom']

        path = "${PYLEARN2_DATA_PATH}/TLChallenge"

        if which_set == 'train':
            path += '/training/training-data.dat'
        elif which_set == 'test':
            path += '/test/test-data.dat'
        elif which_set == 'unlabeled':
            path += '/unlabelled_tiny.dat'
        elif which_set == 'custom':
            path = custom_path

        remote_path = preprocess(path)

        path = cache.datasetCache.cache_file(remote_path)
        X = N.fromfile(path, dtype=N.uint8, sep=' ')

        X = X.reshape(X.shape[0] / (32 * 32 * 3), 32 * 32 * 3, order='F')

        assert X.max() == 255
        assert X.min() == 0

        X = N.cast['float32'](X)

        if center:
            X -= 127.5

        view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3))

        X = view_converter.design_mat_to_topo_view(X)

        X = N.transpose(X, (0, 2, 1, 3))

        X = view_converter.topo_view_to_design_mat(X)

        super(TL_Challenge, self).__init__(X=X, y=None,
                                           view_converter=view_converter)

        assert not N.any(N.isnan(self.X))

        if which_set == 'train' or which_set == 'test':
            labels_path = remote_path[:-8] + 'labels.dat'
            labels_path = cache.datasetCache.cache_file(labels_path)
            self.y_fine = N.fromfile(labels_path, dtype=N.uint8, sep=' ')
            assert len(self.y_fine.shape) == 1
            assert self.y_fine.shape[0] == X.shape[0]
            # 0 :  aquatic_mammals
            # 1 :  fish
            # 2 :  flowers
            FOOD_CONTAINER = 3
            FRUIT = 4
            # 5 :  household_electrical_devices
            FURNITURE = 6
            INSECTS = 7
            # 8 :  large_carnivores
            # 9 :  large_man-made_outdoor_things
            # 10 :  large_natural_outdoor_scenes
            LARGE_OMNIVORES_HERBIVORES = 11
            MEDIUM_MAMMAL = 12
            # 13 :  non-insect_invertebrates
            # 14 :  people
            # 15 :  reptiles
            # 16 :  small_mammals
            # 17 :  trees
            # 18 :  vehicles_1
            # 19 :  vehicles_2

            self.y_coarse = self.y_fine.copy()
            self.y_coarse[self.y_coarse == 100] = INSECTS
            self.y_coarse[self.y_coarse == 101] = LARGE_OMNIVORES_HERBIVORES
            self.y_coarse[self.y_coarse == 102] = LARGE_OMNIVORES_HERBIVORES
            self.y_coarse[self.y_coarse == 103] = LARGE_OMNIVORES_HERBIVORES
            self.y_coarse[self.y_coarse == 104] = FRUIT
            self.y_coarse[self.y_coarse == 105] = FOOD_CONTAINER
            self.y_coarse[self.y_coarse == 106] = FRUIT
            self.y_coarse[self.y_coarse == 107] = MEDIUM_MAMMAL
            self.y_coarse[self.y_coarse == 108] = FRUIT
            self.y_coarse[self.y_coarse == 109] = FURNITURE

            assert self.y_coarse.min() == 3
            assert self.y_coarse.max() == 12

            for i in xrange(120):
                if self.y_coarse[i] == FRUIT:

                    assert self.y_fine[i] in [104, 106, 108]

Example 134

Project: binvox-rw-py Source File: binvox_rw.py
Function: read_as_coord_array
def read_as_coord_array(fp, fix_coords=True):
    """ Read binary binvox format as coordinates.

    Returns binvox model with voxels in a "coordinate" representation, i.e.  an
    3 x N array where N is the number of nonzero voxels. Each column
    corresponds to a nonzero voxel and the 3 rows are the (x, z, y) coordinates
    of the voxel.  (The odd ordering is due to the way binvox format lays out
    data).  Note that coordinates refer to the binvox voxels, without any
    scaling or translation.

    Use this to save memory if your model is very sparse (mostly empty).

    Doesn't do any checks on input except for the '#binvox' line.
    """
    dims, translate, scale = read_header(fp)
    raw_data = np.frombuffer(fp.read(), dtype=np.uint8)

    values, counts = raw_data[::2], raw_data[1::2]

    sz = np.prod(dims)
    index, end_index = 0, 0
    end_indices = np.cumsum(counts)
    indices = np.concatenate(([0], end_indices[:-1])).astype(end_indices.dtype)

    values = values.astype(np.bool)
    indices = indices[values]
    end_indices = end_indices[values]

    nz_voxels = []
    for index, end_index in zip(indices, end_indices):
        nz_voxels.extend(range(index, end_index))
    nz_voxels = np.array(nz_voxels)
    # TODO are these dims correct?
    # according to docs,
    # index = x * wxh + z * width + y; // wxh = width * height = d * d

    x = nz_voxels / (dims[0]*dims[1])
    zwpy = nz_voxels % (dims[0]*dims[1]) # z*w + y
    z = zwpy / dims[0]
    y = zwpy % dims[0]
    if fix_coords:
        data = np.vstack((x, y, z))
        axis_order = 'xyz'
    else:
        data = np.vstack((x, z, y))
        axis_order = 'xzy'

    #return Voxels(data, dims, translate, scale, axis_order)
    return Voxels(np.ascontiguousarray(data), dims, translate, scale, axis_order)

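The per-run Python loop above can be replaced by a vectorized run-length expansion; a sketch under the same indices/end_indices definitions (both already filtered to nonzero runs):

import numpy as np

lengths = end_indices - indices
starts = np.concatenate(([0], lengths.cumsum()[:-1]))
# each output element is its run's start index plus its offset within the run
within = np.arange(lengths.sum()) - np.repeat(starts, lengths)
nz_voxels = np.repeat(indices, lengths) + within
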
Example 135

Project: aplpy Source File: rgb.py
def make_rgb_image(data, output, indices=(0, 1, 2), \
                   vmin_r=None, vmax_r=None, pmin_r=0.25, pmax_r=99.75, \
                   stretch_r='linear', vmid_r=None, exponent_r=2, \
                   vmin_g=None, vmax_g=None, pmin_g=0.25, pmax_g=99.75, \
                   stretch_g='linear', vmid_g=None, exponent_g=2, \
                   vmin_b=None, vmax_b=None, pmin_b=0.25, pmax_b=99.75, \
                   stretch_b='linear', vmid_b=None, exponent_b=2, \
                   make_nans_transparent=False, \
                   embed_avm_tags=True):
    '''
    Make an RGB image from a FITS RGB cube or from three FITS files.

    Parameters
    ----------

    data : str or tuple or list
        If a string, this is the filename of an RGB FITS cube. If a tuple
        or list, this should give the filename of three files to use for
        the red, green, and blue channel.

    output : str
        The output filename. The image type (e.g. PNG, JPEG, TIFF, ...)
        will be determined from the extension. Any image type supported by
        the Python Imaging Library can be used.

    indices : tuple, optional
        If data is the filename of a FITS cube, these indices are the
        positions in the third dimension to use for red, green, and
        blue respectively. The default is to use the first three
        indices.

    vmin_r, vmin_g, vmin_b : float, optional
        Minimum pixel value to use for the red, green, and blue channels.
        If set to None for a given channel, the minimum pixel value for
        that channel is determined using the corresponding pmin_x argument
        (default).

    vmax_r, vmax_g, vmax_b : float, optional
        Maximum pixel value to use for the red, green, and blue channels.
        If set to None for a given channel, the maximum pixel value for
        that channel is determined using the corresponding pmax_x argument
        (default).

    pmin_r, pmin_g, pmin_b : float, optional
        Percentile values used to determine for a given channel the
        minimum pixel value to use for that channel if the corresponding
        vmin_x is set to None. The default is 0.25% for all channels.

    pmax_r, pmax_g, pmax_b : float, optional
        Percentile values used to determine for a given channel the
        maximum pixel value to use for that channel if the corresponding
        vmax_x is set to None. The default is 99.75% for all channels.

    stretch_r, stretch_g, stretch_b : { 'linear', 'log', 'sqrt', 'arcsinh', 'power' }
        The stretch function to use for the different channels.

    vmid_r, vmid_g, vmid_b : float, optional
        Baseline values used for the log and arcsinh stretches. If
        set to None, this is set to zero for log stretches and to
        vmin - (vmax - vmin) / 30. for arcsinh stretches

    exponent_r, exponent_g, exponent_b : float, optional
        If stretch_x is set to 'power', this is the exponent to use.

    make_nans_transparent : bool, optional
        If set AND output is png, will add an alpha layer that sets pixels
        containing a NaN to transparent.

    embed_avm_tags : bool, optional
        Whether to embed AVM tags inside the image - this can only be done for
        JPEG and PNG files, and only if PyAVM is installed.
    '''

    try:
        from PIL import Image
    except ImportError:
        try:
            import Image
        except ImportError:
            raise ImportError("The Python Imaging Library (PIL) is required to make an RGB image")

    if isinstance(data, six.string_types):

        image = fits.getdata(data)
        image_r = image[indices[0], :, :]
        image_g = image[indices[1], :, :]
        image_b = image[indices[2], :, :]

        # Read in header
        header = fits.getheader(data)

        # Remove information about third dimension
        header['NAXIS'] = 2
        for key in ['NAXIS', 'CTYPE', 'CRPIX', 'CRVAL', 'CUNIT', 'CDELT', 'CROTA']:
            for coord in range(3, 6):
                name = key + str(coord)
                if name in header:
                    header.__delitem__(name)

    elif (type(data) == list or type(data) == tuple) and len(data) == 3:

        filename_r, filename_g, filename_b = data
        image_r = fits.getdata(filename_r)
        image_g = fits.getdata(filename_g)
        image_b = fits.getdata(filename_b)

        # Read in header
        header = fits.getheader(filename_r)

    else:
        raise Exception("data should either be the filename of a FITS cube or a list/tuple of three images")

    # are we making a transparent layer?
    do_alpha = make_nans_transparent and output.lower().endswith('.png')

    if do_alpha:
        log.info("Making alpha layer")

        # initialize alpha layer
        image_alpha = np.empty_like(image_r, dtype=np.uint8)
        image_alpha[:] = 255

        # look for nans in images
        for im in [image_r, image_g, image_b]:
            image_alpha[np.isnan(im)] = 0

    log.info("Red:")
    image_r = Image.fromarray(_data_stretch(image_r, \
                                            vmin=vmin_r, vmax=vmax_r, \
                                            pmin=pmin_r, pmax=pmax_r, \
                                            stretch=stretch_r, \
                                            vmid=vmid_r, \
                                            exponent=exponent_r))

    log.info("Green:")
    image_g = Image.fromarray(_data_stretch(image_g, \
                                            vmin=vmin_g, vmax=vmax_g, \
                                            pmin=pmin_g, pmax=pmax_g, \
                                            stretch=stretch_g, \
                                            vmid=vmid_g, \
                                            exponent=exponent_g))

    log.info("Blue:")
    image_b = Image.fromarray(_data_stretch(image_b, \
                                            vmin=vmin_b, vmax=vmax_b, \
                                            pmin=pmin_b, pmax=pmax_b, \
                                            stretch=stretch_b, \
                                            vmid=vmid_b, \
                                            exponent=exponent_b))

    img = Image.merge("RGB", (image_r, image_g, image_b))

    if do_alpha:
        # convert to RGBA and add alpha layer
        image_alpha = Image.fromarray(image_alpha)
        img.convert("RGBA")
        img.putalpha(image_alpha)

    img = img.transpose(Image.FLIP_TOP_BOTTOM)

    img.save(output)

    if embed_avm_tags:

        try:
            import pyavm
        except ImportError:
            warnings.warn("PyAVM 0.9.1 or later is not installed, so AVM tags will not be embedded in RGB image")
            return

        if version.LooseVersion(pyavm.__version__) < version.LooseVersion('0.9.1'):
            warnings.warn("PyAVM 0.9.1 or later is not installed, so AVM tags will not be embedded in RGB image")
            return

        from pyavm import AVM

        if output.lower().endswith(('.jpg', '.jpeg', '.png')):
            avm = AVM.from_header(header)
            avm.embed(output, output)
        else:
            warnings.warn("AVM tags will not be embedded in RGB image, as only JPEG and PNG files are supported")

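A hedged usage sketch for make_rgb_image; the filenames here are hypothetical:

# from a single RGB FITS cube, with a log stretch on the red channel
make_rgb_image('cube.fits', 'cube.png', stretch_r='log')

# from three separate FITS files, with custom percentile cuts
make_rgb_image(('r.fits', 'g.fits', 'b.fits'), 'rgb.jpg',
               pmin_r=1.0, pmax_r=99.0)
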
Example 136

Project: AutoRCCar Source File: rc_driver.py
Function: handle
    def handle(self):

        global sensor_data
        stream_bytes = ' '
        stop_flag = False
        stop_sign_active = True

        # stream video frames one by one
        try:
            while True:
                stream_bytes += self.rfile.read(1024)
                first = stream_bytes.find('\xff\xd8')
                last = stream_bytes.find('\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last+2]
                    stream_bytes = stream_bytes[last+2:]
                    gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_GRAYSCALE)
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)

                    # lower half of the image
                    half_gray = gray[120:240, :]

                    # object detection
                    v_param1 = self.obj_detection.detect(self.stop_cascade, gray, image)
                    v_param2 = self.obj_detection.detect(self.light_cascade, gray, image)

                    # distance measurement
                    if v_param1 > 0 or v_param2 > 0:
                        d1 = self.d_to_camera.calculate(v_param1, self.h1, 300, image)
                        d2 = self.d_to_camera.calculate(v_param2, self.h2, 100, image)
                        self.d_stop_sign = d1
                        self.d_light = d2

                    cv2.imshow('image', image)
                    #cv2.imshow('mlp_image', half_gray)

                    # reshape image
                    image_array = half_gray.reshape(1, 38400).astype(np.float32)
                    
                    # neural network makes prediction
                    prediction = self.model.predict(image_array)

                    # stop conditions
                    if sensor_data is not None and sensor_data < 30:
                        print("Stop, obstacle in front")
                        self.rc_car.stop()
                    
                    elif 0 < self.d_stop_sign < 25 and stop_sign_active:
                        print("Stop sign ahead")
                        self.rc_car.stop()

                        # stop for 5 seconds
                        if stop_flag is False:
                            self.stop_start = cv2.getTickCount()
                            stop_flag = True
                        self.stop_finish = cv2.getTickCount()

                        self.stop_time = (self.stop_finish - self.stop_start)/cv2.getTickFrequency()
                        print "Stop time: %.2fs" % self.stop_time

                        # 5 seconds later, continue driving
                        if self.stop_time > 5:
                            print("Waited for 5 seconds")
                            stop_flag = False
                            stop_sign_active = False

                    elif 0 < self.d_light < 30:
                        #print("Traffic light ahead")
                        if self.obj_detection.red_light:
                            print("Red light")
                            self.rc_car.stop()
                        elif self.obj_detection.green_light:
                            print("Green light")
                            pass
                        elif self.obj_detection.yellow_light:
                            print("Yellow light flashing")
                            pass
                        
                        self.d_light = 30
                        self.obj_detection.red_light = False
                        self.obj_detection.green_light = False
                        self.obj_detection.yellow_light = False

                    else:
                        self.rc_car.steer(prediction)
                        self.stop_start = cv2.getTickCount()
                        self.d_stop_sign = 25

                        if stop_sign_active is False:
                            self.drive_time_after_stop = (self.stop_start - self.stop_finish)/cv2.getTickFrequency()
                            if self.drive_time_after_stop > 5:
                                stop_sign_active = True

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.rc_car.stop()
                        break

            cv2.destroyAllWindows()

        finally:
            print "Connection closed on thread 1"

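The decode step above uses the legacy cv2.CV_LOAD_IMAGE_* constants and np.fromstring; a minimal sketch of the equivalent step with the current OpenCV/NumPy API (jpg_bytes is assumed to be one complete JPEG frame):

import numpy as np
import cv2

def decode_jpeg(jpg_bytes):
    # wrap the raw JPEG bytes in a uint8 buffer and decode twice:
    # once as grayscale for the classifier, once unchanged for display
    buf = np.frombuffer(jpg_bytes, dtype=np.uint8)
    gray = cv2.imdecode(buf, cv2.IMREAD_GRAYSCALE)
    image = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)
    return gray, image
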
Example 137

Project: opendr Source File: slider_demo.py
Function: main
def main():
    # Create a black image, a window
    img = np.zeros((300,512,3), np.uint8)
    cv2.namedWindow('image')
    cv2.namedWindow('derivatives')

    rn = get_renderer()

    tracked = {
        'sph0': rn.vc.components[0],
        'sph1': rn.vc.components[1],
        'sph2': rn.vc.components[2],
        'sph3': rn.vc.components[3],
        'k0': rn.camera.k[0],
        'k1': rn.camera.k[1],
        'k2': rn.camera.k[2]
    }

    cnst = 1000
    for k in sorted(tracked.keys()):
        cv2.createTrackbar(k, 'image', 0, cnst, nothing)

    old_tracked = tracked
    cv2.setTrackbarPos('sph0', 'image', 800)
    while(1):
        cv2.imshow('image',rn.r)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
    
        for k, v in tracked.items():
            v[:] = np.array(cv2.getTrackbarPos(k, 'image')).astype(np.float32)*4/cnst
            if tracked[k].r[0] != old_tracked[k].r[0]:
                drim = rn.dr_wrt(v).reshape(rn.shape)
                mn = np.mean(drim)
                drim /= np.max(np.abs(drim.ravel()))*2.
                drim += .5
                # drim = drim - np.min(drim)
                # drim = drim / np.max(drim)
                cv2.imshow('derivatives', drim)
               
        cv2.waitKey(1)
        old_tracked = deepcopy(tracked)
    # while True: 
    #     for k_change in sorted(tracked.keys()):
    #         if k_change == 'sph0':
    #             continue
    #         for t in np.arange(0, np.pi, .05):
    #             cv2.setTrackbarPos(k_change, 'image', int(np.sin(t)*1000))
    #             cv2.imshow('image',rn.r)
    #             k = cv2.waitKey(1) & 0xFF
    #             if k == 27:
    #                 break
    # 
    #             for k, v in tracked.items():
    #                 v[:] = np.array(cv2.getTrackbarPos(k, 'image')).astype(np.float32)*4/cnst
    #                 if tracked[k].r[0] != old_tracked[k].r[0]:
    #                     drim = rn.dr_wrt(v).reshape(rn.shape)
    #                     mn = np.mean(drim)
    #                     drim /= np.max(np.abs(drim.ravel()))*2.
    #                     drim += .5
    #                     # drim = drim - np.min(drim)
    #                     # drim = drim / np.max(drim)
    #                     cv2.imshow('derivatives', drim)
    #         
    #         
    #             print rn.vc.components
    # 
    #             cv2.waitKey(1)
    #             old_tracked = deepcopy(tracked)

    cv2.destroyAllWindows()

Example 138

Project: hdf5storage Source File: utilities.py
def read_data(f, grp, name, options):
    """ Writes a piece of data into an open HDF5 file.

    Low level function to read a Python type of the specified name from
    specified Group.

    Parameters
    ----------
    f : h5py.File
        The open HDF5 file.
    grp : h5py.Group or h5py.File
        The Group to read the data from.
    name : str
        The name of the data to read.
    options : hdf5storage.core.Options
        The options to use when reading.

    Returns
    -------
    data
        The data named `name` in Group `grp`.

    Raises
    ------
    CantReadError
        If the data cannot be read successfully.

    See Also
    --------
    hdf5storage.read : Higher level version.
    write_data
    hdf5storage.Options

    """
    # If name isn't found, raise an error.
    if name not in grp:
        raise hdf5storage.exceptions.CantReadError('Could not find ' \
            + posixpath.join(grp.name, name))

    # Get the different attributes that can be used to identify the
    # type, which are the type string and the MATLAB class.
    type_string = get_attribute_string(grp[name], 'Python.Type')
    matlab_class = get_attribute_string(grp[name], 'MATLAB_class')

    # If the type_string is present, get the marshaller for it. If it is
    # not, use the one for the matlab class if it is given. Otherwise,
    # use the fallback (NumpyScalarArrayMarshaller for both Datasets and
    # Groups). If calls to the marshaller collection to get the right
    # marshaller don't return one (return None), we also go to the
    # default. Also get whether we have the modules required to read it
    # accurately or not (approximately).

    m = None
    has_modules = False
    mc = options.marshaller_collection
    if type_string is not None:
        m, has_modules = mc.get_marshaller_for_type_string(type_string)
    elif matlab_class is not None:
        m, has_modules = \
            mc.get_marshaller_for_matlab_class(matlab_class)
    elif hasattr(grp[name], 'dtype'):
        # Numpy dataset
        m, has_modules = \
            mc.get_marshaller_for_type(grp[name].dtype.type)
    elif isinstance(grp[name], (h5py.Group, h5py.File)):
        # Groups and files are like Matlab struct
        m, has_modules = mc.get_marshaller_for_matlab_class('struct')

    if m is None:
        # use Numpy as a fallback
        m, has_modules = mc.get_marshaller_for_type(np.uint8)

    # If a marshaller was found, use it to read the data. Otherwise,
    # raise an error.

    if m is not None:
        if has_modules:
            return m.read(f, grp, name, options)
        else:
            return m.read_approximate(f, grp, name, options)
    else:
        raise hdf5storage.exceptions.CantReadError('Could not read '
                                                   + grp[name].name)

Example 139

Project: mcedit2 Source File: chunkupdate.py
    def areaLights(self, lightName):
        chunkSection = self.chunkSection
        chunkWidth, chunkLength, chunkHeight = self.Blocks.shape
        shape = (chunkWidth + 4, chunkLength + 4, chunkHeight + 4)

        if not hasattr(chunkSection, lightName):
            ret = numpy.zeros(shape, numpy.uint8)
            ret[:] = 15
            return ret

        def Light(cs):
            return getattr(cs, lightName)

        neighboringChunks = self.chunkUpdate.neighboringChunks


        areaLights = numpy.empty(shape, numpy.uint8)
        if lightName == "SkyLight":
            default = 15
        else:
            default = 0

        areaLights[:, :, :] = default

        areaLights[2:-2, 2:-2, 2:-2] = Light(chunkSection)

        y = chunkSection.Y

        if faces.FaceXDecreasing in neighboringChunks:
            ncs = neighboringChunks[faces.FaceXDecreasing].getSection(y)
            if ncs:
                areaLights[2:-2, 2:-2, :2] = Light(ncs)[:, :, -2:]

        if faces.FaceXIncreasing in neighboringChunks:
            ncs = neighboringChunks[faces.FaceXIncreasing].getSection(y)
            if ncs:
                areaLights[2:-2, 2:-2, -2:] = Light(ncs)[:, :, :2]

        if faces.FaceZDecreasing in neighboringChunks:
            ncs = neighboringChunks[faces.FaceZDecreasing].getSection(y)
            if ncs:
                areaLights[2:-2, :2, 2:-2] = Light(ncs)[:, -2:, :]

        if faces.FaceZIncreasing in neighboringChunks:
            ncs = neighboringChunks[faces.FaceZIncreasing].getSection(y)
            if ncs:
                areaLights[2:-2, -2:, 2:-2] = Light(ncs)[:, :2, :]

        above = self.chunkUpdate.chunk.getSection(y + 1)
        if above:
            areaLights[-2:, 2:-2, 2:-2] = Light(above)[:2, :, :]

        below = self.chunkUpdate.chunk.getSection(y - 1)
        if below:
            areaLights[:2, 2:-2, 2:-2] = Light(below)[-2:, :, :]

        nx, ny, nz = self.blocktypes.useNeighborBrightness[self.areaBlocks].nonzero()
        nxd = nx
        nx = nx + 1
        nxi = nx + 1
        nyd = ny
        ny = ny + 1
        nyi = ny + 1
        nzd = nz
        nz = nz + 1
        nzi = nz + 1
        
        neighborBrightness = [
            areaLights[nxi, ny, nz],
            areaLights[nxd, ny, nz],
            areaLights[nx, nyi, nz],
            areaLights[nx, nyd, nz],
            areaLights[nx, ny, nzi],
            areaLights[nx, ny, nzd],
        ]
        neighborBrightness = numpy.amax(neighborBrightness, 0)

        areaLights[nx, ny, nz] = neighborBrightness

        return areaLights[1:-1, 1:-1, 1:-1]

Example 140

Project: visvis Source File: line.py
    def _CreateSprites(self, ms, mw, mew):
        """ Create the sprites from scratch. """

        ## init
        # We'll make a 2D array of size d, which fits the marker completely.
        # Then we make a second array with the original eroded as many
        # times as the edge is wide.

        # find nearest multiple of four
        d = 4
        while d < mw+2*mew:
            d += 4
        # what is the offset for the face
        #dd = ( d-(mw+2*mew) ) / 2
        dd = (d-mw)/2

        # calc center
        c = mw/2.0

        # create patch
        data1 = np.zeros((d,d),dtype=np.uint8)

        # create subarray for face
        data2 = data1[dd:,dd:][:mw,:mw]

        ## define marker functions
        def square(xy):
            x, y = xy
            data2[y,x]=255
        def diamond(xy):
            x, y = xy
            if y > x-mw/2 and y<x+mw/2 and y > (mw-x)-c and y<(mw-x)+c:
                data2[y,x]=255
        def plus(xy):
            x, y = xy
            if y > mw/3 and y < 2*mw/3:
                data2[y,x]=255
            if x > mw/3 and x < 2*mw/3:
                data2[y,x]=255
        def cross(xy):
            x, y = xy
            if y > x-mw/4 and y < x+mw/4:
                data2[y,x]=255
            if y > (mw-x)-mw/4 and y < (mw-x)+mw/4:
                data2[y,x]=255
        def flower(xy):
            x, y = xy
            a = math.atan2(y-c,x-c)
            r = (x-c)**2 + (y-c)**2
            relAng = 5 * a / (2*math.pi)  # whole circle from 1 to 5
            subAng = (relAng % 1)       # get the non-integer bit
            if subAng>0.5: subAng = 1-subAng
            refRad1, refRad2 = c/4, c
            a = math.sin(subAng*math.pi)
            refRad = (1-a)*refRad1 + a*refRad2
            if r < refRad**2:
                data2[y,x]=255
        def star5(xy):
            x, y = xy
            a = math.atan2(y-c,x-c) - 0.5*math.pi
            r = (x-c)**2 + (y-c)**2
            relAng = 5 * a / (2*math.pi)  # whole circle from 1 to 5
            subAng = (relAng % 1)       # get the non-integer bit
            if subAng>0.5: subAng = 1-subAng
            refRad1, refRad2 = c/4, c
            a = math.asin(subAng*2) / (math.pi/2)
            refRad = (1-a)*refRad1 + a*refRad2
            if r < refRad**2:
                data2[y,x]=255
        def star6(xy):
            x, y = xy
            a = math.atan2(y-c,x-c)
            r = (x-c)**2 + (y-c)**2
            relAng = 6 * a / (2*math.pi)  # whole circle from 1 to 5
            subAng = (relAng % 1)       # get the non-integer bit
            if subAng>0.5: subAng = 1-subAng
            refRad1, refRad2 = c/3, c
            a = math.asin(subAng*2) / (math.pi/2)
            refRad = (1-a)*refRad1 + a*refRad2
            if r < refRad**2:
                data2[y,x]=255
        def circle(xy):
            x,y = xy
            r = (x-c)**2 + (y-c)**2
            if r < c**2:
                data2[y,x] = 255
        def triangleDown(xy):
            x,y = xy
            if x >= 0.5*y and x <= mw-0.5*(y+1):
                data2[y,x] = 255
        def triangleUp(xy):
            x,y = xy
            if x >= c-0.5*y and x <= c+0.5*y:
                data2[y,x] = 255
        def triangleLeft(xy):
            x,y = xy
            if y >= c-0.5*x and y <= c+0.5*x:
                data2[y,x] = 255
        def triangleRight(xy):
            x,y = xy
            if y >= 0.5*x and y <= mw-0.5*(x+1):
                data2[y,x] = 255

        # a dict ms to function
        funcs = {   's':square, 'd':diamond, '+':plus, 'x':cross,
                    '*':star5, 'p':star5, 'h':star6, 'f':flower,
                    '.':circle, 'o':circle, 'v':triangleDown,
                    '^':triangleUp, '<':triangleLeft, '>':triangleRight}

        # select function
        try:
            func = funcs[ms]
        except KeyError:
            func = circle
        
        ## Create face
        I,J = np.where(data2==0)
        for xy in zip(I,J):
            func(xy)
        
        ## dilate x times to create edge
        # we add a border to the array to make the dilation possible
        data3 = np.zeros((d+4,d+4), dtype=np.uint8)
        data3[2:-2,2:-2] = 1
        # retrieve indices.
        I,J = np.where(data3==1)
        # copy face
        data3[2:-2,2:-2] = data1
        tmp = data3.copy()
        # apply
        def dilatePixel(xy):
            x,y = xy
            if tmp[y-1:y+2,x-1:x+2].max():
                data3[y,x] = 255
        for i in range(int(mew)):
            for xy in zip(I,J):
                dilatePixel(xy)
            tmp = data3.copy()
        # remove border
        data3 = data3[2:-2,2:-2]

        ## create sprites and return

        sprite1 = Sprite(data1, mw)
        sprite2 = Sprite(data3-data1, mw+2*mew)
        
        return d, sprite1, sprite2

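The dilatePixel loop above is effectively a repeated binary dilation with a 3x3 neighbourhood; a sketch of the same edge construction with scipy.ndimage, ignoring the 2-pixel border bookkeeping and assuming mew >= 1:

import numpy as np
from scipy import ndimage

# dilate the face mask `mew` times with a 3x3 structuring element,
# then subtract the face to leave only the edge ring
dilated = ndimage.binary_dilation(data1 > 0,
                                  structure=np.ones((3, 3), bool),
                                  iterations=int(mew))
edge = (dilated * 255).astype(np.uint8) - data1   # plays the role of data3 - data1
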
Example 141

Project: neural-network-animation Source File: image.py
    def _get_unsampled_image(self, A, image_extents, viewlim):
        """
        convert numpy array A with given extents ([x1, x2, y1, y2] in
        data coordinate) into the Image, given the viewlim (should be a
        bbox instance).  Image will be clipped if the extents is
        significantly larger than the viewlim.
        """
        xmin, xmax, ymin, ymax = image_extents
        dxintv = xmax-xmin
        dyintv = ymax-ymin

        # the viewport scale factor
        if viewlim.width == 0.0 and dxintv == 0.0:
            sx = 1.0
        else:
            sx = dxintv/viewlim.width
        if viewlim.height == 0.0 and dyintv == 0.0:
            sy = 1.0
        else:
            sy = dyintv/viewlim.height
        numrows, numcols = A.shape[:2]
        if sx > 2:
            x0 = (viewlim.x0-xmin)/dxintv * numcols
            ix0 = max(0, int(x0 - self._filterrad))
            x1 = (viewlim.x1-xmin)/dxintv * numcols
            ix1 = min(numcols, int(x1 + self._filterrad))
            xslice = slice(ix0, ix1)
            xmin_old = xmin
            xmin = xmin_old + ix0*dxintv/numcols
            xmax = xmin_old + ix1*dxintv/numcols
            dxintv = xmax - xmin
            sx = dxintv/viewlim.width
        else:
            xslice = slice(0, numcols)

        if sy > 2:
            y0 = (viewlim.y0-ymin)/dyintv * numrows
            iy0 = max(0, int(y0 - self._filterrad))
            y1 = (viewlim.y1-ymin)/dyintv * numrows
            iy1 = min(numrows, int(y1 + self._filterrad))
            if self.origin == 'upper':
                yslice = slice(numrows-iy1, numrows-iy0)
            else:
                yslice = slice(iy0, iy1)
            ymin_old = ymin
            ymin = ymin_old + iy0*dyintv/numrows
            ymax = ymin_old + iy1*dyintv/numrows
            dyintv = ymax - ymin
            sy = dyintv/viewlim.height
        else:
            yslice = slice(0, numrows)

        if xslice != self._oldxslice or yslice != self._oldyslice:
            self._imcache = None
            self._oldxslice = xslice
            self._oldyslice = yslice

        if self._imcache is None:
            if self._A.dtype == np.uint8 and self._A.ndim == 3:
                im = _image.frombyte(self._A[yslice, xslice, :], 0)
                im.is_grayscale = False
            else:
                if self._rgbacache is None:
                    x = self.to_rgba(self._A, bytes=False)
                    # Avoid side effects: to_rgba can return its argument
                    # unchanged.
                    if np.may_share_memory(x, self._A):
                        x = x.copy()
                    # premultiply the colors
                    x[..., 0:3] *= x[..., 3:4]
                    x = (x * 255).astype(np.uint8)
                    self._rgbacache = x
                else:
                    x = self._rgbacache
                im = _image.frombyte(x[yslice, xslice, :], 0)
                if self._A.ndim == 2:
                    im.is_grayscale = self.cmap.is_gray()
                else:
                    im.is_grayscale = False
            self._imcache = im

            if self.origin == 'upper':
                im.flipud_in()
        else:
            im = self._imcache

        return im, xmin, ymin, dxintv, dyintv, sx, sy

Example 142

Project: HistomicsTK Source File: reinhard.py
def reinhard(im_src, target_mu, target_sigma, src_mu=None, src_sigma=None):
    """Performs Reinhard color normalization to transform the color
    characteristics of an image to a desired standard.

    The standard is defined by the mean and standard deviations of the target
    image in LAB color space defined by Ruderman. The input image is converted
    to Ruderman's LAB space, the LAB channels are each centered and scaled to
    zero-mean unit variance, and then rescaled and shifted to match the target
    image statistics. If the LAB statistics for the input image are provided
    (`src_mu` and `src_sigma`) then these will be used for normalization,
    otherwise they will be derived from the input image `im_src`.

    Parameters
    ----------
    im_src : array_like
        An RGB image

    target_mu : array_like
        A 3-element array containing the means of the target image channels
        in LAB color space.

    target_sigma : array_like
        A 3-element array containing the standard deviations of the target
        image channels in LAB color space.

    src_mu : array_like, optional
        A 3-element array containing the means of the source image channels in
        LAB color space. Used with ReinhardSample for uniform normalization of
        tiles from a slide.

    src_sigma : array, optional
        A 3-element array containing the standard deviations of the source
        image channels in LAB color space. Used with ReinhardSample for
        uniform normalization of tiles from a slide.

    Returns
    -------
    im_normalized : array_like
        Color Normalized RGB image

    See Also
    --------
    histomicstk.preprocessing.color_conversion.rgb_to_lab,
    histomicstk.preprocessing.color_conversion.lab_to_rgb

    References
    ----------
    .. [1] E. Reinhard, M. Adhikhmin, B. Gooch, P. Shirley, "Color transfer
       between images," in IEEE Computer Graphics and Applications, vol.21,
       no.5,pp.34-41, 2001.
    .. [2] D. Ruderman, T. Cronin, and C. Chiao, "Statistics of cone responses
       to natural images: implications for visual coding," J. Opt. Soc. Am. A
       vol.15, pp.2036-2045, 1998.
    """

    # get input image dimensions
    m = im_src.shape[0]
    n = im_src.shape[1]

    # convert input image to LAB color space
    im_lab = color_conversion.rgb_to_lab(im_src)

    # calculate src_mu if not provided
    if src_mu is None:
        src_mu = im_lab.sum(axis=0).sum(axis=0) / (m * n)

    # center to zero-mean
    for i in range(3):
        im_lab[:, :, i] = im_lab[:, :, i] - src_mu[i]

    # calculate src_sigma if not provided
    if src_sigma is None:
        src_sigma = ((im_lab * im_lab).sum(axis=0).sum(axis=0) /
                     (m * n - 1)) ** 0.5

    # scale to unit variance
    for i in range(3):
        im_lab[:, :, i] = im_lab[:, :, i] / src_sigma[i]

    # rescale and recenter to match target statistics
    for i in range(3):
        im_lab[:, :, i] = im_lab[:, :, i] * target_sigma[i] + target_mu[i]

    # convert back to RGB colorspace
    im_normalized = color_conversion.lab_to_rgb(im_lab)
    im_normalized[im_normalized > 255] = 255
    im_normalized[im_normalized < 0] = 0
    im_normalized = im_normalized.astype(np.uint8)

    return im_normalized

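Since the three per-channel loops apply a single affine map in LAB space, the whole normalization collapses to one broadcast expression; a sketch using the same names as above:

import numpy as np

mu_s, sigma_s = np.asarray(src_mu), np.asarray(src_sigma)
mu_t, sigma_t = np.asarray(target_mu), np.asarray(target_sigma)
# center/scale to zero-mean unit variance, then match the target statistics
im_lab = (im_lab - mu_s) / sigma_s * sigma_t + mu_t
im_normalized = np.clip(color_conversion.lab_to_rgb(im_lab), 0, 255).astype(np.uint8)
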
Example 143

Project: worldengine Source File: __init__.py
def export(world, export_filetype = 'GTiff', export_datatype = 'float32', path = 'seed_output'):
    try:
        gdal
    except NameError:
        print("Cannot export: please install pygdal.")
        sys.exit(1)

    final_driver = gdal.GetDriverByName(export_filetype)
    if final_driver is None:
        print("%s driver not registered." % export_filetype)
        sys.exit(1)

    # try to find the proper file-suffix
    export_filetype = export_filetype.lower()
    if export_filetype in gdal_mapper:
        export_filetype = gdal_mapper[export_filetype]

    # Note: GDAL will throw informative errors on its own whenever file type and data type cannot be matched.

    # translate export_datatype; http://www.gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4
    export_datatype = export_datatype.lower()
    if export_datatype in ['gdt_byte', 'uint8', 'int8', 'byte', 'char']:  # GDAL does not support int8
        bpp, signed, normalize = (8, False, True)
        numpy_type = numpy.uint8
        gdal_type  = gdal.GDT_Byte
    elif export_datatype in ['gdt_uint16', 'uint16']:
        bpp, signed, normalize = (16, False, True)
        numpy_type = numpy.uint16
        gdal_type  = gdal.GDT_UInt16
    elif export_datatype in ['gdt_uint32', 'uint32']:
        bpp, signed, normalize = (32, False, True)
        numpy_type = numpy.uint32
        gdal_type  = gdal.GDT_UInt32
    elif export_datatype in ['gdt_int16', 'int16']:
        bpp, signed, normalize = (16, True, True)
        numpy_type = numpy.int16
        gdal_type  = gdal.GDT_Int16
    elif export_datatype in ['gdt_int32', 'int32', 'int']:  # fallback for 'int'
        bpp, signed, normalize = (32, True, True)
        numpy_type = numpy.int32
        gdal_type  = gdal.GDT_Int32
    elif export_datatype in ['gdt_float32', 'float32', 'float']:  # fallback for 'float'
        bpp, signed, normalize = (32, True, False)
        numpy_type = numpy.float32
        gdal_type  = gdal.GDT_Float32
    elif export_datatype in ['gdt_float64', 'float64']:
        bpp, signed, normalize = (64, True, False)
        numpy_type = numpy.float64
        gdal_type  = gdal.GDT_Float64
    else:
        raise TypeError("Type of data not recognized or not supported by GDAL: %s" % export_datatype)

    # massage data to scale between the absolute min and max
    elevation = numpy.copy(world.layers['elevation'].data)

    # shift data according to minimum possible value
    if signed:
        elevation = elevation - world.sea_level()  # elevation 0.0 now refers to sea-level
    else:
        elevation -= elevation.min()  # lowest point at 0.0

    # rescale data (currently integer-types only)
    if normalize:
        # elevation maps usually have a range of 0 to 10, maybe 15 - rescaling for integers is essential
        if signed:
            elevation *= (2**(bpp - 1) - 1) / max(abs(elevation.min()), abs(elevation.max()))
        else:
            elevation *= (2**bpp - 1) / abs(elevation.max())

    # round data (integer-types only)
    if numpy_type != numpy.float32 and numpy_type != numpy.float64:
        elevation = elevation.round()

    # switch to final data type; no rounding performed
    elevation = elevation.astype(numpy_type)

    # take elevation data and push it into an intermediate GTiff format (some formats don't support being written by Create())
    inter_driver = gdal.GetDriverByName("GTiff")
    fh_inter_file, inter_file = tempfile.mkstemp()  # returns: (file-handle, absolute path)
    initial_ds = inter_driver.Create(inter_file, world.width, world.height, 1, gdal_type)
    band = initial_ds.GetRasterBand(1)
    
    band.WriteArray(elevation)
    band = None  # dereference band
    initial_ds = None  # save/flush and close

    # take the intermediate GTiff format and convert to final format
    initial_ds = gdal.Open(inter_file)
    final_driver.CreateCopy('%s-%d.%s' % (path, bpp, export_filetype), initial_ds)

    initial_ds = None
    os.close(fh_inter_file)
    os.remove(inter_file)

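To see the unsigned rescaling above concretely, here is a tiny worked example for bpp = 8 (the elevation values are made up):

import numpy as np

elevation = np.array([0.0, 2.5, 10.0])          # already shifted so the minimum is 0.0
elevation *= (2**8 - 1) / abs(elevation.max())  # stretch the range onto 0..255
print(elevation.round().astype(np.uint8))       # -> [  0  64 255]
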
Example 144

Project: pytorch Source File: runner.py
def run():
  TorchModel = PyTorchHelpers.load_lua_class('torch_model.lua', 'TorchModel')
  torchModel = TorchModel(backend, 28, 10)

  mndata = MNIST('../../data/mnist')
  imagesList, labelsList = mndata.load_training()
  labels = np.array(labelsList, dtype=np.uint8)
  images = np.array(imagesList, dtype=np.float32)
  labels += 1  # since torch/lua labels are 1-based
  N = labels.shape[0]
  print('loaded mnist training data')

  if numTrain > 0:
    N = min(N, numTrain)
  print('numExamples N', N)
  numBatches = N // batchSize
  for epoch in range(numEpochs):
    epochLoss = 0
    epochNumRight = 0
    for b in range(numBatches):
      res = torchModel.trainBatch(
        learningRate,
        images[b * batchSize:(b+1) * batchSize],
        labels[b * batchSize:(b+1) * batchSize])
#      print('res', res)
      numRight = res['numRight']
      loss = res['loss']
      epochNumRight += numRight
      epochLoss += loss
      print('epoch ' + str(epoch) + ' batch ' + str(b) + ' accuracy: ' + str(numRight * 100.0 / batchSize) + '%')
    print('epoch ' + str(epoch) + ' accuracy: ' + str(epochNumRight * 100.0 / N) + '%')

  print('finished training')
  print('loading test data...')
  imagesList, labelsList = mndata.load_testing()
  labels = np.array(labelsList, dtype=np.uint8)
  images = np.array(imagesList, dtype=np.float32)
  labels += 1  # since torch/lua labels are 1-based
  N = labels.shape[0]
  print('loaded mnist testing data')

  numBatches = N // batchSize
  epochLoss = 0
  epochNumRight = 0
  for b in range(numBatches):
    predictions = torchModel.predict(images[b * batchSize:(b+1) * batchSize]).asNumpyTensor().reshape(batchSize)
    labelsBatch = labels[b * batchSize:(b+1) * batchSize]
    numRight = (predictions == labelsBatch).sum()
    epochNumRight += numRight
  print('test results: accuracy: ' + str(epochNumRight * 100.0 / N) + '%')

Example 145

Project: reseg Source File: camvid.py
def load_dataset_camvid_segnet(path):
    img_train_path = os.path.join(path, 'train')
    img_valid_path = os.path.join(path, 'val')
    img_test_path = os.path.join(path, 'test')

    gt_train_path = os.path.join(path, 'trainannot')
    gt_valid_path = os.path.join(path, 'valannot')
    gt_test_path = os.path.join(path, 'testannot')

    camvid_colors = OrderedDict([
        ("Sky", np.array([128, 128, 128], dtype=np.uint8)),
        ("Building", np.array([128, 0, 0], dtype=np.uint8)),
        ("Column_Pole", np.array([192, 192, 128], dtype=np.uint8)),
        ("Road", np.array([128, 64, 128], dtype=np.uint8)),
        ("Sidewalk", np.array([0, 0, 192], dtype=np.uint8)),
        ("Tree", np.array([128, 128, 0], dtype=np.uint8)),
        ("SignSymbol", np.array([192, 128, 128], dtype=np.uint8)),
        ("Fence", np.array([64, 64, 128], dtype=np.uint8)),
        ("Car", np.array([64, 0, 128], dtype=np.uint8)),
        ("Pedestrian", np.array([64, 64, 0], dtype=np.uint8)),
        ("Bicyclist", np.array([0, 128, 192], dtype=np.uint8)),
        ("Void", np.array([0, 0, 0], dtype=np.uint8))
    ])

    print "Processing Camvid SegNet train dataset..."
    img_train, mask_train, filenames_train = load_images(
        img_train_path, gt_train_path, camvid_colors, load_greylevel_mask=True,
        save=False)  # load_greylevel_mask=True by default because it's grey

    print "Processing Camvid SegNet valid dataset..."
    img_valid, mask_valid, filenames_valid = load_images(
        img_valid_path, gt_valid_path, camvid_colors, load_greylevel_mask=True,
        save=False)  # load_greylevel_mask=True by default because it's grey

    print "Processing Camvid SegNet test dataset..."
    img_test, mask_test, filenames_test = load_images(
        img_test_path, gt_test_path, camvid_colors, load_greylevel_mask=True,
        save=False)  # load_greylevel_mask=True by default because it's grey

    return (img_train, mask_train, filenames_train,
            img_test, mask_test, filenames_test,
            img_valid, mask_valid, filenames_valid)

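The camvid_colors table above can also drive a conversion from an RGB ground-truth mask to integer class labels; a sketch of that idea (rgb_mask_to_labels and mask_rgb are hypothetical, not part of the project):

import numpy as np

def rgb_mask_to_labels(mask_rgb, colors):
    # colors is an OrderedDict mapping class name -> RGB triplet, as above;
    # each pixel gets the index of the color it matches exactly
    labels = np.zeros(mask_rgb.shape[:2], dtype=np.uint8)
    for idx, (name, rgb) in enumerate(colors.items()):
        labels[np.all(mask_rgb == rgb, axis=-1)] = idx
    return labels
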
Example 146

Project: HistomicsTK Source File: cLoG.py
def cLoG(I, Mask, SigmaMin=30*1.414, SigmaMax=50*1.414):
    """Constrainted Laplacian of Gaussian filter.

    Takes as input a grayscale nuclear image and binary mask of cell nuclei,
    and uses the distance transform of the nuclear mask to constrain the LoG
    filter response of the image for nuclear seeding. Returns a LoG filter
    image of type float. Local maxima are used for seeding cells.

    Parameters
    ----------
    I : array_like
        A hematoxylin intensity image obtained from ColorDeconvolution. Objects
        are assumed to be dark with a light background.
    Mask : array_like
        A binary image where nuclei pixels have value 1/True, and non-nuclear
        pixels have value 0/False.
    SigmaMin : float
        A scalar defining the minimum scaled nuclear radius. Radius is scaled
        by sqrt(2). Default value = 30 * 2 ** 0.5.
    SigmaMax : float
        A scalar defining the maximum scaled nuclear radius. Radius is scaled
        by sqrt(2). Default value = 50 * 2 ** 0.5.

    Returns
    -------
    Iout : array_like
        The constrained LoG filter response of `I`, of type float. Local
        maxima are used for seeding cells.

    References
    ----------
    .. [1] Y. Al-Kofahi et al "Improved Automatic Detection and Segmentation
           of Cell Nuclei in Histopathology Images" in IEEE Transactions on
           Biomedical Engineering,vol.57,no.4,pp.847-52, 2010.
    """

    # convert intensity image type to float if needed
    if I.dtype == np.uint8:
        I = I.astype(np.float)

    # generate distance map
    Distance = sp.ndimage.morphology.distance_transform_edt(Mask)

    # initialize constraint
    Constraint = np.maximum(SigmaMin, np.minimum(SigmaMax, 2*Distance))

    # initialize log filter response array
    Iout = np.finfo(Distance.dtype).min * np.ones(Mask.shape)

    # LoG filter over scales
    Start = np.floor(SigmaMin)
    Stop = np.ceil(SigmaMax)
    Sigmas = np.linspace(Start, Stop, Stop-Start+1)
    for Sigma in Sigmas:

        # generate normalized filter response
        Response = Sigma ** 2 * \
            sp.ndimage.filters.gaussian_laplace(I, Sigma, mode='mirror')

        # constrain response
        Map = Sigma < Constraint
        Response[~Map] = np.finfo(Distance.dtype).min

        # replace with maxima
        Iout = np.maximum(Iout, Response)

    # translate filtered image

    # replace min floats
    Iout[Iout == np.finfo(Distance.dtype).min] = 0

    return Iout

Example 147

Project: HistomicsTK Source File: SimpleMask.py
def SimpleMask(I, BW=2, DefaultBGScale=2.5, DefaultTissueScale=30,
               MinPeak=10, MaxPeak=25, Percent=0.10, MinProb=0.05):
    """Performs segmentation of the foreground (tissue)
    Uses a simple two-component Gaussian mixture model to mask tissue areas
    from background in brightfield H&E images. Kernel-density estimation is
    used to create a smoothed image histogram, and then this histogram is
    analyzed to identify modes corresponding to tissue and background. The
    mode peaks are then analyzed to estimate their width, and a constrained
    optimization is performed to fit gaussians directly to the histogram
    (instead of using expectation-maximization directly on the data which
    is more prone to local minima effects). A maximum-likelihood threshold
    is then derived and used to mask the tissue area in a binarized image.

    Parameters
    ----------
    I : array_like
        An RGB image of type unsigned char.
    BW : double, optional
        Bandwidth for kernel density estimation - used for smoothing the
        grayscale histogram. Default value = 2.
    DefaultBGScale : double, optional
        Standard deviation of background gaussian to be used if
        estimation fails. Default value = 2.5.
    DefaultTissueScale: double, optional
        Standard deviation of tissue gaussian to be used if estimation fails.
        Default value = 30.
    MinPeak: double, optional
        Minimum peak width for finding peaks in KDE histogram. Used to
        initialize curve fitting process. Default value = 10.
    MaxPeak: double, optional
        Maximum peak width for finding peaks in KDE histogram. Used to
        initialize curve fitting process. Default value = 25.
    Percent: double, optional
        Percentage of pixels to sample for building foreground/background
        model. Default value = 0.10.
    MinProb : double, optional
        Minimum probability to qualify as tissue pixel. Default value = 0.05.

    Returns
    -------
    Mask : array_like
        A binarized version of `I` where foreground (tissue) has value '1'.

    See Also
    --------
    histomicstk.utils.Sample
    """

    # convert image to grayscale, flatten and sample
    I = 255 * color.rgb2gray(I)
    I = I.astype(np.uint8)
    sI = I.flatten()[:, np.newaxis]
    sI = sI[np.random.uniform(1, sI.size, (int(Percent * I.size),)).astype(int)]

    # kernel-density smoothed histogram
    KDE = KernelDensity(kernel='gaussian', bandwidth=BW).fit(sI)
    xHist = np.linspace(0, 255, 256)[:, np.newaxis]
    yHist = np.exp(KDE.score_samples(xHist))[:, np.newaxis]
    yHist = yHist / sum(yHist)

    # flip smoothed y-histogram so that background mode is on the left side
    yHist = np.flipud(yHist)

    # identify initial mean parameters for gaussian mixture distribution
    # take highest peak among remaining peaks as background
    Peaks = signal.find_peaks_cwt(yHist.flatten(), np.arange(MinPeak, MaxPeak))
    BGPeak = Peaks[0]
    if len(Peaks) > 1:
        TissuePeak = Peaks[yHist[Peaks[1:]].argmax() + 1]
    else:  # no second peak found - take initial guess at 2/3 distance from origin
        TissuePeak = float(xHist[int(np.round(0.66 * xHist.size))])

    # analyze background peak to estimate variance parameter via FWHM
    BGScale = _EstimateVariance(xHist, yHist, BGPeak)
    if BGScale == -1:
        BGScale = DefaultBGScale

    # analyze tissue peak to estimate variance parameter via FWHM
    TissueScale = _EstimateVariance(xHist, yHist, TissuePeak)
    if TissueScale == -1:
        TissueScale = DefaultTissueScale

    # solve for mixing parameter
    Mix = yHist[BGPeak] * (BGScale * (2 * np.pi)**0.5)

    # flatten kernel-smoothed histogram and corresponding x values for
    # optimization
    xHist = xHist.flatten()
    yHist = yHist.flatten()

    # define gaussian mixture model
    def GaussianMixture(x, mu1, mu2, sigma1, sigma2, p):
        rv1 = norm(loc=mu1, scale=sigma1)
        rv2 = norm(loc=mu2, scale=sigma2)
        return p * rv1.pdf(x) + (1 - p) * rv2.pdf(x)

    # define gaussian mixture model residuals
    def GaussianResiduals(Parameters, y, x):
        mu1, mu2, sigma1, sigma2, p = Parameters
        yhat = GaussianMixture(x, mu1, mu2, sigma1, sigma2, p)
        return sum((y - yhat) ** 2)

    # fit Gaussian mixture model and unpack results
    Parameters = fmin_slsqp(GaussianResiduals,
                            [BGPeak, TissuePeak, BGScale, TissueScale, Mix],
                            args=(yHist, xHist),
                            bounds=[(0, 255), (0, 255),
                                    (np.spacing(1), 10),
                                    (np.spacing(1), 50), (0, 1)])
    muBackground = Parameters[0]
    muTissue = Parameters[1]
    sigmaBackground = Parameters[2]
    sigmaTissue = Parameters[3]
    p = Parameters[4]

    # create mask based on Gaussian mixture model
    Background = norm(loc=muBackground, scale=sigmaBackground)
    Tissue = norm(loc=muTissue, scale=sigmaTissue)
    pBackground = p * Background.pdf(xHist)
    pTissue = (1 - p) * Tissue.pdf(xHist)

    # identify maximum likelihood threshold
    Difference = pTissue - pBackground
    Candidates = np.nonzero(Difference >= 0)[0]
    Filtered = np.nonzero(xHist[Candidates] > muBackground)
    ML = xHist[Candidates[Filtered[0]][0]]

    # identify limits for tissue model
    Endpoints = np.asarray(Tissue.interval(1 - MinProb / 2))

    # invert threshold and tissue mean
    ML = 255 - ML
    muTissue = 255 - muTissue
    Endpoints = np.sort(255 - Endpoints)

    # generate mask
    Mask = (I <= ML) & (I >= Endpoints[0]) & (I <= Endpoints[1])
    Mask = Mask.astype(np.uint8)

    return Mask
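
A hedged usage sketch for the function above; the file name and the skimage reader are assumptions, not part of HistomicsTK:

import numpy as np
from skimage import io

im = io.imread('slide_tile.png')[:, :, :3]   # RGB, unsigned char (assumed input)
mask = SimpleMask(im)
print('tissue fraction: %.3f' % mask.mean())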

Example 148

Project: EyeTab Source File: eye_center_locator_combined.py
Function: find_pupil
def find_pupil(eye_img_bgr, fast_width_grads=25.5, fast_width_iso=80, weight_grads=0.9, weight_iso=0.1, debug_index=False):
    
    eye_img_r = cv2.split(eye_img_bgr)[2]

    fast_size_grads = (int((fast_width_grads / eye_img_bgr.shape[0]) * eye_img_bgr.shape[1]), int(fast_width_grads))
    fast_img_grads = cv2.resize(eye_img_r, fast_size_grads)
    
    fast_size_iso = (int(fast_width_iso), int((fast_width_iso / eye_img_r.shape[1]) * eye_img_r.shape[0]))
    fast_img_iso = cv2.resize(eye_img_r, fast_size_iso)
    
    c_map_grads = eye_center_locator_gradients.get_center_map(fast_img_grads)
    c_map_iso = eye_center_locator_isophote.get_center_map(fast_img_iso)
    
    c_map_norm_grads = cv2.normalize(c_map_grads, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    c_map_big_grads = cv2.resize(c_map_norm_grads, (eye_img_bgr.shape[1], eye_img_bgr.shape[0])).astype(np.uint8)

    c_map_norm_iso = cv2.normalize(c_map_iso, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    c_map_big_iso = cv2.resize(c_map_norm_iso, (eye_img_bgr.shape[1], eye_img_bgr.shape[0])).astype(np.uint8)
    
    joint_c_map = cv2.addWeighted(c_map_big_grads, weight_grads, c_map_big_iso, weight_iso, 1.0)
    
    max_val_index = np.argmax(joint_c_map)
    pupil_y0, pupil_x0 = max_val_index // joint_c_map.shape[1], max_val_index % joint_c_map.shape[1]
    
    max_val_index_2 = np.argmax(c_map_big_grads)
    pupil_y0_2, pupil_x0_2 = max_val_index_2 // joint_c_map.shape[1], max_val_index_2 % joint_c_map.shape[1]
    
    max_val_index_3 = np.argmax(c_map_big_iso)
    pupil_y0_3, pupil_x0_3 = max_val_index_3 // joint_c_map.shape[1], max_val_index_3 % joint_c_map.shape[1]
   
    if debug_index:
        
        debug_img = eye_img_bgr.copy()
        
        joint_c_map = cv2.cvtColor(joint_c_map, cv2.COLOR_GRAY2BGR)
        c_map_big_iso = cv2.cvtColor(c_map_big_iso, cv2.COLOR_GRAY2BGR)
        c_map_big_grads = cv2.cvtColor(c_map_big_grads, cv2.COLOR_GRAY2BGR)
        
        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255), 16, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0_3, pupil_y0_3), (255, 0, 255), 8, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0_2, pupil_y0_2), (255, 0, 255), 8, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0, pupil_y0), (255, 0, 0), 16, 2)
        
        draw_utils.draw_cross(c_map_big_iso, (pupil_x0_3, pupil_y0_3), (255, 0, 0), 16, 2)
        draw_utils.draw_cross(c_map_big_grads, (pupil_x0_2, pupil_y0_2), (255, 0, 0), 16, 2)
        
        stacked_imgs = image_utils.stack_imgs_horizontal([debug_img, c_map_big_grads, c_map_big_iso, joint_c_map])
        __debug_imgs[debug_index] = stacked_imgs
        
        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical([__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)
    
    return pupil_x0, pupil_y0
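
A hedged usage sketch; the image path is an assumption, and the function expects a BGR crop of a single eye:

import cv2

eye_crop = cv2.imread('eye.png')          # BGR eye crop (assumed path)
pupil_x, pupil_y = find_pupil(eye_crop)
print('pupil centre: (%d, %d)' % (pupil_x, pupil_y))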

Example 149

Project: agdc Source File: water_rgb.py
import os

import numpy
from osgeo import gdal


def process(input_vrt_path, output_dir):

    water_rgb = (0, 169, 230)

    assert os.path.exists(input_vrt_path), 'Input file %s does not exist' % input_vrt_path
    assert os.path.isdir(output_dir), 'Output directory %s does not exist' % output_dir
    
    input_dataset = gdal.Open(input_vrt_path)
    assert input_dataset, 'Unable to open dataset %s' % input_vrt_path
    
    file_list = input_dataset.GetFileList() 
    
    for band_number in range(1, input_dataset.RasterCount + 1):
        input_band = input_dataset.GetRasterBand(band_number)
        water_mask = (input_band.ReadAsArray() == 128) # True==WET, False==DRY
        
        water_file_path = os.path.join(output_dir,
                                       os.path.basename(file_list[band_number])
                                       )
        
        if os.path.exists(water_file_path):
            print('Skipping existing dataset %s' % water_file_path)
            continue
    
        gdal_driver = gdal.GetDriverByName('GTiff')
        output_dataset = gdal_driver.Create(water_file_path, 
                                            input_dataset.RasterXSize, input_dataset.RasterYSize,
                                            3, gdal.GDT_Byte,
                                            ['INTERLEAVE=PIXEL'])
        
        assert output_dataset, 'Unable to open output dataset %s'% water_file_path   
                                        
        output_dataset.SetGeoTransform(input_dataset.GetGeoTransform())
        output_dataset.SetProjection(input_dataset.GetProjection()) 

        for output_band_index in range(3):
            output_band = output_dataset.GetRasterBand(output_band_index + 1)
            output_array = (water_mask * water_rgb[output_band_index]).astype(numpy.uint8)
            print('output_array = %s' % output_array)
            print('output_array[water_mask] = %s' % output_array[water_mask])
            output_band.WriteArray(output_array)
            output_band.SetNoDataValue(0)
            output_band.FlushCache()
            
        output_dataset.FlushCache()
        print('Finished writing output dataset %s' % water_file_path)
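
The per-band write above reduces to broadcasting a boolean mask against one colour component and casting to numpy.uint8; a minimal sketch of that step in isolation, with a toy 2x2 mask standing in for the real raster:

import numpy as np

water_rgb = (0, 169, 230)
water_mask = np.array([[True, False],
                       [False, True]])      # toy 2x2 WET/DRY mask
planes = [(water_mask * c).astype(np.uint8) for c in water_rgb]
rgb = np.dstack(planes)                     # (2, 2, 3) uint8, (0, 0, 0) where dry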

Example 150

Project: hedge Source File: fluxgather.py
    @memoize_method
    def get_kernel(self, fdata, ilist_data, for_benchmark):
        from cgen.cuda import CudaShared, CudaGlobal
        from pycuda.tools import dtype_to_ctype

        discr = self.discr
        given = self.plan.given
        fplan = self.plan
        d = discr.dimensions
        dims = range(d)

        elgroup, = discr.element_groups

        float_type = given.float_type

        f_decl = CudaGlobal(FunctionDeclaration(Value("void", "apply_flux"),
            [
                Pointer(POD(float_type, "debugbuf")),
                Pointer(POD(numpy.uint8, "gmem_facedata")),
                ]+[
                Pointer(POD(float_type, "gmem_fluxes_on_faces%d" % flux_nr))
                for flux_nr in range(len(self.fluxes))
                ]
            ))

        cmod = Module()
        cmod.append(Include("pycuda-helpers.hpp"))

        for dep_expr in self.all_deps:
            cmod.extend([
                Value("texture<%s, 1, cudaReadModeElementType>"
                    % dtype_to_ctype(float_type, with_fp_tex_hack=True),
                    "field%d_tex" % self.dep_to_index[dep_expr])
                ])

        if fplan.flux_count != len(self.fluxes):
            from warnings import warn
            warn("Flux count in flux execution plan different from actual flux count.\n"
                    "You may want to specify the tune_for= kwarg in the Discretization\n"
                    "constructor.")

        cmod.extend([
            Line(),
            Typedef(POD(float_type, "value_type")),
            Line(),
            flux_header_struct(float_type, discr.dimensions),
            Line(),
            face_pair_struct(float_type, discr.dimensions),
            Line(),
            Define("DIMENSIONS", discr.dimensions),
            Define("DOFS_PER_FACE", fplan.dofs_per_face),
            Define("THREADS_PER_FACE", fplan.threads_per_face()),
            Line(),
            Define("CONCURRENT_FACES", fplan.parallel_faces),
            Define("BLOCK_MB_COUNT", fplan.mbs_per_block),
            Line(),
            Define("FACEDOF_NR", "threadIdx.x"),
            Define("BLOCK_FACE", "threadIdx.y"),
            Line(),
            Define("FLUX_COUNT", len(self.fluxes)),
            Line(),
            Define("THREAD_NUM", "(FACEDOF_NR + BLOCK_FACE*THREADS_PER_FACE)"),
            Define("THREAD_COUNT", "(THREADS_PER_FACE*CONCURRENT_FACES)"),
            Define("COALESCING_THREAD_COUNT",
                "(THREAD_COUNT < 0x10 ? THREAD_COUNT : THREAD_COUNT & ~0xf)"),
            Line(),
            Define("DATA_BLOCK_SIZE", fdata.block_bytes),
            Define("ALIGNED_FACE_DOFS_PER_MB", fplan.aligned_face_dofs_per_microblock()),
            Define("ALIGNED_FACE_DOFS_PER_BLOCK",
                "(ALIGNED_FACE_DOFS_PER_MB*BLOCK_MB_COUNT)"),
            Line(),
            Define("FOF_BLOCK_BASE", "(blockIdx.x*ALIGNED_FACE_DOFS_PER_BLOCK)"),
            Line(),
            ] + ilist_data.code + [
            Line(),
            Value("texture<index_list_entry_t, 1, cudaReadModeElementType>",
                "tex_index_lists"),
            Line(),
            fdata.struct,
            Line(),
            CudaShared(Value("flux_data", "data")),
            ])

        if not fplan.direct_store:
            cmod.extend([
                CudaShared(
                    ArrayOf(
                        ArrayOf(
                            POD(float_type, "smem_fluxes_on_faces"),
                            "FLUX_COUNT"),
                        "ALIGNED_FACE_DOFS_PER_MB*BLOCK_MB_COUNT")
                    ),
                Line(),
                ])

        S = Statement
        f_body = Block()

        from hedge.backends.cuda.tools import get_load_code

        f_body.extend(get_load_code(
            dest="&data",
            base="gmem_facedata + blockIdx.x*DATA_BLOCK_SIZE",
            bytes="sizeof(flux_data)",
            descr="load face_pair data")
            +[S("__syncthreads()"), Line() ])

        def get_flux_code(flux_writer):
            flux_code = Block([])

            flux_code.extend([
                Initializer(Pointer(
                    Value("face_pair", "fpair")),
                    "data.facepairs+fpair_nr"),
                Initializer(
                    MaybeUnused(POD(numpy.uint32, "a_index")),
                    "fpair->a_base + tex1Dfetch(tex_index_lists, "
                    "fpair->a_ilist_index + FACEDOF_NR)"),
                Initializer(
                    MaybeUnused(POD(numpy.uint32, "b_index")),
                    "fpair->b_base + tex1Dfetch(tex_index_lists, "
                    "fpair->b_ilist_index + FACEDOF_NR)"),
                Line(),
                flux_writer(),
                Line(),
                S("fpair_nr += CONCURRENT_FACES")
                ])

            return flux_code

        flux_computation = Block([
            Comment("fluxes for dual-sided (intra-block) interior face pairs"),
            While("fpair_nr < data.header.same_facepairs_end",
                get_flux_code(lambda:
                    self.write_interior_flux_code(True))
                ),
            Line(),
            Comment("work around nvcc assertion failure"),
            S("fpair_nr+=1"),
            S("fpair_nr-=1"),
            Line(),
            Comment("fluxes for single-sided (inter-block) interior face pairs"),
            While("fpair_nr < data.header.diff_facepairs_end",
                get_flux_code(lambda:
                    self.write_interior_flux_code(False))
                ),
            Line(),
            Comment("fluxes for single-sided boundary face pairs"),
            While("fpair_nr < data.header.bdry_facepairs_end",
                get_flux_code(
                    lambda: self.write_boundary_flux_code(for_benchmark))
                ),
            ])

        f_body.extend_log_block("compute the fluxes", [
            Initializer(POD(numpy.uint32, "fpair_nr"), "BLOCK_FACE"),
            If("FACEDOF_NR < DOFS_PER_FACE", flux_computation)
            ])

        if not fplan.direct_store:
            f_body.extend([
                Line(),
                S("__syncthreads()"),
                Line()
                ])

            f_body.extend_log_block("store fluxes", [
                    #Assign("debugbuf[blockIdx.x]", "FOF_BLOCK_BASE"),
                    #Assign("debugbuf[0]", "FOF_BLOCK_BASE"),
                    #Assign("debugbuf[0]", "sizeof(face_pair)"),
                    For("unsigned word_nr = THREAD_NUM",
                        "word_nr < ALIGNED_FACE_DOFS_PER_MB*BLOCK_MB_COUNT",
                        "word_nr += COALESCING_THREAD_COUNT",
                        Block([Assign(
                            "gmem_fluxes_on_faces%d[FOF_BLOCK_BASE+word_nr]" % flux_nr,
                            "smem_fluxes_on_faces[%d][word_nr]" % flux_nr)
                            for flux_nr in range(len(self.fluxes))]
                           #+[If("isnan(smem_fluxes_on_faces[%d][word_nr])" % flux_nr,
                               #Block([
                                   #Assign("debugbuf[blockIdx.x]", "word_nr"),
                                   #])
                               #)
                            #for flux_nr in range(len(self.fluxes))]
                        )
                    )
                    ])
        if False:
            f_body.extend([
                    Assign("debugbuf[blockIdx.x*96+32+BLOCK_FACE*32+threadIdx.x]", "fpair_nr"),
                    Assign("debugbuf[blockIdx.x*96+16]", "data.header.same_facepairs_end"),
                    Assign("debugbuf[blockIdx.x*96+17]", "data.header.diff_facepairs_end"),
                    Assign("debugbuf[blockIdx.x*96+18]", "data.header.bdry_facepairs_end"),
                    ]
                    )

        # finish off ----------------------------------------------------------
        cmod.append(FunctionBody(f_decl, f_body))

        if not for_benchmark and "cuda_dump_kernels" in discr.debug:
            from hedge.tools import open_unique_debug_file
            open_unique_debug_file("flux_gather", ".cu").write(str(cmod))

        #from pycuda.tools import allow_user_edit
        mod = SourceModule(
                #allow_user_edit(cmod, "kernel.cu", "the flux kernel"),
                cmod,
                keep="cuda_keep_kernels" in discr.debug)
        expr_to_texture_map = dict(
                (dep_expr, mod.get_texref(
                    "field%d_tex" % self.dep_to_index[dep_expr]))
                for dep_expr in self.all_deps)

        index_list_texref = mod.get_texref("tex_index_lists")
        index_list_texref.set_address(
                ilist_data.device_memory,
                ilist_data.bytes)
        index_list_texref.set_format(
                cuda.dtype_to_array_format(ilist_data.type), 1)
        index_list_texref.set_flags(cuda.TRSF_READ_AS_INTEGER)

        func = mod.get_function("apply_flux")
        block = (fplan.threads_per_face(), fplan.parallel_faces, 1)
        func.prepare(
                (2+len(self.fluxes))*"P",
                texrefs=expr_to_texture_map.values()
                + [index_list_texref])

        if "cuda_flux" in discr.debug:
            print "flux: lmem=%d smem=%d regs=%d" % (
                    func.local_size_bytes,
                    func.shared_size_bytes,
                    func.num_regs)

        return block, func, expr_to_texture_map
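
The method above generates CUDA source with cgen, compiles it with SourceModule, and caches a prepared function handle. A minimal PyCUDA sketch of that compile-and-prepare pattern; the toy kernel below is an illustration, not hedge's flux kernel:

import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
from pycuda.compiler import SourceModule

mod = SourceModule("""
__global__ void scale(float *x, float a)
{
    x[threadIdx.x] *= a;
}
""")
func = mod.get_function("scale")
func.prepare("Pf")                          # pointer + float argument signature

x = np.arange(8, dtype=np.float32)
x_gpu = cuda.to_device(x)
func.prepared_call((1, 1), (8, 1, 1), x_gpu, np.float32(2.0))
print(cuda.from_device(x_gpu, x.shape, x.dtype))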