NUnit.Framework.Assert.AreEqual(double, double, double)

Here are examples of the C# API NUnit.Framework.Assert.AreEqual(double, double, double), taken from open source projects. This overload asserts that two doubles are equal within a specified tolerance (delta).

745 Examples
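Before the project examples, here is a minimal self-contained sketch of what this overload does (illustrative, not taken from the projects below): the assertion passes when |expected - actual| <= delta.

    using NUnit.Framework;

    [TestFixture]
    public class DeltaAssertExample
    {
        [Test]
        public void FloatingPointComparisonWithTolerance()
        {
            double actual = 0.1 + 0.2; // 0.30000000000000004 due to binary rounding

            // An exact Assert.AreEqual(0.3, actual) would fail; the three-argument
            // overload passes because the difference is within the 1e-10 delta:
            Assert.AreEqual(0.3, actual, 1e-10);
        }
    }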

Source: TestColor.cs
with MIT License
from Arlorean

[Test]
        public void TestHSV() {
            float h, s, v;

            Color.Black.ToHSV(out h, out s, out v);
            Assert.AreEqual(0, h);
            Assert.AreEqual(0, s);
            Assert.AreEqual(0, v);
            Assert.AreEqual(Color.Black, Color.FromHSV(h, s, v));

            Color.White.ToHSV(out h, out s, out v);
            Assert.AreEqual(0, h);
            Assert.AreEqual(0, s);
            Assert.AreEqual(1, v);
            Assert.AreEqual(Color.White, Color.FromHSV(h, s, v));

            Color.Red.ToHSV(out h, out s, out v);
            Assert.AreEqual(0, h);
            Assert.AreEqual(1, s);
            Assert.AreEqual(1, v);
            Assert.AreEqual(Color.Red, Color.FromHSV(h, s, v));

            Color.Green.ToHSV(out h, out s, out v);
            Assert.AreEqual(120, h);
            Assert.AreEqual(1, s);
            Assert.AreEqual(1, v);
            Assert.AreEqual(Color.Green, Color.FromHSV(h, s, v));

            Color.Blue.ToHSV(out h, out s, out v);
            Assert.AreEqual(240, h);
            Assert.AreEqual(1, s);
            Assert.AreEqual(1, v);
            Assert.AreEqual(Color.Blue, Color.FromHSV(h, s, v));

            Color.Tan.ToHSV(out h, out s, out v);
            Assert.AreEqual(34, h);
            Assert.AreEqual(0.3333333, s, 1e-6);
            Assert.AreEqual(0.8235294, v, 1e-6);
            Assert.AreEqual(Color.Tan, Color.FromHSV(h, s, v));
        }
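For reference, the expected values follow from the standard RGB-to-HSV conversion (textbook formulas, not part of the test file). With max and min taken over the R, G, B channels normalized to [0, 1]:

$$V = \max, \qquad S = \frac{\max - \min}{\max}, \qquad H = 60^\circ \cdot \frac{G - B}{\max - \min} \quad (\text{when } \max = R)$$

For Tan, assuming the conventional RGB value (210, 180, 140): V = 210/255 ≈ 0.8235, S = 70/210 ≈ 0.3333, and H = 60 · 40/70 ≈ 34°, matching the asserted values.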

Source: NunitAssert.cs
with Apache License 2.0
from AutomateThePlanet

public void AreEqual(double expected, double actual, double delta) => NU.Assert.AreEqual(expected, actual, delta);
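This one-liner is an assertion facade that forwards to NUnit. A minimal sketch of the surrounding class (the NU alias and the class shape are assumptions inferred from this single line):

    using NU = NUnit.Framework;

    public class NunitAssert
    {
        // Forwards to NUnit's delta-tolerant overload, so test code can depend
        // on this wrapper instead of on a concrete assertion framework.
        public void AreEqual(double expected, double actual, double delta)
            => NU.Assert.AreEqual(expected, actual, delta);
    }

Such facades let a framework swap NUnit for MSTest or xUnit behind one interface.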

Source: Then.cs
with Apache License 2.0
from jameschch

[Then(@"the Sharpe Ratio should be (.*)")]
        public void ThenTheSharpeRatioShouldBe(double p0)
        {
            var actual = GetResults();
            var predicted = GetPredicted(actual);
            Assert.AreEqual(p0, (double)predicted.Value["SharpeRatio"], 0.001);
        }

Source: AssertUtils.cs
with MIT License
from microsoft

public static void AreEqualSKRect(SKRect expected, SKRect actual)
        {
            Assert.AreEqual(expected.Left, actual.Left, Tolerance);
            Assert.AreEqual(expected.Top, actual.Top, Tolerance);
            Assert.AreEqual(expected.Right, actual.Right, Tolerance);
            Assert.AreEqual(expected.Bottom, actual.Bottom, Tolerance);
        }

Source: ProbabilityTests.cs
with Apache License 2.0
from nesfit

[TestCase(3, 10000)]
        [TestCase(3, 100000)]
        [TestCase(4, 10000)]
        [TestCase(4, 100000)]
        [TestCase(5, 10000)]
        [TestCase(5, 100000)]
        public void Test_Normal_Estimation(int d, int n)
        {
            // generate mu and sigma
            Vector means = Vector.Zeros(d);
            // assuming diagonal covariance matrix
            // for generation purposes equal to the
            // sqrt of the mean (easy to test)
            Vector sigma = Vector.Zeros(d);
            for (int i = 0; i < d; i++)
            {
                means[i] = Sampling.GetUniform() * 10;
                sigma[i] = sqrt(means[i]);
            }
            

            Matrix data = Matrix.Zeros(n, d);

            for (int i = 0; i < n; i++)
                for (int j = 0; j < d; j++)
                    data[i, j] = Sampling.GetNormal(means[j], sigma[j]);

            NormalDistribution dstrb = new NormalDistribution();
            dstrb.Estimate(data);

            var cov = dstrb.Sigma.Diag();
            for (int i = 0; i < d; i++)
            {
                // test mean (the relative difference should be 0, within 10% tolerance)
                Assert.AreEqual(diff(means[i], dstrb.Mu[i]), 0, 0.1);
                // test covariance (sigma was set to sqrt(mean), so the estimated
                // variance should equal the mean, within 10% tolerance)
                Assert.AreEqual(diff(means[i], cov[i]), 0, 0.1);
            }
        }
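The diff helper is not shown in this excerpt. One plausible definition consistent with the 10% tolerance above is the relative difference (an assumption; the project's actual helper may differ):

    // Hypothetical relative-difference helper (assumed, not shown in the excerpt):
    static double diff(double expected, double actual)
        => System.Math.Abs(expected - actual) / System.Math.Abs(expected);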

Source: SequentialMinimalOptimizationRegressionTest.cs
with MIT License
from PacktPublishing

[Test]
        public void TrainTest()
        {
            Accord.Math.Tools.SetupGenerator(0);

            // Example regression problem. Suppose we are trying
            // to model the following equation: f(x, y) = 2x + y

            double[][] inputs = // (x, y)
            {
                new double[] { 0,  1 }, // 2*0 + 1 =  1
                new double[] { 4,  3 }, // 2*4 + 3 = 11
                new double[] { 8, -8 }, // 2*8 - 8 =  8
                new double[] { 2,  2 }, // 2*2 + 2 =  6
                new double[] { 6,  1 }, // 2*6 + 1 = 13
                new double[] { 5,  4 }, // 2*5 + 4 = 14
                new double[] { 9,  1 }, // 2*9 + 1 = 19
                new double[] { 1,  6 }, // 2*1 + 6 =  8
            };

            double[] outputs = // f(x, y)
            {
                1, 11, 8, 6, 13, 14, 19, 8
            };

            // Create Kernel Support Vector Machine with a Polynomial Kernel of 2nd degree
            var machine = new KernelSupportVectorMachine(new Polynomial(2), inputs: 2);

            // Create the sequential minimal optimization teacher
            var learn = new SequentialMinimalOptimizationRegression(machine, inputs, outputs)
            {
                Complexity = 100
            };

            // Run the learning algorithm
            double error = learn.Run();

            // Compute the answer for one particular example
            double fxy = machine.Compute(inputs[0]); // 1.0003849827673186

            // Check for correct answers
            double[] answers = new double[inputs.Length];
            for (int i = 0; i < answers.Length; i++)
                answers[i] = machine.Compute(inputs[i]);

            Assert.AreEqual(1.0, fxy, 1e-2);
            for (int i = 0; i < outputs.Length; i++)
                Assert.AreEqual(outputs[i], answers[i], 1e-2);
        }

Source: SequentialMinimalOptimizationRegressionTest.cs
with MIT License
from PacktPublishing

[Test]
        public void learn_test()
        {
            #region doc_learn
            Accord.Math.Random.Generator.Seed = 0;

            // Example regression problem. Suppose we are trying
            // to model the following equation: f(x, y) = 2x + y

            double[][] inputs = // (x, y)
            {
                new double[] { 0,  1 }, // 2*0 + 1 =  1
                new double[] { 4,  3 }, // 2*4 + 3 = 11
                new double[] { 8, -8 }, // 2*8 - 8 =  8
                new double[] { 2,  2 }, // 2*2 + 2 =  6
                new double[] { 6,  1 }, // 2*6 + 1 = 13
                new double[] { 5,  4 }, // 2*5 + 4 = 14
                new double[] { 9,  1 }, // 2*9 + 1 = 19
                new double[] { 1,  6 }, // 2*1 + 6 =  8
            };

            double[] outputs = // f(x, y)
            {
                1, 11, 8, 6, 13, 14, 19, 8
            };

            // Create the sequential minimal optimization teacher
            var learn = new SequentialMinimalOptimizationRegression<Polynomial>()
            {
                Kernel = new Polynomial(2), // Polynomial Kernel of 2nd degree
                Complexity = 100
            };

            // Run the learning algorithm
            SupportVectorMachine<Polynomial> svm = learn.Learn(inputs, outputs);

            // Compute the predicted scores
            double[] predicted = svm.Score(inputs);

            // Compute the error between the expected and predicted
            double error = new SquareLoss(outputs).Loss(predicted);

            // Compute the answer for one particular example
            double fxy = svm.Score(inputs[0]); // 1.0003849827673186
            #endregion

            Assert.AreEqual(1.0, fxy, 1e-2);
            for (int i = 0; i < outputs.Length; i++)
                Assert.AreEqual(outputs[i], predicted[i], 1e-2);
        }

Source: NonlinearConjugateGradientTest.cs
with MIT License
from PacktPublishing

[Test]
        public void MinimizeTest()
        {
            Func<double[], double> f = rosenbrockFunction;
            Func<double[], double[]> g = rosenbrockGradient;

            Assert.AreEqual(104, f(new[] { -1.0, 2.0 }));


            int n = 2; // number of variables
            double[] initial = { -1.2, 1 };

            var cg = new NonlinearConjugateGradient(n, f, g);

            Assert.IsTrue(cg.Minimize(initial));
            double actual = cg.Value;
            double expected = 0;
            Assert.AreEqual(expected, actual, 1e-6);

            double[] result = cg.Solution;

            Assert.AreEqual(180, cg.Evaluations);
            Assert.AreEqual(67, cg.Iterations);
            Assert.AreEqual(1.0, result[0], 1e-3);
            Assert.AreEqual(1.0, result[1], 1e-3);
            Assert.IsFalse(double.IsNaN(result[0]));
            Assert.IsFalse(double.IsNaN(result[1]));

            double y = f(result);
            double[] d = g(result);

            Assert.AreEqual(0.0, y, 1e-6);
            Assert.AreEqual(0.0, d[0], 1e-3);
            Assert.AreEqual(0.0, d[1], 1e-3);

            Assert.IsFalse(double.IsNaN(y));
            Assert.IsFalse(double.IsNaN(d[0]));
            Assert.IsFalse(double.IsNaN(d[1]));
        }
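The rosenbrockFunction and rosenbrockGradient members are not shown in the excerpt, but the standard Rosenbrock definitions are consistent with it: f(-1, 2) = (1 - (-1))^2 + 100 * (2 - 1)^2 = 104, matching the first assertion. A sketch under that assumption (requires using System;):

    // Standard Rosenbrock function and gradient; assumed to match the
    // members referenced above (consistent with the f(-1, 2) == 104 check).
    static double rosenbrockFunction(double[] x)
        => Math.Pow(1 - x[0], 2) + 100 * Math.Pow(x[1] - x[0] * x[0], 2);

    static double[] rosenbrockGradient(double[] x) => new[]
    {
        // df/dx0 = -2 (1 - x0) - 400 x0 (x1 - x0^2)
        -2 * (1 - x[0]) - 400 * x[0] * (x[1] - x[0] * x[0]),
        // df/dx1 = 200 (x1 - x0^2)
        200 * (x[1] - x[0] * x[0])
    };

The global minimum is at (1, 1) with f = 0, which is exactly what the assertions on cg.Value and result check.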

Source: HighPassFilterTest.cs
with MIT License
from PacktPublishing

[Test]
        public void sample_test()
        {
            string basePath = NUnit.Framework.TestContext.CurrentContext.TestDirectory;
            string pathWhereTheDatasetShouldBeStored = Path.Combine(basePath, "mfcc");

            #region doc_example1
            // Let's say we would like to analyse an audio sample. To give an example that
            // could be reproduced by anyone, without requiring a specific sound file to
            // have been downloaded by every user trying to run this example,
            // we will obtain an example from the Free Spoken Digits Dataset instead:
            var fsdd = new FreeSpokenDigitsDataset(path: pathWhereTheDatasetShouldBeStored);

            // Let's obtain one of the audio signals:
            Signal a = fsdd.GetSignal(0, "jackson", 10);
            int sampleRate = a.SampleRate; // 8000

            // Note: if you would like to load a signal from the 
            // disk, you could use the following method directly:
            // Signal a = Signal.FromFile(fileName);

            // Create a high-pass filter to keep only frequencies above 2000 Hz
            var filter = new HighPassFilter(frequency: 2000, sampleRate: sampleRate);

            // Apply the filter to the signal
            Signal result = filter.Apply(a);

            // Create a spectrogram for the original
            var sourceSpectrum = new Spectrogram(a);

            // Create a spectrogram for the filtered signal:
            var resultSpectrum = new Spectrogram(result);

            // Get the count for a low frequency (100 Hz) before and after the high-pass filter:
            double before = sourceSpectrum.GetFrequencyCount(windowIndex: 0, frequency: 100); // 0.0015747246599406217
            double after = resultSpectrum.GetFrequencyCount(windowIndex: 0, frequency: 100);  // 7.7444174980265885E-05
            #endregion

            Assert.AreEqual(0.0015747246599406217, before, 1e-8);
            Assert.AreEqual(7.7444174980265885E-05, after, 1e-8);
        }

Source: LowPassFilterTest.cs
with MIT License
from PacktPublishing

[Test]
        public void ApplyTest()
        {
            int n = 16384;
            int sampleRate = 1000;

            double f1 = 22;
            double f2 = 300;

            Signal cosine = new CosineGenerator(f1, 1, sampleRate).Generate(n);
            Signal sine = new SineGenerator(f2, 1, sampleRate).Generate(n);

            var merge = new AddFilter(cosine);
            merge.Normalize = true;
            Signal original = merge.Apply(sine);

            var of1 = FindFrequencyCount(sampleRate, original, f1);
            var of2 = FindFrequencyCount(sampleRate, original, f2);
            Assert.AreEqual(0.359128660199268, of1, 1e-8);
            Assert.AreEqual(0.47955332752802149, of2, 1e-8);

            Signal lowFiltered1 = new LowPassFilter(f1, sampleRate).Apply(original);
            Signal lowFiltered2 = new LowPassFilter(f2, sampleRate).Apply(original);

            Signal highFiltered1 = new HighPassFilter(f1, sampleRate).Apply(original);
            Signal highFiltered2 = new HighPassFilter(f2, sampleRate).Apply(original);

            var lf11 = FindFrequencyCount(sampleRate, lowFiltered1, f1);
            var lf12 = FindFrequencyCount(sampleRate, lowFiltered1, f2);
            Assert.AreEqual(0.24589601823749971, lf11, 1e-8); // should be higher
            Assert.AreEqual(0.038266797164259778, lf12, 1e-8);
            Assert.IsTrue(lf11 > lf12);

            var lf21 = FindFrequencyCount(sampleRate, lowFiltered2, f1);
            var lf22 = FindFrequencyCount(sampleRate, lowFiltered2, f2);
            Assert.AreEqual(0.35642263929018364, lf21, 1e-8); // should not have much difference
            Assert.AreEqual(0.271181864130875, lf22, 1e-8);

            var hf11 = FindFrequencyCount(sampleRate, highFiltered1, f1);
            var hf12 = FindFrequencyCount(sampleRate, highFiltered1, f2);
            Assert.AreEqual(0.24542517074628975, hf11, 1e-8);  // should not have much difference
            Assert.AreEqual(0.44797847700473359, hf12, 1e-8);

            var hf21 = FindFrequencyCount(sampleRate, highFiltered2, f1);
            var hf22 = FindFrequencyCount(sampleRate, highFiltered2, f2);
            Assert.AreEqual(0.026113299330488803, hf21, 1e-8);
            Assert.AreEqual(0.23279968506488344, hf22, 1e-8); // should be higher
            Assert.IsTrue(hf22 > hf21);

            Assert.AreEqual(16384, cosine.Duration.TotalMilliseconds);
            Assert.AreEqual(16384, sine.Duration.TotalMilliseconds);
            Assert.AreEqual(16384, original.Duration.TotalMilliseconds);
        }

Source: NonlinearConjugateGradientTest.cs
with MIT License
from PacktPublishing

[Test]
        public void ConstructorTest2()
        {
            Func<double[], double> function = // min f(x) = 10 * (x+1)^2 + y^2
                x => 10.0 * Math.Pow(x[0] + 1.0, 2.0) + Math.Pow(x[1], 2.0);

            Func<double[], double[]> gradient = x => new[] { 20 * (x[0] + 1), 2 * x[1] };

            NonlinearConjugateGradient target = new NonlinearConjugateGradient(2)
            {
                Function = function,
                Gradient = gradient
            };

            Assert.IsTrue(target.Minimize());
            double minimum = target.Value;

            double[] solution = target.Solution;

            Assert.AreEqual(0, minimum, 1e-10);
            Assert.AreEqual(-1, solution[0], 1e-5);
            Assert.AreEqual(0, solution[1], 1e-5);

            double expectedMinimum = function(target.Solution);
            Assert.AreEqual(expectedMinimum, minimum);
        }

Source: NonlinearConjugateGradientTest.cs
with MIT License
from PacktPublishing

[Test]
        public void min_test()
        {
            #region doc_minimize
            // Ensure that results are reproducible
            Accord.Math.Random.Generator.Seed = 0;

            // Suppose we would like to find the minimum of the function
            // 
            //   f(x,y)  =  -exp{-(x-1)²} - exp{-(y-2)²/2}
            //

            // First we need to write down the function, either as a named
            // method, an anonymous method, or as a lambda function:

            Func<double[], double> f = (x) =>
                -Math.Exp(-Math.Pow(x[0] - 1, 2)) - Math.Exp(-0.5 * Math.Pow(x[1] - 2, 2));

            // Now, we need to write its gradient, which is just the
            // vector of first partial derivatives del_f / del_x, as:
            //
            //   g(x,y)  =  { del f / del x, del f / del y }
            // 

            Func<double[], double[]> g = (x) => new double[]
            {
                // df/dx = 2 e^(-    (x-1)^2) (x-1)
                2 * Math.Exp(-Math.Pow(x[0] - 1, 2)) * (x[0] - 1),

                // df/dy =   e^(-1/2 (y-2)^2) (y-2)
                Math.Exp(-0.5 * Math.Pow(x[1] - 2, 2)) * (x[1] - 2)
            };

            // Finally, we create a fmincg solver for the two variable problem:
            var fmincg = new NonlinearConjugateGradient(numberOfVariables: 2)
            {
                Function = f,
                Gradient = g
            };

            // And then minimize the function:
            bool success = fmincg.Minimize();     // should be true
            double minValue = fmincg.Value;       // should be -2
            double[] solution = fmincg.Solution;  // should be (1, 2)

            // The resultant minimum value should be -2, and the solution
            // vector should be { 1.0, 2.0 }. The answer can be checked on
            // Wolfram Alpha by clicking the following link:

            // http://www.wolframalpha.com/input/?i=maximize+%28exp%28-%28x-1%29%C2%B2%29+%2B+exp%28-%28y-2%29%C2%B2%2F2%29%29
            #endregion

            Assert.IsTrue(success);
            double expected = -2;
            Assert.AreEqual(expected, minValue, 1e-10);

            Assert.AreEqual(1, solution[0], 1e-3);
            Assert.AreEqual(2, solution[1], 1e-3);

        }

Source: LowPassFilterTest.cs
with MIT License
from PacktPublishing

[Test]
        public void sample_test()
        {
            string basePath = NUnit.Framework.TestContext.CurrentContext.TestDirectory;
            string pathWhereTheDatasetShouldBeStored = Path.Combine(basePath, "mfcc");

            #region doc_example1
            // Let's say we would like to analyse an audio sample. To give an example that
            // could be reproduced by anyone, without requiring a specific sound file to
            // have been downloaded by every user trying to run this example,
            // we will obtain an example from the Free Spoken Digits Dataset instead:
            var fsdd = new FreeSpokenDigitsDataset(path: pathWhereTheDatasetShouldBeStored);

            // Let's obtain one of the audio signals:
            Signal a = fsdd.GetSignal(0, "jackson", 10);
            int sampleRate = a.SampleRate; // 8000

            // Note: if you would like to load a signal from the 
            // disk, you could use the following method directly:
            // Signal a = Signal.FromFile(fileName);

            // Create a low-pass filter to keep only frequencies below 100 Hz
            var filter = new LowPassFilter(frequency: 100, sampleRate: sampleRate);

            // Apply the filter to the signal
            Signal result = filter.Apply(a);

            // Create a spectrogram for the original
            var sourceSpectrum = new Spectrogram(a);

            // Create a spectrogram for the filtered signal:
            var resultSpectrum = new Spectrogram(result);

            // Get the count for a high frequency before and after the low-pass filter:
            double before = sourceSpectrum.GetFrequencyCount(windowIndex: 0, frequency: 1000); // 0.00028203820434203334
            double after = resultSpectrum.GetFrequencyCount(windowIndex: 0, frequency: 1000);  // 2.9116651158267508E-05
            #endregion

            Assert.AreEqual(0.00028203820434203334, before, 1e-8);
            Assert.AreEqual(2.9116651158267508E-05, after, 1e-8);
        }

Source: BagOfAudioWordsTest.cs
with MIT License
from PacktPublishing

[Test]
        public void learn()
        {
            Type t = typeof(WaveEncoder);

            string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "learn");

            #region doc_learn
            // Ensure results are reproducible
            Accord.Math.Random.Generator.Seed = 0;

            // The Bag-of-Audio-Words model converts audio signals of arbitrary
            // size into fixed-length feature vectors. In this example, we
            // will be setting the codebook size to 32. This means all feature
            // vectors that will be generated will have the same length of 32.

            // By default, the BoW object will use the MFCC extractor as the 
            // feature extractor and K-means as the clustering algorithm.

            // Create a new Bag-of-Audio-Words (BoW) model
            var bow = BagOfAudioWords.Create(numberOfWords: 32);
            // Note: a simple BoW model can also be created using
            // var bow = new BagOfAudioWords(numberOfWords: 10);

            // Get some training audio files
            FreeSpokenDigitsDataset fsdd = new FreeSpokenDigitsDataset(basePath);
            string[] trainFileNames = fsdd.Training.LocalPaths;
            int[] trainOutputs = fsdd.Training.Digits;

            // Compute the model
            bow.Learn(trainFileNames);

            // After this point, we will be able to translate
            // the signals into double[] feature vectors using
            double[][] trainInputs = bow.Transform(trainFileNames);

            // We can also check some statistics about the dataset:
            int numberOfSignals = bow.Statistics.TotalNumberOfInstances; // 1350

            // Statistics about all the descriptors that have been extracted:
            int totalDescriptors = bow.Statistics.TotalNumberOfDescriptors; // 29106
            double totalMean = bow.Statistics.TotalNumberOfDescriptorsPerInstance.Mean; // 21.56
            double totalVar = bow.Statistics.TotalNumberOfDescriptorsPerInstance.Variance; // 52.764002965159314
            IntRange totalRange = bow.Statistics.TotalNumberOfDescriptorsPerInstanceRange; // [8, 115]

            // Statistics only about the descriptors that have been actually used:
            int takenDescriptors = bow.Statistics.NumberOfDescriptorsTaken; // 29106
            double takenMean = bow.Statistics.NumberOfDescriptorsTakenPerInstance.Mean; // 21.56
            double takenVar = bow.Statistics.NumberOfDescriptorsTakenPerInstance.Variance; // 52.764002965159314
            IntRange takenRange = bow.Statistics.NumberOfDescriptorsTakenPerInstanceRange; // [8, 115]
            #endregion

            Assert.AreEqual(1350, numberOfSignals);

            Assert.AreEqual(29106, totalDescriptors);
            Assert.AreEqual(21.56, totalMean);
            Assert.AreEqual(52.764002965159314, totalVar, 1e-8);
            Assert.AreEqual(new IntRange(8, 115), totalRange);

            Assert.AreEqual(29106, takenDescriptors);
            Assert.AreEqual(21.56, takenMean);
            Assert.AreEqual(52.764002965159314, takenVar, 1e-8);
            Assert.AreEqual(new IntRange(8, 115), takenRange);


            var kmeans = bow.Clustering as KMeans;
            Assert.AreEqual(13, kmeans.Clusters.NumberOfInputs);
            Assert.AreEqual(32, kmeans.Clusters.NumberOfOutputs);
            Assert.AreEqual(32, kmeans.Clusters.NumberOfClasses);

            #region doc_classification

            // Now, the features can be used to train any classification
            // algorithm as if they were the signals themselves. For example,
            // we can use them to train a Chi-square SVM as shown below:

            // Create the SMO algorithm to learn a Chi-Square kernel SVM
            var teacher = new MulticlassSupportVectorLearning<ChiSquare>()
            {
                Learner = (p) => new SequentialMinimalOptimization<ChiSquare>()
            };

            // Obtain a learned machine
            var svm = teacher.Learn(trainInputs, trainOutputs);

            // Use the machine to classify the features
            int[] output = svm.Decide(trainInputs);

            // Compute the error between the expected and predicted labels for the training set:
            var trainMetrics = GeneralConfusionMatrix.Estimate(svm, trainInputs, trainOutputs);
            double trainAcc = trainMetrics.Accuracy; // should be around 0.97259259259259256

            // Now, we can evaluate the performance of the model on the testing set:
            string[] testFileNames = fsdd.Testing.LocalPaths;
            int[] testOutputs = fsdd.Testing.Digits;

            // First we transform the testing set to double[]:
            double[][] testInputs = bow.Transform(testFileNames);

            // Then we compute the error between expected and predicted for the testing set:
            var testMetrics = GeneralConfusionMatrix.Estimate(svm, testInputs, testOutputs);
            double testAcc = testMetrics.Accuracy; // should be around 0.8666666666666667
            #endregion

            Assert.AreEqual(0.97259259259259256, trainAcc, 1e-8);
            Assert.AreEqual(0.8666666666666667, testAcc, 1e-8);
        }

Source: MFCCTests.cs
with MIT License
from PacktPublishing

[Test]
        public void dctTest()
        {
            double[,] input =
            {
                { 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.862745098039216, 0, 0.662745098039216, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.529411764705882, 0.129411764705882, 0.262745098039216, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.0627450980392157, 0.662745098039216, 0.0627450980392157, 0.862745098039216, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.662745098039216, 0, 0, 0, 0.529411764705882, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.262745098039216, 0.529411764705882, 0.996078431372549, 0.729411764705882, 0.0627450980392157, 0.996078431372549, 0.996078431372549 },
                { 0.862745098039216, 0, 0.929411764705882, 0.996078431372549, 0.996078431372549, 0.129411764705882, 0.662745098039216, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549 }
            };

            double[,] actual = MFCC.dct(input);

            double[,] expected =
            {
                {2.81733525e+00, 1.66533454e-16, -1.66533454e-16, 3.33066907e-16, 1.11022302e-16, -2.22044605e-16, -2.16493490e-15, 3.35842465e-15, -2.89805495e-15, 1.27675648e-15, -1.38777878e-16, 1.11022302e-16, -5.55111512e-16},
                {2.30017676e+00,  -1.01685593e-01,  6.39620389e-01,  2.49486410e-01, -4.22877585e-01, -2.88532226e-01,  1.92779960e-01,  2.69574011e-01, -2.79684150e-15, -2.69574011e-01, -1.92779960e-01,  2.88532226e-01, 4.22877585e-01},
                {2.08665825e+00, -1.42639076e-01, 8.28396427e-01, 2.65887914e-01, -4.00693843e-01, -1.00952383e-01, 9.05748550e-02, -1.28623891e-01, -2.98582131e-15, 1.28623891e-01, -9.05748550e-02, 1.00952383e-01,  4.00693843e-01},
                {1.99237734e+00,  -1.63700997e-01,   7.89221535e-01,   2.25643042e-01,  -7.07106781e-02,   1.71404755e-01,  -2.50369577e-01,  -6.26823429e-01,  -3.06334101e-15,   6.26823429e-01,   2.50369577e-01,  -1.71404755e-01,   7.07106781e-02},
                {1.47799182e+00,  -2.85640977e-01,   1.13636006e+00,   2.92134687e-01,  -6.93241942e-02,   1.11822658e-01,  -1.40537494e-01,  -1.27499859e-01,  -2.50926526e-15,   1.27499859e-01,   1.40537494e-01,  -1.11822658e-01,   6.93241942e-02},
                {1.96880712e+00,  -1.49227094e-01,   2.50745082e-01,  -2.31392812e-01,   6.59966329e-01,   5.16004960e-01,  -2.56935387e-01,   2.66947291e-01,  -1.83232338e-15,  -2.66947291e-01,   2.56935387e-01,  -5.16004960e-01,  -6.59966329e-01},
                {1.97019360e+00,  -1.18683797e-01,  -1.37378296e-01,  -3.83097842e-01,   7.52860749e-01,   3.66003432e-01,   1.57452149e-01,   5.03682548e-01,  -1.59583599e-15,  -5.03682548e-01,  -1.57452149e-01,  -3.66003432e-01,  -7.52860749e-01},
                {2.81733525e+00,   1.66533454e-16,  -1.66533454e-16,   3.33066907e-16,   1.11022302e-16,  -2.22044605e-16,  -2.16493490e-15,  3.35842465e-15,  -2.89805495e-15,   1.27675648e-15,  -1.38777878e-16,   1.11022302e-16,  -5.55111512e-16}
            };

            Assert.AreEqual(8, actual.GetLength(0));
            Assert.AreEqual(13, actual.GetLength(1));

            for (int w_i = 0; w_i < expected.GetLength(0); w_i++)
            {
                for (int w_j = 0; w_j < expected.GetLength(1); w_j++)
                    Assert.AreEqual(expected[w_i, w_j], actual[w_i, w_j], 0.000001);
            }
        }
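The output for each constant input row is 0.996078431372549 * 8 / sqrt(8) ≈ 2.81733525, consistent with an orthonormal type-II DCT applied along each row (a standard formula, stated as an assumption about what MFCC.dct implements):

$$y_k = c_k \sum_{n=0}^{N-1} x_n \cos\!\left[\frac{\pi k (2n + 1)}{2N}\right], \qquad c_0 = \sqrt{1/N}, \quad c_{k>0} = \sqrt{2/N}$$

with N = 8 input columns, evaluated for k = 0, ..., 12 to yield the 13 output columns.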

Source: MFCCTests.cs
with MIT License
from PacktPublishing

[Test]
        public void dct2Test()
        {
            double[,] input =
            {
                { 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.862745098039216, 0, 0.662745098039216, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.529411764705882, 0.129411764705882, 0.262745098039216, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.0627450980392157, 0.662745098039216, 0.0627450980392157, 0.862745098039216, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.662745098039216, 0, 0, 0, 0.529411764705882, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.262745098039216, 0.529411764705882, 0.996078431372549, 0.729411764705882, 0.0627450980392157, 0.996078431372549, 0.996078431372549 },
                { 0.862745098039216, 0, 0.929411764705882, 0.996078431372549, 0.996078431372549, 0.129411764705882, 0.662745098039216, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549 }
            };

            double[,] actual = MFCC.dct2(input);

            double[,] expected =
            {
                {1.99215686e+00,   8.32667268e-17,  -8.32667268e-17,   1.66533454e-16,   5.55111512e-17,  -1.11022302e-16,  -1.08246745e-15,   1.67921232e-15,  -1.44902748e-15,   6.38378239e-16,  -6.93889390e-17,   5.55111512e-17,  -2.77555756e-16},
                {1.62647059e+00,  -5.08427964e-02,   3.19810194e-01,   1.24743205e-01,  -2.11438792e-01,  -1.44266113e-01,   9.63899799e-02,   1.34787005e-01,  -1.39842075e-15,  -1.34787005e-01,  -9.63899799e-02,   1.44266113e-01,   2.11438792e-01},
                {1.47549020e+00,  -7.13195379e-02,   4.14198213e-01,   1.32943957e-01,  -2.00346921e-01,  -5.04761913e-02,   4.52874275e-02,  -6.43119454e-02,  -1.49291065e-15,   6.43119454e-02,  -4.52874275e-02,   5.04761913e-02,   2.00346921e-01},
                {1.40882353e+00,  -8.18504983e-02,   3.94610767e-01,   1.12821521e-01,  -3.53553391e-02,   8.57023774e-02,  -1.25184788e-01,  -3.13411715e-01,  -1.53167050e-15,   3.13411715e-01,   1.25184788e-01,  -8.57023774e-02,   3.53553391e-02},
                {1.04509804e+00,  -1.42820489e-01,   5.68180030e-01,   1.46067343e-01,  -3.46620971e-02,   5.59113288e-02,  -7.02687468e-02,  -6.37499293e-02,  -1.25463263e-15,   6.37499293e-02,   7.02687468e-02,  -5.59113288e-02,   3.46620971e-02},
                {1.39215686e+00,  -7.46135469e-02,   1.25372541e-01,  -1.15696406e-01,   3.29983165e-01,   2.58002480e-01,  -1.28467693e-01,   1.33473645e-01,  -9.16161690e-16,  -1.33473645e-01,   1.28467693e-01,  -2.58002480e-01,  -3.29983165e-01},
                {1.39313725e+00,  -5.93418985e-02,  -6.86891478e-02,  -1.91548921e-01,   3.76430375e-01,   1.83001716e-01,   7.87260746e-02,   2.51841274e-01,  -7.97917996e-16,  -2.51841274e-01,  -7.87260746e-02,  -1.83001716e-01,  -3.76430375e-01},
                {1.99215686e+00,   8.32667268e-17,  -8.32667268e-17,   1.66533454e-16,   5.55111512e-17,  -1.11022302e-16,  -1.08246745e-15,   1.67921232e-15,  -1.44902748e-15,   6.38378239e-16,  -6.93889390e-17,   5.55111512e-17,  -2.77555756e-16}
            };

            Assert.AreEqual(8, actual.GetLength(0));
            Assert.AreEqual(13, actual.GetLength(1));

            for (int w_i = 0; w_i < expected.GetLength(0); w_i++)
            {
                for (int w_j = 0; w_j < expected.GetLength(1); w_j++)
                    Assert.AreEqual(expected[w_i, w_j], actual[w_i, w_j], 0.000001);
            }
        }

Source: MFCCTests.cs
with MIT License
from PacktPublishing

[Test]
        public void dct3Test()
        {
            double[,] input =
            {
                {  6.1917, -0.3411,  1.2418,  0.1492},
                {  0.2205,  0.0214,  0.4503,  0.3947},
                {  1.0423,  0.2214, -1.0017, -0.2720},
                { -0.2340, -0.0392, -0.2617, -0.2866}
            };

            double[,] actual = MFCC.dct3(input, 5);

            double[,] expected =
            {
                {3.86377949,  2.36972151,  1.85405,     3.05450388,  4.33719512},
                {0.7269018,  -0.39170376, -0.34005,     0.33390305,  0.2221989},
                {-0.238556,    1.21951535,  1.52285,     0.4418693,  -0.33992865},
                {-0.53446042,  0.21340136,  0.1447,     -0.28566187, -0.12297908}
            };

            Assert.AreEqual(4, actual.GetLength(0));
            Assert.AreEqual(5, actual.GetLength(1));

            for (int w_i = 0; w_i < expected.GetLength(0); w_i++)
            {
                for (int w_j = 0; w_j < expected.GetLength(1); w_j++)
                    Assert.AreEqual(expected[w_i, w_j], actual[w_i, w_j], 0.000001);
            }
        }

Source: SignalTest.cs
with MIT License
from PacktPublishing

[Test]
        public void GetEnergyTest()
        {
            Signal target = Signal.FromArray(data, 8000);

            double expected = 0.54439;
            double actual = target.GetEnergy();
            Assert.AreEqual(expected, actual, 1e-4);
        }

Source: SignalTest.cs
with MIT License
from PacktPublishing

[Test]
        public void GetEnergyTest_doc()
        {
            string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "energy");

            #region doc_energy
            // Let's say we would like to compute the energy of an audio signal. For this,
            // we will take an example signal from the Free Spoken Digits Dataset (FSDD):
            FreeSpokenDigitsDataset fsdd = new FreeSpokenDigitsDataset(basePath);
            Signal signal = fsdd.GetSignal(digit: 3, speaker: "jackson", index: 0);

            // The energy is defined as the sum of squared values in all 
            // channels of the audio signal. In this case, it should be:
            double energy = signal.GetEnergy(); // 19.448728048242629
            #endregion

            Assert.AreEqual(19.448728048242629, energy, 1e-10);
        }
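In formula form, the energy described by the comment above is

$$E = \sum_{c=1}^{C} \sum_{n=1}^{N} x_{c,n}^{2}$$

summed over all C channels and N samples of the signal.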

Source: HaralickTest.cs
with MIT License
from PacktPublishing

[Test]
        public void ComputeTest()
        {
            int size = 255;
            UnmanagedImage output = createGradient(size);

            Haralick haralick = new Haralick()
            {
                Mode = HaralickMode.AverageWithRange
            };

            Assert.AreEqual(13, haralick.Features);
            Assert.AreEqual(4, haralick.Degrees.Count);

            List<double[]> result = haralick.ProcessImage(output);

            GrayLevelCooccurrenceMatrix glcm = haralick.Matrix;
            HaralickDescriptorDictionary[,] features = haralick.Descriptors;

            Assert.AreEqual(1, features.GetLength(0));
            Assert.AreEqual(1, features.GetLength(1));


            Assert.AreEqual(1, result.Count);
            double[] actual = result[0];
            double[] expected =
            {
                0.00393314806237454, 1.54392465647286E-05, 0.749999999999999,
                0.999999999999998, 3.63895932683582E+37, 1.45558373073433E+38,
                126.87498462129943, 1, 0.624999999999999,
                0.499999999999998, 254, 3.12638803734444E-13, 83280.6247942052,
                167.714124263744, 5.5383165865535, 0.00392927813992738,
                5.5383165865535, 0.00392927813992738, 0.00392156862745099,
                1.12757025938492E-17, 2.02615701994091E-15,
                1.4432899320127E-15, -1, 0, 0.999992265120898,
                6.06657187818271E-08
            };

            // string str = actual.ToString(Math.Formats.CSharpArrayFormatProvider.InvariantCulture);

            Assert.AreEqual(26, actual.Length);
            for (int i = 0; i < actual.Length; i++)
                Assert.AreEqual(expected[i], actual[i], System.Math.Abs(expected[i]) * 1e-10);
        }
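Note the tolerance in the final loop: the delta scales with each expected value's magnitude, which turns AreEqual into a relative comparison. The same idiom factored into a helper (hypothetical, not part of the original test):

    // Hypothetical helper: delta proportional to the expected magnitude.
    // When expected is 0, the delta collapses to 0 and exact equality is
    // required, just like the inline expression in the loop above.
    static void AssertRelativelyEqual(double expected, double actual, double rtol = 1e-10)
        => NUnit.Framework.Assert.AreEqual(expected, actual, System.Math.Abs(expected) * rtol);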

Source: BitmapSourceToArrayTest.cs
with MIT License
from PacktPublishing

[Test]
        public void ConvertTest6()
        {
            var target = new BitmapSourceToArray();

            double[] input =
            {
                0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.42, 0.42, 0.42, 0.42, 0.42, // 0
                0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, // 1
            };

            BitmapSource image = input.ToBitmapSource(2, 16);
            Assert.AreEqual(PixelFormats.Gray32Float, image.Format);

            Assert.AreEqual(1, image.GetNumberOfChannels());

            double[] output;
            target.Channel = RGB.R;
            target.Convert(image, out output);

            for (int i = 0; i < input.Length; i++)
                Assert.AreEqual(input[i], output[i], 1e-5);
        }

Source: NaiveBayes`1Test.cs
with MIT License
from PacktPublishing

[Test]
        public void ComputeTest()
        {
            DataTable data = new DataTable("Mitchell's Tennis Example");

            data.Columns.Add("Day", "Outlook", "Temperature", "Humidity", "Wind", "PlayTennis");

            data.Rows.Add("D1", "Sunny", "Hot", "High", "Weak", "No");
            data.Rows.Add("D2", "Sunny", "Hot", "High", "Strong", "No");
            data.Rows.Add("D3", "Overcast", "Hot", "High", "Weak", "Yes");
            data.Rows.Add("D4", "Rain", "Mild", "High", "Weak", "Yes");
            data.Rows.Add("D5", "Rain", "Cool", "Normal", "Weak", "Yes");
            data.Rows.Add("D6", "Rain", "Cool", "Normal", "Strong", "No");
            data.Rows.Add("D7", "Overcast", "Cool", "Normal", "Strong", "Yes");
            data.Rows.Add("D8", "Sunny", "Mild", "High", "Weak", "No");
            data.Rows.Add("D9", "Sunny", "Cool", "Normal", "Weak", "Yes");
            data.Rows.Add("D10", "Rain", "Mild", "Normal", "Weak", "Yes");
            data.Rows.Add("D11", "Sunny", "Mild", "Normal", "Strong", "Yes");
            data.Rows.Add("D12", "Overcast", "Mild", "High", "Strong", "Yes");
            data.Rows.Add("D13", "Overcast", "Hot", "Normal", "Weak", "Yes");
            data.Rows.Add("D14", "Rain", "Mild", "High", "Strong", "No");

            // Create a new codification codebook to
            // convert strings into discrete symbols
            Codification codebook = new Codification(data,
                "Outlook", "Temperature", "Humidity", "Wind", "PlayTennis");

            int classCount = codebook["PlayTennis"].Symbols; // 2 possible values (yes, no)
            int inputCount = 4; // 4 variables (Outlook, Temperature, Humidity, Wind)

            GeneralDiscreteDistribution[] priors =
            {
                new GeneralDiscreteDistribution(codebook["Outlook"].Symbols),     // 3 possible values (Sunny, overcast, rain)
                new GeneralDiscreteDistribution(codebook["Temperature"].Symbols), // 3 possible values (Hot, mild, cool)
                new GeneralDiscreteDistribution(codebook["Humidity"].Symbols),    // 2 possible values (High, normal)
                new GeneralDiscreteDistribution(codebook["Wind"].Symbols)         // 2 possible values (Weak, strong)
            };

            // Create a new Naive Bayes classifier for the two classes
            var target = new NaiveBayes<GeneralDiscreteDistribution>(classCount, inputCount, priors);

            // Extract symbols from data and train the classifier
            DataTable symbols = codebook.Apply(data);
            double[][] inputs = symbols.ToArray("Outlook", "Temperature", "Humidity", "Wind");
            int[] outputs = symbols.ToArray<int>("PlayTennis");

            // Compute the Naive Bayes model
            target.Estimate(inputs, outputs);


            double logLikelihood;
            double[] responses;

            // Compute the result for a sunny, cool, humid and windy day:
            double[] instance = codebook.Translate("Sunny", "Cool", "High", "Strong").ToDouble();

            int c = target.Compute(instance, out logLikelihood, out responses);

            string result = codebook.Translate("PlayTennis", c);

            Assert.AreEqual("No", result);
            Assert.AreEqual(0, c);
            Assert.AreEqual(0.795, responses[0], 1e-3);
            Assert.AreEqual(1, responses.Sum(), 1e-10);
            Assert.IsFalse(double.IsNaN(responses[0]));
            Assert.AreEqual(2, responses.Length);
        }
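The responses checked above are the class posteriors of the standard Naive Bayes rule (textbook form, stated for context rather than taken from the library source):

$$P(c \mid x_1, \ldots, x_n) \propto P(c) \prod_{i=1}^{n} P(x_i \mid c)$$

normalized so that the responses sum to 1, which is exactly what the assertion on responses.Sum() verifies.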

Source: NaiveBayes`1Test.cs
with MIT License
from PacktPublishing

[Test]
        public void gh_758()
        {
            // Let's say we have the following data to be classified into three
            // non-mutually-exclusive classes. These are the samples:
            //
            double[][] inputs =
            {
                //               input         output
                new double[] { 0, 1, 1, 0 }, //  0 
                new double[] { 0, 1, 0, 0 }, //  0
                new double[] { 0, 0, 1, 0 }, //  0
                new double[] { 0, 1, 1, 0 }, //  0, 1
                new double[] { 0, 1, 0, 0 }, //  0, 1
                new double[] { 1, 0, 0, 0 }, //     1
                new double[] { 1, 0, 0, 0 }, //     1
                new double[] { 1, 0, 0, 1 }, //     1, 2
                new double[] { 0, 0, 0, 1 }, //     1, 2
                new double[] { 0, 0, 0, 1 }, //     1, 2
                new double[] { 1, 1, 1, 1 }, //        2
                new double[] { 1, 0, 1, 1 }, //        2
                new double[] { 1, 1, 0, 1 }, //        2
                new double[] { 0, 1, 1, 1 }, //        2
                new double[] { 1, 1, 1, 1 }, //        2
            };

            int[][] outputs = // those are the class labels
            {
                new[] { 1, 0, 0 },
                new[] { 1, 0, 0 },
                new[] { 1, 0, 0 },
                new[] { 1, 1, 0 },
                new[] { 1, 1, 0 },
                new[] { 0, 1, 0 },
                new[] { 0, 1, 0 },
                new[] { 0, 1, 1 },
                new[] { 0, 1, 1 },
                new[] { 0, 1, 1 },
                new[] { 0, 0, 1 },
                new[] { 0, 0, 1 },
                new[] { 0, 0, 1 },
                new[] { 0, 0, 1 },
                new[] { 0, 0, 1 },
            };

            // Create a new Naive Bayes teacher for 4-dimensional Gaussian distributions
            var teacher = new NaiveBayesLearning<NormalDistribution, NormalOptions, double>()
            {
                Options = new IndependentOptions<NormalOptions>()
                {
                    InnerOption = new NormalOptions()
                    {
                        Regularization = 1e-10
                    }
                }
            };

            teacher.ParallelOptions.MaxDegreeOfParallelism = 1;

            var bayes = teacher.Learn(inputs, outputs);

            double[][] prediction = bayes.Probabilities(inputs);

            // Compute the cross-entropy error between the expected labels
            // and the predicted probabilities:
            double error = new BinaryCrossEntropyLoss(outputs).Loss(prediction);

            Assert.AreEqual(78.465768833015233, error, 1e-8);

            Assert.IsTrue(teacher.optimized);
        }
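For context, a standard form of the binary cross-entropy loss over these multi-label targets is (the exact sign and scaling conventions of Accord's BinaryCrossEntropyLoss are an assumption):

$$L = -\sum_{i}\sum_{c}\left[y_{ic}\ln p_{ic} + (1 - y_{ic})\ln(1 - p_{ic})\right]$$

so a nonzero value such as 78.4657... simply reflects predicted probabilities that are not saturated at 0 or 1.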

Source: BalancedKMeansTest.cs
with MIT License
from PacktPublishing

[Test]
        public void distances_test()
        {
            int numClusters = 6;

            double[][] observations =
            {
                new double[] {  10.8,   18.706148721743876 },
                new double[] { -10.8,   18.706148721743876 },
                new double[] { -21.6,   0.0 },
                new double[] { -10.8, -18.706148721743876 },
                new double[] {  10.8, -18.706148721743876 },
                new double[] {  21.6,   0.0 },
                new double[] {  32.4,  18.706148721743876 },
                new double[] {  21.6,  37.412297443487752 },
                new double[] {   0.0,  37.412297443487752 },
                new double[] { -21.6,  37.412297443487752 },
                new double[] { -32.4,  18.706148721743876 },
                new double[] { -43.2,   0.0 },
                new double[] { -32.4, -18.706148721743876 },
                new double[] { -21.6, -37.412297443487752 },
                new double[] {   0.0, -37.412297443487752 },
                new double[] {  21.6, -37.412297443487752 },
                new double[] {  32.4, -18.706148721743876 },
                new double[] {  43.2,   0.0 }
            };

            var distance = new SquareEuclidean();
            var centroids = new[]
            {
                new[] {   0.00000,   37.41230 },
                new[] { -32.40000,  -18.70615 },
                new[] {  10.80000,   18.70615 },
                new[] { -21.60000,   37.41230 },
                new[] { -10.80000,  -18.70615 },
                new[] {  21.60000,  -37.41230 },
            };

            double[,] expectedDistances =
            {
                { 3.2659e+003, 1.8662e+003, 4.6656e+002, 4.6656e+002, 1.8662e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 4.1990e+003, 3.2659e+003, 1.3997e+003, 4.6656e+002, 1.6339e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 6.0653e+003 },
                { 1.6339e-012, 4.6656e+002, 1.3997e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 4.6656e+002, 4.6656e+002, 4.6656e+002, 1.3997e+003, 1.8662e+003, 3.2659e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003 },
                { 1.3997e+003, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.8662e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 7.4650e+003, 6.0653e+003, 5.5987e+003 },
                { 1.8662e+003, 1.3997e+003, 4.6656e+002, 1.6339e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 4.6656e+002, 4.6656e+002, 1.3997e+003, 1.8662e+003, 3.2659e+003 },
                { 3.2659e+003, 4.1990e+003, 3.2659e+003, 1.3997e+003, 4.6656e+002, 1.3997e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 7.4650e+003, 6.0653e+003, 5.5987e+003, 3.2659e+003, 1.8662e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.8662e+003 },
                { 4.6656e+002, 4.6656e+002, 1.8662e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 6.0653e+003, 5.5987e+003, 6.0653e+003, 4.1990e+003, 3.2659e+003 },
                { 3.2659e+003, 1.8662e+003, 4.6656e+002, 4.6656e+002, 1.8662e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 4.1990e+003, 3.2659e+003, 1.3997e+003, 4.6656e+002, 1.6339e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 6.0653e+003 },
                { 1.6339e-012, 4.6656e+002, 1.3997e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 4.6656e+002, 4.6656e+002, 4.6656e+002, 1.3997e+003, 1.8662e+003, 3.2659e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003 },
                { 1.3997e+003, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.8662e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 7.4650e+003, 6.0653e+003, 5.5987e+003 },
                { 1.8662e+003, 1.3997e+003, 4.6656e+002, 1.6339e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 4.6656e+002, 4.6656e+002, 1.3997e+003, 1.8662e+003, 3.2659e+003 },
                { 3.2659e+003, 4.1990e+003, 3.2659e+003, 1.3997e+003, 4.6656e+002, 1.3997e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 7.4650e+003, 6.0653e+003, 5.5987e+003, 3.2659e+003, 1.8662e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.8662e+003 },
                { 4.6656e+002, 4.6656e+002, 1.8662e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 6.0653e+003, 5.5987e+003, 6.0653e+003, 4.1990e+003, 3.2659e+003 },
                { 3.2659e+003, 1.8662e+003, 4.6656e+002, 4.6656e+002, 1.8662e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 4.1990e+003, 3.2659e+003, 1.3997e+003, 4.6656e+002, 1.6339e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 6.0653e+003 },
                { 1.6339e-012, 4.6656e+002, 1.3997e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 4.6656e+002, 4.6656e+002, 4.6656e+002, 1.3997e+003, 1.8662e+003, 3.2659e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003 },
                { 1.3997e+003, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.8662e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 7.4650e+003, 6.0653e+003, 5.5987e+003 },
                { 1.8662e+003, 1.3997e+003, 4.6656e+002, 1.6339e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 4.6656e+002, 4.6656e+002, 1.3997e+003, 1.8662e+003, 3.2659e+003 },
                { 3.2659e+003, 4.1990e+003, 3.2659e+003, 1.3997e+003, 4.6656e+002, 1.3997e+003, 3.2659e+003, 5.5987e+003, 6.0653e+003, 7.4650e+003, 6.0653e+003, 5.5987e+003, 3.2659e+003, 1.8662e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.8662e+003 },
                { 4.6656e+002, 4.6656e+002, 1.8662e+003, 3.2659e+003, 3.2659e+003, 1.8662e+003, 1.3997e+003, 4.6656e+002, 6.5358e-012, 4.6656e+002, 1.3997e+003, 3.2659e+003, 4.1990e+003, 6.0653e+003, 5.5987e+003, 6.0653e+003, 4.1990e+003, 3.2659e+003 }
            };

            double[][] actualDistances = BalancedKMeans.GetDistances(distance, observations, centroids, numClusters, Jagged.Zeros(18, 18));

            for (int i = 0; i < actualDistances.Length; i++)
            {
                for (int j = 0; j < actualDistances[i].Length; j++)
                {
                    double a = actualDistances[i][j];
                    double e = expectedDistances[i, j];
                    Assert.AreEqual(e, a, 0.1);
                }
            }
        }
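SquareEuclidean is the squared Euclidean distance,

$$d(x, c) = \sum_{j=1}^{D} (x_j - c_j)^2$$

As a spot check of the expected table: the first observation (10.8, 18.706...) against the centroid (0, 37.412...) gives 10.8^2 + 18.706...^2 ≈ 466.56, i.e. the 4.6656e+002 entries, and against the centroid (10.8, 18.706...) it gives (numerically) zero.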

Source: HaralickTest.cs
with MIT License
from PacktPublishing

[Test]
        public void ComputeTest2()
        {
            int size = 255;
            UnmanagedImage output = createGradient(size);

            Haralick haralick = new Haralick()
            {
                Mode = HaralickMode.Combine
            };

            Assert.AreEqual(13, haralick.Features);
            Assert.AreEqual(4, haralick.Degrees.Count);

            List<double[]> result = haralick.ProcessImage(output);

            GrayLevelCooccurrenceMatrix glcm = haralick.Matrix;
            HaralickDescriptorDictionary[,] features = haralick.Descriptors;

            Assert.AreEqual(1, features.GetLength(0));
            Assert.AreEqual(1, features.GetLength(1));


            Assert.AreEqual(1, result.Count);
            double[] actual = result[0];
            double[] expected =
            {
                0.00393700787401572, 0.999999999999998, 353791227646.996,
                126.499984621299, 0.499999999999999, 254, 83238.6962631393,
                5.53733426701852, 5.53733426701852, 0.003921568627451,
                1.66533453693773E-15, -1, 0.999992249954468, 0.00393700787401572,
                0.999999999999998, 353791227646.996, 126.499984621299,
                0.499999999999999, 254, 83238.6962631393, 5.53733426701852,
                5.53733426701852, 0.003921568627451, 1.66533453693773E-15,
                -1, 0.999992249954468, 0.00392156862745099, 0, 1.45558373073433E+38,
                126.999984621299, 0.999999999999997, 254, 83406.410387403,
                5.54126354515845, 5.54126354515845, 0.00392156862745098,
                3.10862446895043E-15, -1, 0.999992310620187, 0.00393700787401572,
                0.999999999999998, 353791227646.996, 127.499984621299,
                0.499999999999999, 254, 83238.6962631393, 5.53733426701852,
                5.53733426701852, 0.003921568627451, 1.66533453693773E-15,
                -1, 0.999992249954468
            };

            string str = actual.ToString(Accord.Math.CSharpArrayFormatProvider.InvariantCulture);
            Assert.IsNotNull(str);

            Assert.AreEqual(52, actual.Length);
            for (int i = 0; i < actual.Length; i++)
                Assert.AreEqual(expected[i], actual[i], System.Math.Abs(expected[i]) * 1e-10);
        }

Source: LocalBinaryPatternsTest.cs
with MIT License
from PacktPublishing

[Test]
        public void doc_test()
        {
            string localPath = TestContext.CurrentContext.TestDirectory;

            #region doc_apply
            // Let's load an example image, such as Lena,
            // from a standard dataset of example images:
            var images = new TestImages(path: localPath);
            Bitmap lena = images["lena.bmp"];

            // Create a new Local Binary Pattern with default values:
            var lbp = new LocalBinaryPattern(blockSize: 3, cellSize: 6);

            // Use it to extract descriptors from the Lena image:
            List<double[]> descriptors = lbp.ProcessImage(lena);

            // Now those descriptors can be used to represent the image itself, as in,
            // for example, the Bag-of-Visual-Words approach for classification.
            #endregion

            Assert.AreEqual(784, descriptors.Count);
            double sum = descriptors.Sum(x => x.Sum());
            Assert.AreEqual(6094.543992693033, sum, 1e-10);
        }

Source: SparseReaderTest.cs
with MIT License
from PacktPublishing

[Test]
        public void ReadSampleTest()
        {
            // http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#iris

            MemoryStream file = new MemoryStream(
                Encoding.Default.GetBytes(Accord.Tests.IO.Properties.Resources.iris_scale));

            // Suppose we are going to read a sparse sample file containing
            //  samples which have an actual dimension of 4. Since the samples
            //  are in a sparse format, each entry in the file will probably
            //  contain far fewer elements.
            int sampleSize = 4;

            // Create a new Sparse Sample Reader to read any given file,
            //  passing the correct dense sample size in the constructor
            SparseReader reader = new SparseReader(stream: file, encoding: Encoding.Default, sampleSize: sampleSize);

            // Declare some variables to receive each current sample
            int label = 0;
            string description;
            double[] sample;

            // Read a sample from the file
            var r = reader.ReadDense();
            sample = r.Item1;
            label = (int)r.Item2;
            description = reader.SampleDescriptions[0];

            Assert.AreEqual(1, label);
            Assert.AreEqual(String.Empty, description);

            Assert.AreEqual(4, sample.Length);
            Assert.AreEqual(-0.555556, sample[0], 0.0001);
            Assert.AreEqual(+0.250000, sample[1], 0.0001);
            Assert.AreEqual(-0.864407, sample[2], 0.0001);
            Assert.AreEqual(-0.916667, sample[3], 0.0001);

            var s = reader.ReadSparse();
            sample = s.Item1.ToSparse();
            label = (int)s.Item2;
            description = reader.SampleDescriptions[0];

            Assert.AreEqual(1, label);
            Assert.AreEqual(String.Empty, description);

            Assert.AreEqual(8, sample.Length);
            Assert.AreEqual(0, sample[0], 0.0001);
            Assert.AreEqual(-0.666667, sample[1], 0.0001);
            Assert.AreEqual(1, sample[2], 0.0001);
            Assert.AreEqual(-0.166667, sample[3], 0.0001);
            Assert.AreEqual(2, sample[4], 0.0001);
            Assert.AreEqual(-0.864407, sample[5], 0.0001);
            Assert.AreEqual(3, sample[6], 0.0001);
            Assert.AreEqual(-0.916667, sample[7], 0.0001);


            int count = 2;

            // Read all samples from the file
            while (!reader.EndOfStream)
            {
                reader.SampleDescriptions.Clear();
                r = reader.ReadDense();
                sample = r.Item1;
                label = (int)r.Item2;
                description = reader.SampleDescriptions[0];
                Assert.IsTrue(label >= 0 && label <= 3);
                Assert.IsTrue(description == String.Empty);
                Assert.AreEqual(4, sample.Length);
                count++;
            }

            Assert.AreEqual(150, count);
        }
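For reference, each line of the LibSVM sparse format read above looks something like "1 1:-0.555556 2:0.25 3:-0.864407 4:-0.916667": a class label followed by 1-based index:value pairs, with absent indices implicitly zero. A minimal sketch of the densification step, assuming invariant-culture numbers (an illustration of the format, not Accord's SparseReader internals):

using System;
using System.Globalization;

public static class SparseLineSketch
{
    // Parses one LibSVM-style line into a dense sample and its label.
    public static Tuple<double[], int> Parse(string line, int sampleSize)
    {
        string[] parts = line.Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);
        int label = int.Parse(parts[0], CultureInfo.InvariantCulture);

        double[] sample = new double[sampleSize]; // dense, zero-initialized
        for (int i = 1; i < parts.Length; i++)
        {
            string[] pair = parts[i].Split(':');
            int index = int.Parse(pair[0], CultureInfo.InvariantCulture) - 1; // 1-based in the file
            sample[index] = double.Parse(pair[1], CultureInfo.InvariantCulture);
        }

        return Tuple.Create(sample, label);
    }
}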

19 Source : NaiveBayes`1Test.cs
with MIT License
from PacktPublishing

[Test]
        public void ComputeTest2()
        {
            DataTable data = new DataTable("Mitchell's Tennis Example");

            data.Columns.Add("Day", "Outlook", "Temperature", "Humidity", "Wind", "PlayTennis");

            // We will set Temperature and Humidity to be continuous
            data.Columns["Temperature"].DataType = typeof(double);
            data.Columns["Humidity"].DataType = typeof(double);

            data.Rows.Add("D1", "Sunny", 38.0, 96.0, "Weak", "No");
            data.Rows.Add("D2", "Sunny", 39.0, 90.0, "Strong", "No");
            data.Rows.Add("D3", "Overcast", 38.0, 75.0, "Weak", "Yes");
            data.Rows.Add("D4", "Rain", 25.0, 87.0, "Weak", "Yes");
            data.Rows.Add("D5", "Rain", 12.0, 30.0, "Weak", "Yes");
            data.Rows.Add("D6", "Rain", 11.0, 35.0, "Strong", "No");
            data.Rows.Add("D7", "Overcast", 10.0, 40.0, "Strong", "Yes");
            data.Rows.Add("D8", "Sunny", 24.0, 90.0, "Weak", "No");
            data.Rows.Add("D9", "Sunny", 12.0, 26.0, "Weak", "Yes");
            data.Rows.Add("D10", "Rain", 25, 30.0, "Weak", "Yes");
            data.Rows.Add("D11", "Sunny", 26.0, 40.0, "Strong", "Yes");
            data.Rows.Add("D12", "Overcast", 27.0, 97.0, "Strong", "Yes");
            data.Rows.Add("D13", "Overcast", 39.0, 41.0, "Weak", "Yes");
            data.Rows.Add("D14", "Rain", 23.0, 98.0, "Strong", "No");

            // Create a new codification codebook to
            // convert strings into discrete symbols
            Codification codebook = new Codification(data);

            int classCount = codebook["PlayTennis"].Symbols; // 2 possible values (yes, no)
            int inputCount = 4; // 4 variables (Outlook, Temperature, Humidity, Wind)

            IUnivariateFittableDistribution[] priors =
            {
                new GeneralDiscreteDistribution(codebook["Outlook"].Symbols),   // 3 possible values (Sunny, overcast, rain)
                new NormalDistribution(),                                       // Continuous value (Celsius)
                new NormalDistribution(),                                       // Continuous value (percentage)
                new GeneralDiscreteDistribution(codebook["Wind"].Symbols)       // 2 possible values (Weak, strong)
            };

            // Create a new Naive Bayes classifier for the two classes
            var target = new NaiveBayes<IUnivariateFittableDistribution>(classCount, inputCount, priors);

            // Extract symbols from data and train the classifier
            DataTable symbols = codebook.Apply(data);
            double[][] inputs = symbols.ToArray("Outlook", "Temperature", "Humidity", "Wind");
            int[] outputs = symbols.ToArray<int>("PlayTennis");

            // Compute the Naive Bayes model
            target.Estimate(inputs, outputs);


            double logLikelihood;
            double[] responses;

            // Compute the result for a sunny, cool, humid and windy day:
            double[] instance = new double[]
            {
                codebook.Translate(columnName:"Outlook", value:"Sunny"),
                12.0,
                90.0,
                codebook.Translate(columnName:"Wind", value:"Strong")
            };

            int c = target.Compute(instance, out logLikelihood, out responses);

            string result = codebook.Translate("PlayTennis", c);

            replacedert.AreEqual("No", result);
            replacedert.AreEqual(0, c);
            replacedert.AreEqual(0.840, responses[0], 1e-3);
            replacedert.AreEqual(1, responses.Sum(), 1e-10);
            replacedert.IsFalse(double.IsNaN(responses[0]));
            replacedert.AreEqual(2, responses.Length);

            int c2 = target.Compute(instance, out logLikelihood);

            replacedert.AreEqual(c, c2);
        }

19 Source : BalancedKMeansTest.cs
with MIT License
from PacktPublishing

[Test]
        public void getlabels()
        {
            // https://github.com/accord-net/framework/issues/451

            int numClusters = 6;

            double[][] observations =
            {
                new double[] {  10.8,   18.706148721743876 },
                new double[] { -10.8,   18.706148721743876 },
                new double[] { -21.6,   0.0 },
                new double[] { -10.8, -18.706148721743876 },
                new double[] {  10.8, -18.706148721743876 },
                new double[] {  21.6,   0.0 },
                new double[] {  32.4,  18.706148721743876 },
                new double[] {  21.6,  37.412297443487752 },
                new double[] {   0.0,  37.412297443487752 },
                new double[] { -21.6,  37.412297443487752 },
                new double[] { -32.4,  18.706148721743876 },
                new double[] { -43.2,   0.0 },
                new double[] { -32.4, -18.706148721743876 },
                new double[] { -21.6, -37.412297443487752 },
                new double[] {   0.0, -37.412297443487752 },
                new double[] {  21.6, -37.412297443487752 },
                new double[] {  32.4, -18.706148721743876 },
                new double[] {  43.2,   0.0 }
            };

            double[] solution = (new double[] { 3, 18, 2, 4, 17, 8, 12, 6, 10, 14, 15, 9, 13, 7, 11, 5, 16, 1 }).Subtract(1);

            int[] actualLabels = new int[observations.Length];
            BalancedKMeans.GetLabels(observations, numClusters, solution, actualLabels);

            int[] expectedLabels = (new int[] { 1, 4, 2, 5, 5, 3, 3, 1, 1, 4, 4, 2, 2, 5, 6, 6, 6, 3 }).Subtract(1);
            for (int j = 0; j < actualLabels.Length; j++)
            {
                double a = actualLabels[j];
                double e = expectedLabels[j];
                Assert.AreEqual(e, a, 0.1);
            }
        }

19 Source : GaussianMixtureModelTest.cs
with MIT License
from PacktPublishing

[Test]
        public void GaussianMixtureModelConstructorTest()
        {
            Accord.Math.Tools.SetupGenerator(0);

            // Test Samples
            double[][] samples =
            {
                new double[] { 0, 1 },
                new double[] { 1, 2 },
                new double[] { 1, 1 },
                new double[] { 0, 7 },
                new double[] { 1, 1 },
                new double[] { 6, 2 },
                new double[] { 6, 5 },
                new double[] { 5, 1 },
                new double[] { 7, 1 },
                new double[] { 5, 1 }
            };

            double[] sample = samples[0];


            // Create a new Gaussian Mixture Model with 2 components
            GaussianMixtureModel gmm = new GaussianMixtureModel(2);

            // Compute the model (estimate)
            double ll = gmm.Compute(samples, 0.0001);
            Assert.AreEqual(-35.930732550698494, ll, 1e-10);

            Assert.AreEqual(2, gmm.Gaussians.Count);

            Assert.IsTrue(gmm.Gaussians.Means[0].IsEqual(new[] { 5.8, 2.0 }, 1e-3));
            Assert.IsTrue(gmm.Gaussians.Means[1].IsEqual(new[] { 0.6, 2.4 }, 1e-3));


            int[] c = samples.Apply(gmm.Clusters.Decide);

            for (int i = 0; i < samples.Length; i++)
            {
                double[] responses;
                int e;
                responses = gmm.Gaussians.Probabilities(samples[i], out e);
                int a = responses.ArgMax();

                Assert.AreEqual(a, e);
                Assert.AreEqual(c[i], (i < 5) ? 1 : 0);
            }
        }

19 Source : ErrorBasedPruningTest.cs
with MIT License
from PacktPublishing

[Test]
        public void RunTest()
        {
            Accord.Math.Random.Generator.Seed = 0;

            double[][] inputs;
            int[] outputs;

            int trainingSamplesCount = 6000;
            DecisionTree tree = ReducedErrorPruningTest.createNurseryExample(out inputs, out outputs, trainingSamplesCount);

            int nodeCount = 0;
            foreach (var node in tree)
                nodeCount++;

            var pruningInputs = inputs.Submatrix(trainingSamplesCount, inputs.Length - 1);
            var pruningOutputs = outputs.Submatrix(trainingSamplesCount, inputs.Length - 1);
            ErrorBasedPruning prune = new ErrorBasedPruning(tree, pruningInputs, pruningOutputs);

            prune.Threshold = 0.1;

            double lastError, error = Double.PositiveInfinity;
            do
            {
                lastError = error;
                error = prune.Run();
            } while (error < lastError);

            int nodeCount2 = 0;
            foreach (var node in tree)
                nodeCount2++;

            Assert.AreEqual(0.28922413793103446, error, 5e-4);
            Assert.AreEqual(447, nodeCount);
            Assert.AreEqual(424, nodeCount2);
        }

19 Source : SpeededUpRobustFeaturesDescriptorTest.cs
with MIT License
from PacktPublishing

[Test]
        [Category("Slow")]
        public void ProcessImageTest()
        {
            var bitmaps = SpeededUpRobustFeaturesDetectorTest.GetImages();

            foreach (Bitmap img in bitmaps)
            {

                bool upright = true;
                bool extended = false;

                List<SpeededUpRobustFeaturePoint> expected;
                List<SpeededUpRobustFeaturePoint> actual;

                // Create OpenSURF detector by Chris Evans
                {
                    // Create Integral Image
                    var clone = Accord.Imaging.Image.Clone(img);
                    OpenSURFcs.IntegralImage iimg = OpenSURFcs.IntegralImage.FromImage(clone);

                    // Extract the interest points
                    var pts = OpenSURFcs.FastHessian.getIpoints(0.0002f, 5, 2, iimg);

                    // Describe the interest points
                    OpenSURFcs.SurfDescriptor.DecribeInterestPoints(pts, upright, extended, iimg);

                    expected = new List<SpeededUpRobustFeaturePoint>();
                    foreach (var p in pts)
                    {
                        expected.Add(new SpeededUpRobustFeaturePoint(
                            p.x, p.y, p.scale,
                            p.laplacian, p.orientation,
                            p.response, p.descriptor.ToDouble()));
                    }
                }

                {
                    // Create the detector
                    var surf = new SpeededUpRobustFeaturesDetector(0.0002f, 5, 2);

                    // Extract interest points
                    var clone = Accord.Imaging.Image.Clone(img);
                    actual = surf.ProcessImage(clone);

                    // Describe the interest points
                    var descriptor = surf.GetDescriptor();
                    descriptor.Invariant = !upright;
                    descriptor.Extended = extended;

                    foreach (var expectedPoint in expected)
                    {
                        var actualPoint = new SpeededUpRobustFeaturePoint(
                            expectedPoint.X,
                            expectedPoint.Y,
                            expectedPoint.Scale,
                            expectedPoint.Laplacian);

                        descriptor.Compute(actualPoint);

                        Assert.AreEqual(expectedPoint.X, actualPoint.X);
                        Assert.AreEqual(expectedPoint.Y, actualPoint.Y);
                        Assert.AreEqual(expectedPoint.Scale, actualPoint.Scale);
                        Assert.AreEqual(expectedPoint.Orientation, actualPoint.Orientation);
                        Assert.AreEqual(expectedPoint.Response, actualPoint.Response);
                        Assert.AreEqual(expectedPoint.Descriptor.Length, actualPoint.Descriptor.Length);

                        for (int i = 0; i < expectedPoint.Descriptor.Length; i++)
                        {
                            double e = expectedPoint.Descriptor[i];
                            double a = actualPoint.Descriptor[i];

                            double u = System.Math.Abs(e - a);
                            double v = System.Math.Abs(e);
                            Assert.AreEqual(e, a, 0.05);
                        }
                    }
                }
            }

        }

19 Source : SpeededUpRobustFeaturesDetectorTest.cs
with MIT License
from PacktPublishing

[Test]
        public void doc_test()
        {
            string localPath = TestContext.CurrentContext.TestDirectory;

            #region doc_apply
            // Let's load an example image, such as Lena,
            // from a standard dataset of example images:
            var images = new TestImages(path: localPath);
            Bitmap lena = images["lena.bmp"];

            // Create a new SURF with the default parameter values:
            var surf = new SpeededUpRobustFeaturesDetector(threshold: 0.0002f, octaves: 5, initial: 2);

            // Use it to extract the SURF point descriptors from the Lena image:
            List<SpeededUpRobustFeaturePoint> descriptors = surf.ProcessImage(lena);

            // We can obtain the actual double[] descriptors using
            double[][] features = descriptors.Apply(d => d.Descriptor);

            // Now those descriptors can be used to represent the image itself, such
            // as for example, in the Bag-of-Visual-Words approach for classification.
            #endregion

            Assert.AreEqual(523, descriptors.Count);
            double sum = features.Sum(x => x.Sum());
            Assert.AreEqual(2340.9402310500964, sum, 1e-10);
        }

19 Source : ToolsTest.cs
with MIT License
from PacktPublishing

[Test]
        public void NormalizeTest()
        {
            PointH[] points = new PointH[]
            {
                new PointH(1, 2),
                new PointH(5, 2),
                new PointH(12, 2),
                new PointH(1, 2),
                new PointH(10, 2),
            };

            MatrixH T;
            PointH[] actual = Tools.Normalize(points, out T);


            // Centroids should be at the origin
            double cx = 0, cy = 0;
            for (int i = 0; i < actual.Length; i++)
            {
                cx += actual[i].X / actual[i].W;
                cy += actual[i].Y / actual[i].W;
            }
            Assert.AreEqual(cx / actual.Length, 0, 0.0000001);
            Assert.AreEqual(cy / actual.Length, 0, 0.0000001);

            // Average distance from the origin should be sqrt(2)
            double d = 0;
            for (int i = 0; i < actual.Length; i++)
            {
                double x = actual[i].X / actual[i].W;
                double y = actual[i].Y / actual[i].W;

                d += System.Math.Sqrt(x * x + y * y);
            }
            Assert.AreEqual(d / actual.Length, System.Math.Sqrt(2), 0.00001);


        }
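The two checks above pin down the classic normalization used before homography estimation: translate the centroid to the origin, then scale so the average distance from it is sqrt(2). A minimal sketch of that transform on plain 2D points, assuming Euclidean (non-homogeneous) coordinates rather than Accord's PointH and MatrixH types:

using System;
using System.Linq;

public static class NormalizationSketch
{
    // Returns normalized copies of the points: centroid at the origin,
    // mean distance from the origin equal to sqrt(2).
    public static double[][] Normalize(double[][] points)
    {
        double cx = points.Average(p => p[0]);
        double cy = points.Average(p => p[1]);

        double meanDist = points.Average(p =>
            Math.Sqrt((p[0] - cx) * (p[0] - cx) + (p[1] - cy) * (p[1] - cy)));

        double scale = Math.Sqrt(2) / meanDist;

        return points.Select(p => new[]
        {
            (p[0] - cx) * scale,
            (p[1] - cy) * scale
        }).ToArray();
    }
}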

19 Source : NaiveBayes`1Test.cs
with MIT License
from PacktPublishing

[Test]
        public void learn_test_mitchell()
        {
            #region doc_mitchell_1
            // We will represent Mitchell's Tennis example using a DataTable. However,
            // the use of a DataTable is not required in order to use the Naive Bayes. 
            // Please take a look at the other examples below for simpler approaches.
            DataTable data = new DataTable("Mitchell's Tennis Example");
            data.Columns.Add("Day", "Outlook", "Temperature", "Humidity", "Wind", "PlayTennis");
            // We will set Temperature and Humidity to be continuous
            data.Columns["Temperature"].DataType = typeof(double);
            data.Columns["Humidity"].DataType = typeof(double);
            // Add some data
            data.Rows.Add("D1", "Sunny", 38.0, 96.0, "Weak", "No");
            data.Rows.Add("D2", "Sunny", 39.0, 90.0, "Strong", "No");
            data.Rows.Add("D3", "Overcast", 38.0, 75.0, "Weak", "Yes");
            data.Rows.Add("D4", "Rain", 25.0, 87.0, "Weak", "Yes");
            data.Rows.Add("D5", "Rain", 12.0, 30.0, "Weak", "Yes");
            data.Rows.Add("D6", "Rain", 11.0, 35.0, "Strong", "No");
            data.Rows.Add("D7", "Overcast", 10.0, 40.0, "Strong", "Yes");
            data.Rows.Add("D8", "Sunny", 24.0, 90.0, "Weak", "No");
            data.Rows.Add("D9", "Sunny", 12.0, 26.0, "Weak", "Yes");
            data.Rows.Add("D10", "Rain", 25, 30.0, "Weak", "Yes");
            data.Rows.Add("D11", "Sunny", 26.0, 40.0, "Strong", "Yes");
            data.Rows.Add("D12", "Overcast", 27.0, 97.0, "Strong", "Yes");
            data.Rows.Add("D13", "Overcast", 39.0, 41.0, "Weak", "Yes");
            data.Rows.Add("D14", "Rain", 23.0, 98.0, "Strong", "No");
            #endregion

            #region doc_mitchell_2
            // Create a new codification codebook to
            // convert strings into discrete symbols
            Codification codebook = new Codification(data);
            #endregion

            #region doc_mitchell_3
            // Some distributions require constructor parameters, and as such, cannot 
            // be automatically initialized by the learning algorithm. For this reason, 
            // we might need to specify how each component should be initialized:
            IUnivariateFittableDistribution[] priors =
            {
                new GeneralDiscreteDistribution(codebook["Outlook"].Symbols),   // 3 possible values (Sunny, overcast, rain)
                new NormalDistribution(),                                       // Continuous value (Celsius)
                new NormalDistribution(),                                       // Continuous value (percentage)
                new GeneralDiscreteDistribution(codebook["Wind"].Symbols)       // 2 possible values (Weak, strong)
            };

            // Create a new Naive Bayes learner for the two classes
            var learner = new NaiveBayesLearning<IUnivariateFittableDistribution>()
            {
                // Tell the learner how to initialize the distributions
                Distribution = (classIndex, variableIndex) => priors[variableIndex]
            };

            // Extract symbols from data and train the classifier
            DataTable symbols = codebook.Apply(data);
            double[][] inputs = symbols.ToArray("Outlook", "Temperature", "Humidity", "Wind");
            int[] outputs = symbols.ToArray<int>("PlayTennis");

            // Learn the Naive Bayes model
            var naiveBayes = learner.Learn(inputs, outputs);
            #endregion

            #region doc_mitchell_4
            // Create an instance representing a "sunny, cool, humid and windy day":
            double[] instance = new double[]
            {
                codebook.Translate(columnName:"Outlook", value:"Sunny"), // 0
                12.0,
                90.0,
                codebook.Translate(columnName:"Wind", value:"Strong") // 1
            };

            // We can obtain a class prediction using
            int predicted = naiveBayes.Decide(instance);

            // Or compute probabilities of each clreplaced using
            double[] probabilities = naiveBayes.Probabilities(instance);

            // Or obtain the log-likelihood of prediction
            double ll = naiveBayes.LogLikelihood(instance);

            // Finally, the result can be translated back using
            string result = codebook.Translate("PlayTennis", predicted); // Should be "No"
            #endregion

            replacedert.AreEqual("No", result);
            replacedert.AreEqual(0, predicted);
            replacedert.AreEqual(0.840, probabilities[0], 1e-3);
            replacedert.AreEqual(-10.493243476691351, ll, 1e-6);
            replacedert.AreEqual(1, probabilities.Sum(), 1e-10);
            replacedert.AreEqual(2, probabilities.Length);
        }
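The two NormalDistribution entries above mean the continuous variables (temperature and humidity) enter each class likelihood through a fitted Gaussian density, while the discrete variables contribute per-symbol probabilities. As a reference for the continuous part, a minimal sketch of the density being evaluated, assuming only System.Math:

using System;

public static class GaussianSketch
{
    // Density of N(mean, stdDev^2) at x; a Gaussian Naive Bayes multiplies
    // one such term per continuous feature into each class's likelihood.
    public static double NormalPdf(double x, double mean, double stdDev)
    {
        double z = (x - mean) / stdDev;
        return Math.Exp(-0.5 * z * z) / (stdDev * Math.Sqrt(2.0 * Math.PI));
    }
}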

19 Source : NaiveBayes`1Test.cs
with MIT License
from PacktPublishing

[Test]
        public void SerializationTest()
        {
            DataTable data = new DataTable("Mitchell's Tennis Example");

            data.Columns.Add("Day", "Outlook", "Temperature", "Humidity", "Wind", "PlayTennis");

            data.Rows.Add("D1", "Sunny", "Hot", "High", "Weak", "No");
            data.Rows.Add("D2", "Sunny", "Hot", "High", "Strong", "No");
            data.Rows.Add("D3", "Overcast", "Hot", "High", "Weak", "Yes");
            data.Rows.Add("D4", "Rain", "Mild", "High", "Weak", "Yes");
            data.Rows.Add("D5", "Rain", "Cool", "Normal", "Weak", "Yes");
            data.Rows.Add("D6", "Rain", "Cool", "Normal", "Strong", "No");
            data.Rows.Add("D7", "Overcast", "Cool", "Normal", "Strong", "Yes");
            data.Rows.Add("D8", "Sunny", "Mild", "High", "Weak", "No");
            data.Rows.Add("D9", "Sunny", "Cool", "Normal", "Weak", "Yes");
            data.Rows.Add("D10", "Rain", "Mild", "Normal", "Weak", "Yes");
            data.Rows.Add("D11", "Sunny", "Mild", "Normal", "Strong", "Yes");
            data.Rows.Add("D12", "Overcast", "Mild", "High", "Strong", "Yes");
            data.Rows.Add("D13", "Overcast", "Hot", "Normal", "Weak", "Yes");
            data.Rows.Add("D14", "Rain", "Mild", "High", "Strong", "No");

            // Create a new codification codebook to
            // convert strings into discrete symbols
            Codification codebook = new Codification(data,
                "Outlook", "Temperature", "Humidity", "Wind", "PlayTennis");

            string fileName = Path.Combine(TestContext.CurrentContext.TestDirectory, "Resources", "nb.bin");

            var target = Serializer.Load<NaiveBayes<GeneralDiscreteDistribution>>(fileName);

            Assert.AreEqual(target.InputCount, 4);
            Assert.AreEqual(target.ClassCount, 2);
            double logLikelihood;
            double[] responses;

            // Compute the result for a sunny, cool, humid and windy day:
            double[] instance = codebook.Translate("Sunny", "Cool", "High", "Strong").ToDouble();

            int c = target.Compute(instance, out logLikelihood, out responses);

            string result = codebook.Translate("PlayTennis", c);

            replacedert.AreEqual("No", result);
            replacedert.AreEqual(0, c);
            replacedert.AreEqual(0.795, responses[0], 1e-3);
            replacedert.AreEqual(1, responses.Sum(), 1e-10);
            replacedert.IsFalse(double.IsNaN(responses[0]));
            replacedert.AreEqual(2, responses.Length);
        }

19 Source : DecisionStumpTest.cs
with MIT License
from PacktPublishing

[Test]
        public void learn_stump_classifier()
        {
            #region doc_learn
            // Let's say we want to classify the following 2-dimensional
            // data samples into 2 possible classes, either true or false:
            double[][] inputs =
            {
                new double[] {  10, 42 },
                new double[] { 162, 96 },
                new double[] { 125, 20 },
                new double[] {  96,  6 },
                new double[] {   2, 73 },
                new double[] {  52, 51 },
                new double[] {  71, 49 },
            };

            // And those are their associated class labels
            bool[] outputs =
            {
                false, false, true, true, false, false, true
            };

            // We create a learning algorithm as:
            var teacher = new ThresholdLearning();

            // Now, we can use the Learn method to learn a classifier:
            DecisionStump classifier = teacher.Learn(inputs, outputs);

            // Now, we can check how good it is using a confusion matrix:
            var cm = ConfusionMatrix.Estimate(classifier, inputs, outputs);

            double error = cm.Error; // should be ~0.14

            // We can also compute the model outputs for new samples using
            bool y = classifier.Decide(new double[] { 71, 48 }); // should be false
            #endregion

            Assert.AreEqual(false, y);
            Assert.AreEqual(0.14285714285714285, error, 1e-10);
        }

19 Source : RandomForestTest.cs
with MIT License
from PacktPublishing

[Test]
        public void LargeRunTest()
        {
            string localPath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "rf");

            #region doc_nursery
            // Fix random seed for reproducibility
            Accord.Math.Random.Generator.Seed = 1;

            // This example uses the Nursery Database available from the University of
            // California Irvine repository of machine learning databases, available at
            //
            //   http://archive.ics.uci.edu/ml/machine-learning-databases/nursery/nursery.names
            //
            // The description paragraph is listed as follows.
            //
            //   Nursery Database was derived from a hierarchical decision model
            //   originally developed to rank applications for nursery schools. It
            //   was used during several years in 1980's when there was excessive
            //   enrollment to these schools in Ljubljana, Slovenia, and the
            //   rejected applications frequently needed an objective
            //   explanation. The final decision depended on three subproblems:
            //   occupation of parents and child's nursery, family structure and
            //   financial standing, and social and health picture of the family.
            //   The model was developed within expert system shell for decision
            //   making DEX (M. Bohanec, V. Rajkovic: Expert system for decision
            //   making. Sistemica 1(1), pp. 145-157, 1990.).
            //

            // Let's begin by loading the raw data. This string variable contains
            // the contents of the nursery.data file as a single, continuous text.
            //
            var nursery = new DataSets.Nursery(path: localPath);
            int[][] inputs = nursery.Instances;
            int[] outputs = nursery.ClassLabels;

            // Now, let's create the forest learning algorithm
            var teacher = new RandomForestLearning(nursery.VariableNames)
            {
                NumberOfTrees = 1,
                SampleRatio = 1.0
            };

            // Finally, learn a random forest from data
            var forest = teacher.Learn(inputs, outputs);

            // We can estimate clreplaced labels using
            int[] predicted = forest.Decide(inputs);

            // And the classification error (0) can be computed as
            double error = new ZeroOneLoss(outputs).Loss(forest.Decide(inputs));
            #endregion

            Assert.AreEqual(0, error, 1e-10);
            Assert.IsTrue(outputs.IsEqual(predicted));

            Assert.AreEqual(0, error);

            for (int i = 0; i < inputs.Length; i++)
            {
                int expected = outputs[i];
                int actual = forest.Compute(inputs[i].ToDouble());

                Assert.AreEqual(expected, actual);
            }
        }

19 Source : SplineTest.cs
with MIT License
from PacktPublishing

[Test]
        public void learn()
        {
            #region doc_learn
            // Let's try to obtain a classifier for an
            // example 2D binary classification dataset:
            var iris = new DataSets.YinYang();
            double[][] inputs = iris.Instances;
            bool[] outputs = iris.ClassLabels;

            // Create a learning algorithm with the Spline kernel
            var smo = new SequentialMinimalOptimization<Spline>()
            {
                // Force a complexity value C or let it be
                // determined automatically by a heuristic:
                // Complexity = 1.5
            };

            // Use it to learn a new s.v. machine
            var svm = smo.Learn(inputs, outputs);

            // Now we can compute predicted values
            bool[] predicted = svm.Decide(inputs);

            // And check how far we are from the expected values
            double error = new ZeroOneLoss(outputs).Loss(predicted); // error will be 0.20
            #endregion

            Assert.AreEqual(0.2, error, 1e-6);
        }

19 Source : ProbabilisticCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void RunTest()
        {
            double[][] input =
            {
                new double[] { 55, 0 }, // 0 - no cancer
                new double[] { 28, 0 }, // 0
                new double[] { 65, 1 }, // 0
                new double[] { 46, 0 }, // 1 - have cancer
                new double[] { 86, 1 }, // 1
                new double[] { 56, 1 }, // 1
                new double[] { 85, 0 }, // 0
                new double[] { 33, 0 }, // 0
                new double[] { 21, 1 }, // 0
                new double[] { 42, 1 }, // 1
            };

            double[] output =
            {
                0, 0, 0, 1, 1, 1, 0, 0, 0, 1
            };

            int[] labels = output.Apply(x => x > 0 ? +1 : -1);

            var svm = new SupportVectorMachine(inputs: 2);
            Assert.AreEqual(2, svm.NumberOfInputs);
            Assert.AreEqual(1, svm.NumberOfOutputs);
            Assert.AreEqual(2, svm.NumberOfClasses);
            var teacher = new ProbabilisticCoordinateDescent(svm, input, labels);

            teacher.Tolerance = 1e-10;
            teacher.Complexity = 1e+10;

            Assert.IsFalse(svm.IsProbabilistic);
            double error = teacher.Run();
            Assert.IsTrue(svm.IsProbabilistic);

            var regression = LogisticRegression.FromWeights(svm.ToWeights());

            double[] actual = new double[output.Length];
            for (int i = 0; i < actual.Length; i++)
                actual[i] = regression.Compute(input[i]);

            double ageOdds = regression.GetOddsRatio(1); // 1.0208597028836701
            double smokeOdds = regression.GetOddsRatio(2); // 5.8584748789881331

            Assert.AreEqual(0.2, error);
            Assert.AreEqual(1.0208597028836701, ageOdds, 1e-4);
            Assert.AreEqual(5.8584748789881331, smokeOdds, 1e-4);

            Assert.IsFalse(Double.IsNaN(ageOdds));
            Assert.IsFalse(Double.IsNaN(smokeOdds));

            Assert.AreEqual(-2.4577464307294092, regression.Intercept, 1e-8);
            Assert.AreEqual(-2.4577464307294092, regression.Coefficients[0], 1e-8);
            Assert.AreEqual(0.020645118265359252, regression.Coefficients[1], 1e-8);
            Assert.AreEqual(1.7678893101571855, regression.Coefficients[2], 1e-8);
        }

19 Source : ProbabilisticCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void RunTest2()
        {
            var dataset = SequentialMinimalOptimizationTest.GetYingYang();

            double[][] inputs = dataset.Submatrix(null, 0, 1).ToJagged();
            int[] labels = dataset.GetColumn(2).ToInt32();

            var svm = new SupportVectorMachine(inputs: 2);
            var teacher = new ProbabilisticCoordinateDescent(svm, inputs, labels);

            teacher.Tolerance = 1e-10;
            teacher.Complexity = 1e+10;

            double error = teacher.Run();

            double[] weights = svm.ToWeights();

            Assert.AreEqual(0.11, error);
            Assert.AreEqual(3, weights.Length);
            Assert.AreEqual(-1.3231203367770932, weights[0], 1e-8);
            Assert.AreEqual(-3.0227742288788493, weights[1], 1e-8);
            Assert.AreEqual(-0.73074823290553259, weights[2], 1e-8);

            Assert.AreEqual(svm.Threshold, weights[0]);
        }

19 Source : ProbabilisticCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void KernelTest1()
        {
            var dataset = SequentialMinimalOptimizationTest.GetYingYang();
            double[][] inputs = dataset.Submatrix(null, 0, 1).ToJagged();
            int[] labels = dataset.GetColumn(2).ToInt32();

            double e1, e2;
            double[] w1, w2;

            {
                Accord.Math.Random.Generator.Seed = 0;

                var svm = new SupportVectorMachine(inputs: 2);
                var teacher = new ProbabilisticCoordinateDescent(svm, inputs, labels);

                teacher.Tolerance = 1e-10;
                teacher.Complexity = 1e+10;

                e1 = teacher.Run();
                w1 = svm.ToWeights();
            }

            {
                Accord.Math.Random.Generator.Seed = 0;

                var svm = new KernelSupportVectorMachine(new Linear(0), inputs: 2);
                var teacher = new ProbabilisticCoordinateDescent(svm, inputs, labels);

                teacher.Tolerance = 1e-10;
                teacher.Complexity = 1e+10;

                e2 = teacher.Run();
                w2 = svm.ToWeights();
            }

            Assert.AreEqual(e1, e2);
            Assert.AreEqual(w1.Length, w2.Length);
            Assert.AreEqual(w1[0], w2[0], 1e-8);
            Assert.AreEqual(w1[1], w2[1], 1e-8);
            Assert.AreEqual(w1[2], w2[2], 1e-8);
        }

19 Source : ProbabilisticCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void logistic_regression_test()
        {
            #region doc_logreg
            // Declare some training data. This is exactly the same
            // data used in the LogisticRegression documentation page

            // Suppose we have the following data about some patients.
            // The first variable is continuous and represents patient
            // age. The second variable is dichotomous and gives whether
            // they smoke or not (This is completely fictional data).

            // We also know if they have had lung cancer or not, and 
            // we would like to know whether smoking has any connection
            // with lung cancer (This is completely fictional data).

            double[][] input =
            {              // age, smokes?, had cancer?
                new double[] { 55,    0  }, // false - no cancer
                new double[] { 28,    0  }, // false
                new double[] { 65,    1  }, // false
                new double[] { 46,    0  }, // true  - had cancer
                new double[] { 86,    1  }, // true
                new double[] { 56,    1  }, // true
                new double[] { 85,    0  }, // false
                new double[] { 33,    0  }, // false
                new double[] { 21,    1  }, // false
                new double[] { 42,    1  }, // true
            };

            double[] output = // Whether each patient had lung cancer or not
            {
                0, 0, 0, 1, 1, 1, 0, 0, 0, 1
            };

            // Create the L1-regularization learning algorithm
            var teacher = new ProbabilisticCoordinateDescent()
            {
                Tolerance = 1e-10,
                Complexity = 1e+10, // learn a hard-margin model
            };

            // Learn the L1-regularized machine
            var svm = teacher.Learn(input, output);

            // Convert the svm to logistic regression
            var regression = (LogisticRegression)svm;

            // Compute the predicted outcome for inputs
            bool[] predicted = regression.Decide(input);

            // Compute log-likelihood scores for the outputs
            double[] scores = regression.Score(input);

            // Compute odds-ratio as in the LogisticRegression example
            double ageOdds = regression.GetOddsRatio(1);   // 1.0208597029158772
            double smokeOdds = regression.GetOddsRatio(2); // 5.8584748789881331

            // Compute the classification error as in the SVM example
            double error = new ZeroOneLoss(output).Loss(predicted);
            #endregion

            Assert.AreEqual(2, regression.NumberOfInputs);
            Assert.AreEqual(1, regression.NumberOfOutputs);
            Assert.AreEqual(2, regression.NumberOfClasses);

            var rsvm = (SupportVectorMachine)regression;
            Assert.AreEqual(2, rsvm.NumberOfInputs);
            Assert.AreEqual(1, rsvm.NumberOfOutputs);
            Assert.AreEqual(2, rsvm.NumberOfClasses);

            double[] svmpred = svm.Score(input);
            Assert.IsTrue(scores.IsEqual(svmpred, 1e-10));

            Assert.AreEqual(0.2, error);
            Assert.AreEqual(1.0208597029158772, ageOdds, 1e-4);
            Assert.AreEqual(5.8584748789881331, smokeOdds, 1e-4);

            Assert.AreEqual(-2.4577464307294092, regression.Intercept, 1e-8);
            Assert.AreEqual(-2.4577464307294092, regression.Coefficients[0], 1e-8);
            Assert.AreEqual(0.020645118265359252, regression.Coefficients[1], 1e-8);
            Assert.AreEqual(1.7678893101571855, regression.Coefficients[2], 1e-8);
        }
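Note how the asserted odds ratios line up with the asserted coefficients: for a logistic model, a variable's odds ratio is the exponential of its coefficient. A quick numeric check of the values above, assuming only System.Math:

using System;

public static class OddsRatioSketch
{
    public static void Main()
    {
        // exp(coefficient) reproduces the odds ratios asserted above:
        Console.WriteLine(Math.Exp(0.020645118265359252)); // ~1.02086 (age)
        Console.WriteLine(Math.Exp(1.7678893101571855));   // ~5.85847 (smokes?)
    }
}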

19 Source : ProbabilisticDualCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void logistic_regression_test2()
        {
            Accord.Math.Random.Generator.Seed = 0;

            #region doc_logreg
            // Declare some training data. This is exactly the same
            // data used in the LogisticRegression documentation page

            // Suppose we have the following data about some patients.
            // The first variable is continuous and represents patient
            // age. The second variable is dichotomous and gives whether
            // they smoke or not (This is completely fictional data).

            // We also know if they have had lung cancer or not, and 
            // we would like to know whether smoking has any connection
            // with lung cancer (This is completely fictional data).

            double[][] input =
            {              // age, smokes?, had cancer?
                new double[] { 55,    0  }, // false - no cancer
                new double[] { 28,    0  }, // false
                new double[] { 65,    1  }, // false
                new double[] { 46,    0  }, // true  - had cancer
                new double[] { 86,    1  }, // true
                new double[] { 56,    1  }, // true
                new double[] { 85,    0  }, // false
                new double[] { 33,    0  }, // false
                new double[] { 21,    1  }, // false
                new double[] { 42,    1  }, // true
            };

            double[] output = // Whether each patient had lung cancer or not
            {
                0, 0, 0, 1, 1, 1, 0, 0, 0, 1
            };

            // Create the probabilistic-SVM learning algorithm
            var teacher = new ProbabilisticDualCoordinateDescent()
            {
                Tolerance = 1e-10,
                Complexity = 1e+10, // learn a hard-margin model
            };

            // Learn the support vector machine
            var svm = teacher.Learn(input, output);

            // Convert the svm to logistic regression
            var regression = (LogisticRegression)svm;

            // Compute the predicted outcome for inputs
            bool[] predicted = regression.Decide(input);

            // Compute probability scores for the outputs
            double[] scores = regression.Probability(input);

            // Compute odds-ratio as in the LogisticRegression example
            double ageOdds = regression.GetOddsRatio(1);   // 1.0430443799578411
            double smokeOdds = regression.GetOddsRatio(2); // 7.2414593749145508

            // Compute the classification error as in the SVM example
            double error = new ZeroOneLoss(output).Loss(predicted);
            #endregion

            var rsvm = (SupportVectorMachine)regression;
            Assert.AreEqual(2, rsvm.NumberOfInputs);
            Assert.AreEqual(1, rsvm.NumberOfOutputs);
            Assert.AreEqual(2, rsvm.NumberOfClasses);
            double[] svmpred = svm.Probability(input);
            Assert.IsTrue(scores.IsEqual(svmpred, 1e-10));

            Assert.AreEqual(0.4, error);
            Assert.AreEqual(1.0430443799578411, ageOdds, 1e-4);
            Assert.AreEqual(7.2414593749145508, smokeOdds, 1e-4);

            Assert.AreEqual(-21.4120677536517, regression.Intercept, 1e-8);
            Assert.AreEqual(-21.4120677536517, regression.Coefficients[0], 1e-8);
            Assert.AreEqual(0.042143725408546939, regression.Coefficients[1], 1e-8);
            Assert.AreEqual(1.9798227572056906, regression.Coefficients[2], 1e-8);
        }

19 Source : FanChenLinSequentialMinimalOptimizationRegressionTest.cs
with MIT License
from PacktPublishing

[Test]
        public void learn_test_polynomial()
        {
            Accord.Math.Random.Generator.Seed = 0;

            // Example regression problem. Suppose we are trying
            // to model the following equation: f(x, y) = 2x + y

            double[][] inputs = // (x, y)
            {
                new double[] { 0,  1 }, // 2*0 + 1 =  1
                new double[] { 4,  3 }, // 2*4 + 3 = 11
                new double[] { 8, -8 }, // 2*8 - 8 =  8
                new double[] { 2,  2 }, // 2*2 + 2 =  6
                new double[] { 6,  1 }, // 2*6 + 1 = 13
                new double[] { 5,  4 }, // 2*5 + 4 = 14
                new double[] { 9,  1 }, // 2*9 + 1 = 19
                new double[] { 1,  6 }, // 2*1 + 6 =  8
            };

            double[] outputs = // f(x, y)
            {
                1, 11, 8, 6, 13, 14, 19, 8
            };

            // Create the sequential minimal optimization teacher
            var learn = new FanChenLinSupportVectorRegression<Polynomial>()
            {
                Kernel = new Polynomial(degree: 1, constant: 0),
                Complexity = 100
            };

            // Run the learning algorithm
            SupportVectorMachine<Polynomial> svm = learn.Learn(inputs, outputs);

            // Compute the predicted scores
            double[] predicted = svm.Score(inputs);

            // Compute the error between the expected and predicted
            double error = new SquareLoss(outputs).Loss(predicted);

            // Compute the answer for one particular example
            double fxy = svm.Score(inputs[0]); // 1.000776033448912

            Assert.AreEqual(1.0, fxy, 1e-3);
            for (int i = 0; i < outputs.Length; i++)
                Assert.AreEqual(outputs[i], predicted[i], 2e-3);
        }
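A Polynomial kernel with degree 1 and constant 0, as configured above, reduces to a plain dot product, which is why this setup can recover the linear target f(x, y) = 2x + y. A minimal sketch of the standard polynomial kernel k(x, z) = (x · z + c)^d, assuming the common textbook definition rather than Accord's internals:

using System;

public static class PolynomialKernelSketch
{
    // k(x, z) = (x . z + constant)^degree; with degree = 1 and constant = 0
    // this is exactly the linear kernel x . z.
    public static double K(double[] x, double[] z, int degree, double constant)
    {
        double dot = 0;
        for (int i = 0; i < x.Length; i++)
            dot += x[i] * z[i];
        return Math.Pow(dot + constant, degree);
    }
}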

19 Source : LinearCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void linear_regression_test()
        {
            #region doc_linreg
            // Declare some training data. This is exactly the same
            // data used in the MultipleLinearRegression documentation page

            // We will try to model a plane as an equation in the form
            // "ax + by + c = z". We have two input variables (x and y)
            // and we will be trying to find two parameters a and b and 
            // an intercept term c.

            // Create the linear-SVM learning algorithm
            var teacher = new LinearCoordinateDescent()
            {
                Tolerance = 1e-10,
                Complexity = 1e+10, // learn a hard-margin model
            };

            // Now suppose you have some points
            double[][] inputs =
            {
                new double[] { 1, 1 },
                new double[] { 0, 1 },
                new double[] { 1, 0 },
                new double[] { 0, 0 },
            };

            // located in the same Z (z = 1)
            double[] outputs = { 1, 1, 1, 1 };

            // Learn the support vector machine
            var svm = teacher.Learn(inputs, outputs);

            // Convert the svm to a multiple linear regression
            var regression = (MultipleLinearRegression)svm;

            // As result, we will be given the following:
            double a = regression.Weights[0]; // a = 0
            double b = regression.Weights[1]; // b = 0
            double c = regression.Intercept;  // c = 1

            // This is the plane described by the equation
            // ax + by + c = z => 0x + 0y + 1 = z => 1 = z.

            // We can compute the predicted points using
            double[] predicted = regression.Transform(inputs);

            // And the squared error loss using 
            double error = new SquareLoss(outputs).Loss(predicted);
            #endregion

            var rsvm = (SupportVectorMachine)regression;
            Assert.AreEqual(2, rsvm.NumberOfInputs);
            Assert.AreEqual(2, rsvm.NumberOfClasses);
            Assert.AreEqual(1, rsvm.NumberOfOutputs);

            Assert.AreEqual(2, regression.NumberOfInputs);
            Assert.AreEqual(1, regression.NumberOfOutputs);

            Assert.AreEqual(0.0, a, 1e-6);
            Assert.AreEqual(0.0, b, 1e-6);
            Assert.AreEqual(1.0, c, 1e-6);
            Assert.AreEqual(0.0, error, 1e-6);

            double[] expected = regression.Compute(inputs);
            double[] actual = regression.Transform(inputs);
            Assert.IsTrue(expected.IsEqual(actual, 1e-10));

            double r = regression.CoefficientOfDetermination(inputs, outputs);
            Assert.AreEqual(1.0, r);
        }

19 Source : LinearDualCoordinateDescentTest.cs
with MIT License
from PacktPublishing

[Test]
        public void linear_regression_test()
        {
            #region doc_linreg
            // Declare some training data. This is exactly the same
            // data used in the MultipleLinearRegression documentation page

            // We will try to model a plane as an equation in the form
            // "ax + by + c = z". We have two input variables (x and y)
            // and we will be trying to find two parameters a and b and 
            // an intercept term c.

            // Create the linear-SVM learning algorithm
            var teacher = new LinearDualCoordinateDescent()
            {
                Tolerance = 1e-10,
                Complexity = 1e+10, // learn a hard-margin model
            };

            // Now suppose you have some points
            double[][] inputs =
            {
                new double[] { 1, 1 },
                new double[] { 0, 1 },
                new double[] { 1, 0 },
                new double[] { 0, 0 },
            };

            // located in the same Z (z = 1)
            double[] outputs = { 1, 1, 1, 1 };

            // Learn the support vector machine
            var svm = teacher.Learn(inputs, outputs);

            // Convert the svm to a multiple linear regression
            var regression = (MultipleLinearRegression)svm;

            // As result, we will be given the following:
            double a = regression.Weights[0]; // a = 0
            double b = regression.Weights[1]; // b = 0
            double c = regression.Intercept;  // c = 1

            // This is the plane described by the equation
            // ax + by + c = z => 0x + 0y + 1 = z => 1 = z.

            // We can compute the predicted points using
            double[] predicted = regression.Transform(inputs);

            // And the squared error loss using 
            double error = new SquareLoss(outputs).Loss(predicted);
            #endregion

            var rsvm = (SupportVectorMachine)regression;
            Assert.AreEqual(2, rsvm.NumberOfInputs);
            Assert.AreEqual(2, rsvm.NumberOfClasses);
            Assert.AreEqual(1, rsvm.NumberOfOutputs);
            double[] svmpred = svm.Score(inputs);
            Assert.IsTrue(predicted.IsEqual(svmpred));

            Assert.AreEqual(2, regression.NumberOfInputs);
            Assert.AreEqual(1, regression.NumberOfOutputs);

            Assert.AreEqual(0.0, a, 1e-6);
            Assert.AreEqual(0.0, b, 1e-6);
            Assert.AreEqual(1.0, c, 1e-6);
            Assert.AreEqual(0.0, error, 1e-6);

            double[] expected = regression.Compute(inputs);
            double[] actual = regression.Transform(inputs);
            Assert.IsTrue(expected.IsEqual(actual, 1e-10));

            double r = regression.CoefficientOfDetermination(inputs, outputs);
            Assert.AreEqual(1.0, r);
        }

19 Source : ComplexSignalTest.cs
with MIT License
from PacktPublishing

[Test]
        public void GetEnergyTest()
        {
            ComplexSignal target = ComplexSignal.FromArray(data, 8000);
            double expected = 0.5444;
            double actual = target.GetEnergy();
            Assert.AreEqual(expected, actual, 1e-4);
        }

19 Source : MFCCTests.cs
with MIT License
from PacktPublishing

[Test]
        public void sig2s2mfcTest()
        {
            string fileName = Path.Combine(TestContext.CurrentContext.TestDirectory, "Resources", "sa1.wav");
            Signal w_sig = loadSignalFromWaveFile(fileName);

            // test for the default result
            MFCC w_mfcc = new MFCC();
            double[,] w_expectedMfccVal =
            {
                {3.91389903, -0.63428996, -0.73083372, -0.27443219, -0.54074218, -0.21305643, -0.33009162,  0.01223665, 0.45162122, 0.15050475, -0.09619379, -0.13371888, 0.18935507},
                {3.69534644, -0.68937544, -0.60949647, -0.09637816, -0.31463132, -0.32314598, -0.41129398, -0.21011665, 0.06462785, -0.07624873, -0.20837106, -0.14339344, 0.07252984},
                {3.25482565e+00, -7.15718205e-01, -5.04548111e-01, -1.97574580e-01, -3.15717510e-01, -1.74628550e-01, -3.20555008e-01, -3.06257583e-03, 1.83161036e-01, 8.65540658e-02, -9.35392446e-02, -1.52130545e-01, 4.03793471e-03},
                {3.07159893, -0.62133048, -0.37605836, -0.18068081, -0.21762302, -0.18910972, -0.32020514, -0.07012279,  0.12233751, 0.02942084, -0.04949337, -0.04563318, 0.11019492 },
                {2.74340933e+00, -7.08988732e-01, -2.99531037e-01, -1.98055215e-01, -3.02207415e-01, -3.13485271e-01, -2.80700575e-01, 1.67755943e-02, 1.61175304e-01, -2.20894251e-04, -4.25688705e-02, -9.82638399e-02, 3.37645901e-02}
            };
            double[][] w_actualMfccSig = w_mfcc.ProcessSignal(w_sig);

            Assert.AreEqual(w_actualMfccSig.Rows(), 283);
            Assert.AreEqual(w_actualMfccSig.Columns(), 13);

            for (int w_i = 0; w_i < 5; w_i++)
                for (int w_j = 0; w_j < 13; w_j++)
                    Assert.AreEqual(w_expectedMfccVal[w_i, w_j], w_actualMfccSig[w_i][w_j], 0.000001);

            // test for MFCC parameters nfilt = 25, ncep = 12, lowerf = 130.0, upperf = 6800.0, alpha = 0.87
            w_mfcc = new MFCC(25, 12, 130.0, 6800.0, 0.87);
            double[,] w_expectedMfccVal_1 =
            {
                {4.13175446, -0.45746458, -0.65432219, -0.2140113, -0.49878507, -0.26639248, -0.43689749, 0.00970135, 0.41347535, 0.09463917, -0.07951737, -0.06289812},
                {3.90927554, -0.50781141, -0.47409891, 0.01831778, -0.23448214, -0.29941623, -0.41492151, -0.17115635, 0.08229384, -0.09572194, -0.19925789, -0.11871483},
                {3.4429235, -0.52405457, -0.37301593, -0.11474238, -0.24959892, -0.16173184, -0.32337604, -0.00942962, 0.1586781, 0.05343699, -0.10842342, -0.12708318},
                {3.26886752, -0.44637767, -0.23872891, -0.09883555, -0.17149914, -0.23496224, -0.37151199, -0.0946323, 0.08348784, -0.02832338, -0.06774762, -0.03713541},
                {2.88170975, -0.54571717, -0.19885646, -0.10661848, -0.24702927, -0.273381, -0.27160955, 0.00899187, 0.16025249, 0.00751152, -0.03147983, -0.07606966}
            };

            double[][] w_actualMfccSig_1 = w_mfcc.ProcessSignal(w_sig);

            Assert.AreEqual((int)w_actualMfccSig_1.Columns(), (int)12);

            for (int w_i = 0; w_i < 5; w_i++)
                for (int w_j = 0; w_j < 12; w_j++)
                    Assert.AreEqual(w_expectedMfccVal_1[w_i, w_j], w_actualMfccSig_1[w_i][w_j], 0.000001);

            // test for MFCC parameters nfilt = 35, ncep = 12, lowerf = 130.0, upperf = 6890.0, alpha = 0.97
            w_mfcc = new MFCC(35, 12, 130.0, 6890.0, 0.97);
            double[,] w_expectedMfccVal_2 =
            {
                {3.95757826, -0.59464561, -0.69878593, -0.25837055, -0.55493371, -0.25020778, -0.37585643, -0.02871867, 0.42886297, 0.12878995, -0.11080583, -0.11860296},
                {3.76931287, -0.6459003, -0.56337162, -0.04560939, -0.29495178, -0.32606449, -0.43379464, -0.21751486, 0.06950738, -0.08252954, -0.21313119, -0.13387352},
                {3.31044931, -0.6881805, -0.48189091, -0.16050655, -0.29858016, -0.17695709, -0.33559796, -0.02962035, 0.16261671, 0.06071229, -0.13669495, -0.16597},
                {3.14036623, -0.58641167, -0.33283952, -0.15307373, -0.21683636, -0.22139794, -0.36191339, -0.11101222, 0.08726032, -0.00426344, -0.07244142, -0.03946722},
                {2.80633631e+00, -6.80484182e-01, -2.83551364e-01, -1.66453772e-01, -2.89528527e-01, -3.02580033e-01, -2.85276035e-01, -3.18950002e-04, 1.80481693e-01, 5.06706047e-03, -6.60448306e-02, -1.01470709e-01}
            };

            double[][] w_actualMfccSig_2 = w_mfcc.ProcessSignal(w_sig);

            Assert.AreEqual((int)w_actualMfccSig_2.Columns(), (int)12);

            for (int w_i = 0; w_i < 5; w_i++)
                for (int w_j = 0; w_j < 12; w_j++)
                    Assert.AreEqual(w_expectedMfccVal_2[w_i, w_j], w_actualMfccSig_2[w_i][w_j], 0.000001);
        }

19 Source : MFCCTests.cs
with MIT License
from PacktPublishing

[Test]
        public void logspec2s2mfcTest()
        {
            double[,] input =
            {
                { 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.862745098039216, 0, 0.662745098039216, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.529411764705882, 0.129411764705882, 0.262745098039216, 0.996078431372549, 0.996078431372549, 0.996078431372549 },
                { 0.996078431372549, 0.996078431372549, 0.0627450980392157, 0.662745098039216, 0.0627450980392157, 0.862745098039216, 0.996078431372549, 0.996078431372549 }
            };

            double[,] actual = MFCC.logspec2s2mfc(input, 5);

            double[,] expected =
            {
                {0.93382353, -0.06105869, -0.05751603, -0.05176306, -0.04402086},
                {0.75098039, -0.08648009,  0.10238907,  0.01060854, -0.14974026},
                {0.6754902,  -0.09671846,  0.14958308,  0.01470892, -0.14419432},
                {0.64215686, -0.10198394,  0.13978935,  0.0046477,  -0.06169853}
            };

            Assert.AreEqual(actual.GetLength(0), 4);
            Assert.AreEqual(actual.GetLength(1), 5);

            for (int w_i = 0; w_i < expected.GetLength(0); w_i++)
            {
                for (int w_j = 0; w_j < expected.GetLength(1); w_j++)
                    Assert.AreEqual(expected[w_i, w_j], actual[w_i, w_j], 0.000001);
            }
        }
