-
Notifications
You must be signed in to change notification settings - Fork 36
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #230 from starlibs/dev
Version 0.2.3 and updated travis config.
- Loading branch information
Showing
27 changed files
with
847 additions
and
862 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
87 changes: 87 additions & 0 deletions
87
JAICore/jaicore-basic/src/test/java/ai/libs/jaicore/basic/metric/ConfusionMetricsTest.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,87 @@ | ||
package ai.libs.jaicore.basic.metric; | ||
|
||
import static org.junit.Assert.assertEquals; | ||
|
||
import java.util.stream.IntStream; | ||
|
||
import org.junit.Test; | ||
|
||
public class ConfusionMetricsTest { | ||
|
||
private static final int[][] a = { { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0 } }; | ||
private static final int[][] b = { { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0 } }; | ||
|
||
private static final int[] tp = { 0 }; | ||
private static final int[] fp = { 2 }; | ||
private static final int[] tn = { 8 }; | ||
private static final int[] fn = { 2 }; | ||
|
||
private static final double[] precision = { 0.0 }; | ||
private static final double[] recall = { 0.0 }; | ||
private static final double[] f1score = { 0.0 }; | ||
|
||
@Test | ||
public void testPrecision() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("Precision not correct", precision[i], ConfusionMetrics.getPrecision(tp(a[i], b[i]), fp(a[i], b[i])), 1E-8); | ||
} | ||
} | ||
|
||
@Test | ||
public void testRecall() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("Recall not correct", recall[i], ConfusionMetrics.getRecall(tp(a[i], b[i]), fn(a[i], b[i])), 1E-8); | ||
} | ||
} | ||
|
||
@Test | ||
public void testF1Score() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("F1Score not correct", f1score[i], ConfusionMetrics.getF1Score(tp(a[i], b[i]), fp(a[i], b[i]), fn(a[i], b[i])), 1E-8); | ||
} | ||
} | ||
|
||
@Test | ||
public void testTP() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("TP not correct", tp[i], tp(a[i], b[i])); | ||
} | ||
} | ||
|
||
@Test | ||
public void testFP() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("FP not correct", fp[i], fp(a[i], b[i])); | ||
} | ||
} | ||
|
||
@Test | ||
public void testTN() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("TN not correct", tn[i], tn(a[i], b[i])); | ||
} | ||
} | ||
|
||
@Test | ||
public void testFN() { | ||
for (int i = 0; i < a.length; i++) { | ||
assertEquals("FN not correct", fn[i], fn(a[i], b[i])); | ||
} | ||
} | ||
|
||
private static int tp(final int[] x, final int[] y) { | ||
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 1 && x[i] == y[i]).count(); | ||
} | ||
|
||
private static int fp(final int[] x, final int[] y) { | ||
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 0 && x[i] != y[i]).count(); | ||
} | ||
|
||
private static int tn(final int[] x, final int[] y) { | ||
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 0 && x[i] == y[i]).count(); | ||
} | ||
|
||
private static int fn(final int[] x, final int[] y) { | ||
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 1 && x[i] != y[i]).count(); | ||
} | ||
} |
126 changes: 63 additions & 63 deletions
126
...ore/jaicore-math/src/main/java/ai/libs/jaicore/math/gradientdescent/BlackBoxGradient.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,63 +1,63 @@ | ||
package ai.libs.jaicore.math.gradientdescent; | ||
|
||
import org.api4.java.common.math.IVector; | ||
|
||
import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; | ||
|
||
/** | ||
* Difference quotient based gradient estimation. This class will give a | ||
* black-box gradient estimation by simply calculating | ||
* | ||
* (f(x + h) - f(x))/h | ||
* | ||
* where x is the provided point and x' is a point that slightly differs, | ||
* specified by the parameter <code>precision</code>. (Obviously it holds that | ||
* in lim_{precision -> 0} this yields the exact gradient.) | ||
* | ||
* If x is a vector (a_o, ..., a_n), then, instead we calculate each partial | ||
* derivative i by: | ||
* | ||
* (f(a_o, ... a_i +h, ... , a_n) - f((a_o, ..., a_n)))/h | ||
* | ||
* Obviously, this is a highly inefficient approach for estimating the gradient | ||
 * (estimating n partial derivatives this way requires n + 1 function evaluations). |  ||
* | ||
 * @author Mirko Jürgens |  ||
* | ||
*/ | ||
public class BlackBoxGradient implements IGradientFunction { | ||
|
||
private final double precision; | ||
|
||
private final IGradientDescendableFunction function; | ||
|
||
/** | ||
* Sets up a gradient-estimator for the given function. The estimation of the gradient can be tuned by the precision parameter. | ||
* | ||
* @param underlyingFunction the function for which the gradient shall be estimated | ||
* @param precision the precision of the estimation, the close this value is to zero the better is the estimation | ||
*/ | ||
public BlackBoxGradient(final IGradientDescendableFunction underlyingFunction, final double precision) { | ||
this.precision = precision; | ||
this.function = underlyingFunction; | ||
} | ||
|
||
@Override | ||
public IVector apply(final IVector xVec) { | ||
IVector gradient = new DenseDoubleVector(xVec.length()); | ||
double fX = this.function.apply(xVec); | ||
IVector xPrime = new DenseDoubleVector(xVec.asArray()); | ||
for (int i = 0; i < xVec.length(); i++) { | ||
if (i > 0) { | ||
xPrime.setValue(i - 1, xPrime.getValue(i - 1) - this.precision); | ||
} | ||
xPrime.setValue(i, xPrime.getValue(i) + this.precision); | ||
// now compute f(x') - f(x) | ||
double fXPrime = this.function.apply(xPrime); | ||
double partial = fXPrime - fX; | ||
gradient.setValue(i, partial); | ||
} | ||
return gradient; | ||
} | ||
|
||
} | ||
package ai.libs.jaicore.math.gradientdescent; | ||
|
||
import org.api4.java.common.math.IVector; | ||
|
||
import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; | ||
|
||
/** | ||
* Difference quotient based gradient estimation. This class will give a | ||
* black-box gradient estimation by simply calculating | ||
* | ||
* (f(x + h) - f(x))/h | ||
* | ||
* where x is the provided point and x' is a point that slightly differs, | ||
* specified by the parameter <code>precision</code>. (Obviously it holds that | ||
* in lim_{precision -> 0} this yields the exact gradient.) | ||
* | ||
* If x is a vector (a_o, ..., a_n), then, instead we calculate each partial | ||
* derivative i by: | ||
* | ||
* (f(a_o, ... a_i +h, ... , a_n) - f((a_o, ..., a_n)))/h | ||
* | ||
* Obviously, this is a highly inefficient approach for estimating the gradient | ||
 * (estimating n partial derivatives this way requires n + 1 function evaluations). |  ||
* | ||
* @author Mirko Jürgens | ||
* | ||
*/ | ||
public class BlackBoxGradient implements IGradientFunction { | ||
|
||
private final double precision; | ||
|
||
private final IGradientDescendableFunction function; | ||
|
||
/** | ||
* Sets up a gradient-estimator for the given function. The estimation of the gradient can be tuned by the precision parameter. | ||
* | ||
* @param underlyingFunction the function for which the gradient shall be estimated | ||
* @param precision the precision of the estimation, the close this value is to zero the better is the estimation | ||
*/ | ||
public BlackBoxGradient(final IGradientDescendableFunction underlyingFunction, final double precision) { | ||
this.precision = precision; | ||
this.function = underlyingFunction; | ||
} | ||
|
||
@Override | ||
public IVector apply(final IVector xVec) { | ||
IVector gradient = new DenseDoubleVector(xVec.length()); | ||
double fX = this.function.apply(xVec); | ||
IVector xPrime = new DenseDoubleVector(xVec.asArray()); | ||
for (int i = 0; i < xVec.length(); i++) { | ||
if (i > 0) { | ||
xPrime.setValue(i - 1, xPrime.getValue(i - 1) - this.precision); | ||
} | ||
xPrime.setValue(i, xPrime.getValue(i) + this.precision); | ||
// now compute f(x') - f(x) | ||
double fXPrime = this.function.apply(xPrime); | ||
double partial = fXPrime - fX; | ||
gradient.setValue(i, partial); | ||
} | ||
return gradient; | ||
} | ||
|
||
} |
Oops, something went wrong.