Commit
Merge pull request #230 from starlibs/dev
Version 0.2.3 and updated travis config.
fmohr authored Jul 27, 2020
2 parents fc32d07 + aeacd22 commit 2d5af88
Showing 27 changed files with 847 additions and 862 deletions.
4 changes: 2 additions & 2 deletions .travis.yml
@@ -22,8 +22,8 @@ addons:
script:
- ./gradlew compileJava
- ./gradlew testClasses
-  - git fetch --no-tags --unshallow https://github.com/fmohr/AILibs.git +master:refs/heads/master
-  - git fetch --no-tags https://github.com/fmohr/AILibs.git +dev:refs/heads/dev
+  - git fetch --no-tags --unshallow https://github.com/starlibs/AILibs.git +master:refs/heads/master
+  - git fetch --no-tags https://github.com/starlibs/AILibs.git +dev:refs/heads/dev
- ./gradlew sonarqube -x test

env:
ConfusionMetrics.java
@@ -180,7 +180,13 @@ public static double getF1Score(final int tp, final int fp, final int fn) {
* @return The F-Measure score.
*/
public static double getFMeasure(final double beta, final int tp, final int fp, final int fn) {
-	return (1 + Math.pow(beta, 2)) * getPrecision(tp, fp) * getRecall(tp, fn) / (Math.pow(beta, 2) * getPrecision(tp, fp) + getRecall(tp, fn));
+	double precision = getPrecision(tp, fp);
+	double recall = getRecall(tp, fn);
+	double betaSquare = Math.pow(beta, 2);
+	// guard against the undefined 0/0 case: if precision and recall are both 0, the F-measure is 0
+	if (precision + recall == 0.0) {
+		return 0.0;
+	}
+	return (1 + betaSquare) * precision * recall / (betaSquare * precision + recall);
}

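A quick sanity check of the new guard (a hedged sketch: it uses only methods visible in this diff and assumes getF1Score computes the standard 2pr/(p + r)): with zero true positives the method now returns 0.0 where the old one-liner evaluated 0.0/0.0 = NaN, and beta = 1 recovers the plain F1 score.

double degenerate = ConfusionMetrics.getFMeasure(1.0, 0, 2, 2); // precision = recall = 0, guard yields 0.0 instead of NaN
double f1 = ConfusionMetrics.getF1Score(3, 1, 1); // precision = recall = 0.75, so F1 = 0.75
double fBeta = ConfusionMetrics.getFMeasure(1.0, 3, 1, 1); // also 0.75: F_beta at beta = 1 equals F1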
ConfusionMetricsTest.java
@@ -0,0 +1,87 @@
package ai.libs.jaicore.basic.metric;

import static org.junit.Assert.assertEquals;

import java.util.stream.IntStream;

import org.junit.Test;

public class ConfusionMetricsTest {

private static final int[][] a = { { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0 } };
private static final int[][] b = { { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0 } };
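// a is the ground truth and b the prediction: the tp/fp/tn/fn helpers below
// define positives relative to their first argument. b hits none of a's
// positives (tp = 0), raises false alarms at indices 8 and 9 (fp = 2), misses
// a's positives at indices 5 and 10 (fn = 2), and agrees on 0 at the remaining
// eight indices (tn = 8); hence precision, recall, and F1 are all 0.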

private static final int[] tp = { 0 };
private static final int[] fp = { 2 };
private static final int[] tn = { 8 };
private static final int[] fn = { 2 };

private static final double[] precision = { 0.0 };
private static final double[] recall = { 0.0 };
private static final double[] f1score = { 0.0 };

@Test
public void testPrecision() {
for (int i = 0; i < a.length; i++) {
assertEquals("Precision not correct", precision[i], ConfusionMetrics.getPrecision(tp(a[i], b[i]), fp(a[i], b[i])), 1E-8);
}
}

@Test
public void testRecall() {
for (int i = 0; i < a.length; i++) {
assertEquals("Recall not correct", recall[i], ConfusionMetrics.getRecall(tp(a[i], b[i]), fn(a[i], b[i])), 1E-8);
}
}

@Test
public void testF1Score() {
for (int i = 0; i < a.length; i++) {
assertEquals("F1Score not correct", f1score[i], ConfusionMetrics.getF1Score(tp(a[i], b[i]), fp(a[i], b[i]), fn(a[i], b[i])), 1E-8);
}
}

@Test
public void testTP() {
for (int i = 0; i < a.length; i++) {
assertEquals("TP not correct", tp[i], tp(a[i], b[i]));
}
}

@Test
public void testFP() {
for (int i = 0; i < a.length; i++) {
assertEquals("FP not correct", fp[i], fp(a[i], b[i]));
}
}

@Test
public void testTN() {
for (int i = 0; i < a.length; i++) {
assertEquals("TN not correct", tn[i], tn(a[i], b[i]));
}
}

@Test
public void testFN() {
for (int i = 0; i < a.length; i++) {
assertEquals("FN not correct", fn[i], fn(a[i], b[i]));
}
}

private static int tp(final int[] x, final int[] y) {
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 1 && x[i] == y[i]).count();
}

private static int fp(final int[] x, final int[] y) {
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 0 && x[i] != y[i]).count();
}

private static int tn(final int[] x, final int[] y) {
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 0 && x[i] == y[i]).count();
}

private static int fn(final int[] x, final int[] y) {
return (int) IntStream.range(0, x.length).filter(i -> x[i] == 1 && x[i] != y[i]).count();
}
}
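The four helpers partition the index set: every position falls into exactly one of the four confusion cells, so their counts always sum to the vector length. A minimal check with hypothetical vectors, reusing the helpers above:

int[] x = { 1, 0, 1, 0 }; // ground truth
int[] y = { 1, 1, 0, 0 }; // prediction: one hit, one false alarm, one miss, one correct rejection
assert tp(x, y) + fp(x, y) + tn(x, y) + fn(x, y) == x.length; // 1 + 1 + 1 + 1 == 4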
BlackBoxGradient.java
@@ -1,63 +1,63 @@
Apart from the author-name encoding fix ("Mirko J�rgens" became "Mirko Jürgens"), the old and new versions of this file are identical; the new version follows.
package ai.libs.jaicore.math.gradientdescent;

import org.api4.java.common.math.IVector;

import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector;

/**
* Difference-quotient-based gradient estimation. This class gives a
* black-box gradient estimate by simply calculating
*
* (f(x + h) - f(x)) / h
*
* where x is the provided point and h is a small step width, specified by the
* parameter <code>precision</code>. (In the limit precision -> 0, this yields
* the exact gradient.)
*
* If x is a vector (a_0, ..., a_n), we instead calculate each partial
* derivative i by:
*
* (f(a_0, ..., a_i + h, ..., a_n) - f(a_0, ..., a_n)) / h
*
* Obviously, this is a highly inefficient approach to estimating the gradient:
* since f(x) is evaluated only once, n partial derivatives need n + 1 evaluations of f.
*
* @author Mirko Jürgens
*
*/
public class BlackBoxGradient implements IGradientFunction {

private final double precision;

private final IGradientDescendableFunction function;

/**
* Sets up a gradient-estimator for the given function. The estimation of the gradient can be tuned by the precision parameter.
*
* @param underlyingFunction the function for which the gradient shall be estimated
* @param precision the precision of the estimation; the closer this value is to zero, the better the estimation
*/
public BlackBoxGradient(final IGradientDescendableFunction underlyingFunction, final double precision) {
this.precision = precision;
this.function = underlyingFunction;
}

@Override
public IVector apply(final IVector xVec) {
IVector gradient = new DenseDoubleVector(xVec.length());
double fX = this.function.apply(xVec);
IVector xPrime = new DenseDoubleVector(xVec.asArray());
for (int i = 0; i < xVec.length(); i++) {
if (i > 0) {
// undo the perturbation of the previous coordinate so that only coordinate i differs from x
xPrime.setValue(i - 1, xPrime.getValue(i - 1) - this.precision);
}
xPrime.setValue(i, xPrime.getValue(i) + this.precision);
// i-th partial derivative as the forward difference quotient (f(x') - f(x)) / h
double fXPrime = this.function.apply(xPrime);
double partial = (fXPrime - fX) / this.precision;
gradient.setValue(i, partial);
}
return gradient;
}

}
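A hedged usage sketch: the implementation above implies that IGradientDescendableFunction exposes double apply(IVector) and that DenseDoubleVector accepts a double[]; treating the interface as lambda-compatible is an extra assumption here (fall back to an anonymous class if it has more than one abstract method). For f(x, y) = x^2 + 3y, the estimated gradient at (2, 5) is approximately (4, 3):

IGradientDescendableFunction f = v -> v.getValue(0) * v.getValue(0) + 3 * v.getValue(1); // f(x, y) = x^2 + 3y
IGradientFunction grad = new BlackBoxGradient(f, 1e-6);
IVector g = grad.apply(new DenseDoubleVector(new double[] { 2.0, 5.0 }));
// g.getValue(0) is about 4.0 (df/dx = 2x at x = 2), g.getValue(1) is about 3.0 (df/dy = 3)

A central difference quotient, (f(x + h) - f(x - h)) / (2h), would reduce the truncation error from O(h) to O(h^2), at roughly twice the number of function evaluations.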